ext (string, 9 classes) | sha (string, length 40) | content (string, 3 to 1.04M chars)
---|---|---|
py
|
1a576180fd8980eb9d4f1e2a7ac20e3a32dbd4ee
|
import ctypes
import struct
# 3p
import bson
from bson.codec_options import CodecOptions
from bson.son import SON
# project
from ...compat import to_unicode
from ...ext import net as netx
from ...internal.logger import get_logger
log = get_logger(__name__)
# MongoDB wire protocol commands
# http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
OP_CODES = {
1: 'reply',
1000: 'msg', # DEV: 1000 was deprecated at some point, use 2013 instead
2001: 'update',
2002: 'insert',
2003: 'reserved',
2004: 'query',
2005: 'get_more',
2006: 'delete',
2007: 'kill_cursors',
2010: 'command',
2011: 'command_reply',
2013: 'msg',
}
# The maximum message length we'll try to parse
MAX_MSG_PARSE_LEN = 1024 * 1024
header_struct = struct.Struct('<iiii')
class Command(object):
""" Command stores information about a pymongo network command, """
__slots__ = ['name', 'coll', 'db', 'tags', 'metrics', 'query']
def __init__(self, name, db, coll):
self.name = name
self.coll = coll
self.db = db
self.tags = {}
self.metrics = {}
self.query = None
def __repr__(self):
return (
'Command('
'name=%s,'
'db=%s,'
'coll=%s)'
) % (self.name, self.db, self.coll)
def parse_msg(msg_bytes):
""" Return a command from a binary mongo db message or None if we shoudln't
trace it. The protocol is documented here:
http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
"""
# NOTE[matt] this is used for queries in pymongo <= 3.0.0 and for inserts
# in up to date versions.
msg_len = len(msg_bytes)
if msg_len <= 0:
return None
header = header_struct.unpack_from(msg_bytes, 0)
(length, req_id, response_to, op_code) = header
op = OP_CODES.get(op_code)
if not op:
log.debug('unknown op code: %s', op_code)
return None
db = None
coll = None
offset = header_struct.size
cmd = None
if op == 'query':
# NOTE[matt] inserts, updates and queries can all use this opcode
offset += 4 # skip flags
ns = _cstring(msg_bytes[offset:])
offset += len(ns) + 1 # include null terminator
# note: here coll could be '$cmd' because it can be overridden in the
# query itself (like {'insert':'songs'})
db, coll = _split_namespace(ns)
offset += 8 # skip numberToSkip & numberToReturn
if msg_len <= MAX_MSG_PARSE_LEN:
# FIXME[matt] don't try to parse large messages for performance
# reasons. ideally we'd just peek at the first bytes to get
# the critical info (op type, collection, query, # of docs)
# rather than parse the whole thing. i suspect only massive
# inserts will be affected.
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command('command', db, 'untraced_message_too_large')
# If the command didn't contain namespace info, set it here.
if not cmd.coll:
cmd.coll = coll
elif op == 'msg':
# Skip header and flag bits
offset += 4
# Parse the msg kind
kind = ord(msg_bytes[offset:offset+1])
offset += 1
# Kinds: https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#sections
# - 0: BSON Object
# - 1: Document Sequence
if kind == 0:
if msg_len <= MAX_MSG_PARSE_LEN:
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command('command', db, 'untraced_message_too_large')
else:
# let's still note that a command happened.
cmd = Command('command', db, 'unsupported_msg_kind')
if cmd:
cmd.metrics[netx.BYTES_OUT] = msg_len
return cmd
def parse_query(query):
""" Return a command parsed from the given mongo db query. """
db, coll = None, None
ns = getattr(query, 'ns', None)
if ns:
# version < 3.1 stores the full namespace
db, coll = _split_namespace(ns)
else:
# version >= 3.1 stores the db and coll separately
coll = getattr(query, 'coll', None)
db = getattr(query, 'db', None)
# pymongo < 3.1 _Query does not have a name field, so default to 'query'
cmd = Command(getattr(query, 'name', 'query'), db, coll)
cmd.query = query.spec
return cmd
def parse_spec(spec, db=None):
""" Return a Command that has parsed the relevant detail for the given
pymongo SON spec.
"""
# the first element is the command and collection
items = list(spec.items())
if not items:
return None
name, coll = items[0]
cmd = Command(name, db or spec.get('$db'), coll)
if 'ordered' in spec: # in insert and update
cmd.tags['mongodb.ordered'] = spec['ordered']
if cmd.name == 'insert':
if 'documents' in spec:
cmd.metrics['mongodb.documents'] = len(spec['documents'])
elif cmd.name == 'update':
updates = spec.get('updates')
if updates:
# FIXME[matt] is there ever more than one here?
cmd.query = updates[0].get('q')
elif cmd.name == 'delete':
dels = spec.get('deletes')
if dels:
# FIXME[matt] is there ever more than one here?
cmd.query = dels[0].get('q')
return cmd
def _cstring(raw):
""" Return the first null terminated cstring from the bufffer. """
return ctypes.create_string_buffer(raw).value
def _split_namespace(ns):
""" Return a tuple of (db, collecton) from the 'db.coll' string. """
if ns:
# NOTE[matt] ns is unicode or bytes depending on the client version
# so force cast to unicode
split = to_unicode(ns).split('.', 1)
if len(split) == 1:
raise Exception("namespace doesn't contain period: %s" % ns)
return split
return (None, None)
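# Usage sketch (illustrative only, not part of the original module). It builds a
# minimal OP_QUERY wire message by hand and feeds it to parse_msg; it assumes a
# pymongo/bson version that exposes bson.encode.
#
#   body = struct.pack('<i', 0)                      # flags
#   body += b'test.songs\x00'                        # namespace cstring
#   body += struct.pack('<ii', 0, 1)                 # numberToSkip, numberToReturn
#   body += bson.encode({'insert': 'songs'})         # query spec
#   msg = struct.pack('<iiii', 16 + len(body), 1, 0, 2004) + body
#   parse_msg(msg)   # -> Command(name=insert,db=test,coll=songs)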
|
py
|
1a576296b4b14f5d132ab314800d53670be0c06d
|
# Generated by Django 2.1.5 on 2019-03-09 22:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('stocks', '0001_initial'),
('portfolio', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='portfolio',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='portfolio', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='asset',
name='portfolio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='asset', to='portfolio.Portfolio'),
),
migrations.AddField(
model_name='asset',
name='stocks',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='stocks.Stocks'),
),
]
|
py
|
1a57648dfbe7bfac001cab3eeb57ddbfe11bfcc7
|
import click
import getpass
from eth_wallet.cli.utils_cli import (
get_api,
)
from eth_wallet.configuration import (
Configuration,
)
@click.command()
@click.option('-m', '--mnemonic-sentence', default='', prompt='Mnemonic sentence',
help='Remembered mnemonic sentence to restore wallet.')
def restore_wallet(mnemonic_sentence):
"""Creates new wallet and store encrypted keystore file."""
passphrase = getpass.getpass('Passphrase: ') # Prompt the user for a password of keystore file
configuration = Configuration().load_configuration()
api = get_api()
wallet = api.restore_wallet(configuration, mnemonic_sentence, passphrase)
click.echo('Account address: %s' % str(wallet.get_address()))
click.echo('Account pub key: %s' % str(wallet.get_public_key()))
click.echo('Keystore path: %s' % configuration.keystore_location + configuration.keystore_filename)
click.echo('Remember these words to restore eth-wallet: %s' % wallet.get_mnemonic())
|
py
|
1a5764a28fbba1b1832288eec7f1e52a4f08d966
|
#!/usr/bin/env python
# sudo apt install python3-tk
from camera import *
c = Camera('192.168.0.100', 52381)
def save_preset_labels():
with open('preset_labels.txt', 'w') as f:
for entry in entry_boxes:
f.write(entry.get())
f.write('\n')
f.close()
# GUI
from tkinter import Tk, StringVar, Button, Label, Scale, Entry, W
root = Tk()
#display_message = StringVar()
root.title('VISCA IP Camera Controller')
root['background'] = 'white'
#Label(root, text='VISCA IP Camera Controller').grid(row=0, column=0, columnspan=100)
store_column = 0
label_column = 1
recall_column = 2
pan_tilt_column = 5
pan_tilt_row = 1
zoom_column = 3
zoom_row = 1
focus_column = 3
focus_row = 8
on_off_column = 3
on_off_row = 13
button_width = 8
store_color = 'red'
recall_color = 'light grey'
pan_tilt_color = 'white'
zoom_color = 'light blue'
focus_color = 'cyan'
on_off_color = 'violet'
# Preset store buttons
Label(root, text='Store', bg=store_color).grid(row=1, column=store_column)
Button(root, text=0, width=3, bg=store_color, command=lambda: c.memory_set(0)).grid(row=2, column=store_column)
Button(root, text=1, width=3, bg=store_color, command=lambda: c.memory_set(1)).grid(row=3, column=store_column)
Button(root, text=2, width=3, bg=store_color, command=lambda: c.memory_set(2)).grid(row=4, column=store_column)
Button(root, text=3, width=3, bg=store_color, command=lambda: c.memory_set(3)).grid(row=5, column=store_column)
Button(root, text=4, width=3, bg=store_color, command=lambda: c.memory_set(4)).grid(row=6, column=store_column)
Button(root, text=5, width=3, bg=store_color, command=lambda: c.memory_set(5)).grid(row=7, column=store_column)
Button(root, text=6, width=3, bg=store_color, command=lambda: c.memory_set(6)).grid(row=8, column=store_column)
Button(root, text=7, width=3, bg=store_color, command=lambda: c.memory_set(7)).grid(row=9, column=store_column)
Button(root, text=8, width=3, bg=store_color, command=lambda: c.memory_set(8)).grid(row=10, column=store_column)
Button(root, text=9, width=3, bg=store_color, command=lambda: c.memory_set(9)).grid(row=11, column=store_column)
Button(root, text='A', width=3, bg=store_color, command=lambda: c.memory_set(10)).grid(row=12, column=store_column)
Button(root, text='B', width=3, bg=store_color, command=lambda: c.memory_set(11)).grid(row=13, column=store_column)
Button(root, text='C', width=3, bg=store_color, command=lambda: c.memory_set(12)).grid(row=14, column=store_column)
Button(root, text='D', width=3, bg=store_color, command=lambda: c.memory_set(13)).grid(row=15, column=store_column)
Button(root, text='E', width=3, bg=store_color, command=lambda: c.memory_set(14)).grid(row=16, column=store_column)
Button(root, text='F', width=3, bg=store_color, command=lambda: c.memory_set(15)).grid(row=17, column=store_column)
# Recall buttons and entries (as labels)
Label(root, text='Recall', bg=recall_color).grid(row=1, column=recall_column)
Button(root, text=0, width=5, bg=recall_color, command=lambda: c.memory_recall(0)).grid(row=2, column=recall_column)
Button(root, text=1, width=5, bg=recall_color, command=lambda: c.memory_recall(1)).grid(row=3, column=recall_column)
Button(root, text=2, width=5, bg=recall_color, command=lambda: c.memory_recall(2)).grid(row=4, column=recall_column)
Button(root, text=3, width=5, bg=recall_color, command=lambda: c.memory_recall(3)).grid(row=5, column=recall_column)
Button(root, text=4, width=5, bg=recall_color, command=lambda: c.memory_recall(4)).grid(row=6, column=recall_column)
Button(root, text=5, width=5, bg=recall_color, command=lambda: c.memory_recall(5)).grid(row=7, column=recall_column)
Button(root, text=6, width=5, bg=recall_color, command=lambda: c.memory_recall(6)).grid(row=8, column=recall_column)
Button(root, text=7, width=5, bg=recall_color, command=lambda: c.memory_recall(7)).grid(row=9, column=recall_column)
Button(root, text=8, width=5, bg=recall_color, command=lambda: c.memory_recall(8)).grid(row=10, column=recall_column)
Button(root, text=9, width=5, bg=recall_color, command=lambda: c.memory_recall(9)).grid(row=11, column=recall_column)
Button(root, text='A', width=5, bg=recall_color, command=lambda: c.memory_recall(10)).grid(row=12, column=recall_column)
Button(root, text='B', width=5, bg=recall_color, command=lambda: c.memory_recall(11)).grid(row=13, column=recall_column)
Button(root, text='C', width=5, bg=recall_color, command=lambda: c.memory_recall(12)).grid(row=14, column=recall_column)
Button(root, text='D', width=5, bg=recall_color, command=lambda: c.memory_recall(13)).grid(row=15, column=recall_column)
Button(root, text='E', width=5, bg=recall_color, command=lambda: c.memory_recall(14)).grid(row=16, column=recall_column)
Button(root, text='F', width=5, bg=recall_color, command=lambda: c.memory_recall(15)).grid(row=17, column=recall_column)
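# Note (sketch, not in the original script): the 16 store/recall buttons above could
# equivalently be generated in a loop; the lambda default argument pins each preset index.
#
#   for i, label in enumerate('0123456789ABCDEF'):
#       Button(root, text=label, width=3, bg=store_color,
#              command=lambda n=i: c.memory_set(n)).grid(row=i+2, column=store_column)
#       Button(root, text=label, width=5, bg=recall_color,
#              command=lambda n=i: c.memory_recall(n)).grid(row=i+2, column=recall_column)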
try:
with open('preset_labels.txt', 'r') as f:
labels = f.read().splitlines()
f.close()
except:
pass
entry_boxes = []
for e in range(16):
box = Entry(root, justify='right')
try:
box.insert(-1, labels[e])
except:
pass
box.grid(row=e+2, column=label_column)
entry_boxes.append(box)
Button(root, text='Save preset labels', bg=store_color, command=lambda: save_preset_labels()).grid(row=1, column=label_column)
# Pan speed and Tilt speed sliders
Label(root, text='Pan Speed', bg=pan_tilt_color).grid(row=pan_tilt_row, column=pan_tilt_column)
pan_speed_slider = Scale(root, from_=24, to=0, bg=pan_tilt_color)
pan_speed_slider.set(7)
pan_speed_slider.grid(row=pan_tilt_row+1, column=pan_tilt_column, rowspan=4)
Label(root, text='Tilt Speed', bg=pan_tilt_color).grid(row=pan_tilt_row, column=pan_tilt_column+1)
tilt_speed_slider = Scale(root, from_=24, to=0, bg=pan_tilt_color)
tilt_speed_slider.set(7)
tilt_speed_slider.grid(row=pan_tilt_row+1, column=pan_tilt_column+1, rowspan=4)
#Button(root, text='test', command=lambda: print(pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=0,column=0)
# Pan and tilt buttons
Button(root, text='↑', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('up', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row, column=pan_tilt_column+3)
Button(root, text='←', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('left', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+1, column=pan_tilt_column+2)
Button(root, text='→', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('right', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+1, column=pan_tilt_column+4)
Button(root, text='↓', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('down', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+2, column=pan_tilt_column+3)
Button(root, text='↖', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('upleft', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row, column=pan_tilt_column+2)
Button(root, text='↗', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('upright', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row, column=pan_tilt_column+4)
Button(root, text='↙', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('downleft', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+2, column=pan_tilt_column+2)
Button(root, text='↘', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('downright', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+2, column=pan_tilt_column+4)
Button(root, text='■', width=3, bg=pan_tilt_color, command=lambda: c.pantilt_stop()).grid(row=pan_tilt_row+1, column=pan_tilt_column+3)
#Button(root, text='Home', command=lambda: send_message(pan_home)).grid(row=pan_tilt_row+2, column=pan_tilt_column+1)
# Zoom buttons
Label(root, text='Zoom', bg=zoom_color, width=button_width).grid(row=zoom_row, column=zoom_column)
Button(root, text='In', bg=zoom_color, width=button_width, command=lambda: c.zoom_in()).grid(row=zoom_row+1, column=zoom_column)
Button(root, text='Stop', bg=zoom_color, width=button_width, command=lambda: c.zoom_stop()).grid(row=zoom_row+2, column=zoom_column)
Button(root, text='Out', bg=zoom_color, width=button_width, command=lambda: c.zoom_out()).grid(row=zoom_row+3, column=zoom_column)
# On off connect buttons
Label(root, text='Camera', bg=on_off_color, width=button_width).grid(row=on_off_row, column=on_off_column)
Button(root, text='On', bg=on_off_color, width=button_width, command=lambda: c.on()).grid(row=on_off_row+1, column=on_off_column)
Button(root, text='Connect', bg=on_off_color, width=button_width, command=lambda: c.connect()).grid(row=on_off_row+2, column=on_off_column)
Button(root, text='Off', bg=on_off_color, width=button_width, command=lambda: c.off()).grid(row=on_off_row+3, column=on_off_column)
Button(root, text='Info Off', bg=on_off_color, width=button_width, command=lambda: c.info_display_off()).grid(row=on_off_row+4, column=on_off_column)
# IP Label
#Label(root, text=camera_ip+':'+str(camera_port)).grid(row=6, column=0, columnspan=3)
# Connection Label
#Label(root, textvariable=display_message).grid(row=6, column=4, columnspan=3)
root.mainloop()
#'''
|
py
|
1a5764b1bd710a48b3d671199c47036f0f77e511
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
from threading import Thread
import re
from decimal import Decimal
from kivy.clock import Clock
from electrum.i18n import _
from electrum.plugin import hook
from .trustedcoin import TrustedCoinPlugin, server, KIVY_DISCLAIMER, TrustedCoinException, ErrorConnectingServer
class Plugin(TrustedCoinPlugin):
disclaimer_msg = KIVY_DISCLAIMER
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def load_wallet(self, wallet, window):
if not isinstance(wallet, self.wallet_class):
return
self.start_request_thread(wallet)
def go_online_dialog(self, wizard):
# we skip this step on android
wizard.run('accept_terms_of_use')
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
from ...gui.kivy.uix.dialogs.label_dialog import LabelDialog
msg = _('Please enter your Google Authenticator code')
d = LabelDialog(msg, '', lambda otp: self.on_otp(wallet, tx, otp, on_success, on_failure))
d.open()
def on_otp(self, wallet, tx, otp, on_success, on_failure):
try:
wallet.on_otp(tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
Clock.schedule_once(lambda dt: on_failure(_('Invalid one-time password.')))
else:
Clock.schedule_once(lambda dt, bound_e=e: on_failure(_('Error') + ':\n' + str(bound_e)))
except Exception as e:
Clock.schedule_once(lambda dt, bound_e=e: on_failure(_('Error') + ':\n' + str(bound_e)))
else:
on_success(tx)
def accept_terms_of_use(self, wizard):
def handle_error(msg, e):
wizard.show_error(msg + ':\n' + str(e))
wizard.terminate()
try:
tos = server.get_terms_of_service()
except ErrorConnectingServer as e:
Clock.schedule_once(lambda dt, bound_e=e: handle_error(_('Error connecting to server'), bound_e))
except Exception as e:
Clock.schedule_once(lambda dt, bound_e=e: handle_error(_('Error'), bound_e))
else:
f = lambda x: self.read_email(wizard)
wizard.tos_dialog(tos=tos, run_next=f)
def read_email(self, wizard):
f = lambda x: self.create_remote_key(x, wizard)
wizard.email_dialog(run_next=f)
def request_otp_dialog(self, wizard, short_id, otp_secret, xpub3):
f = lambda otp, reset: self.check_otp(wizard, short_id, otp_secret, xpub3, otp, reset)
wizard.otp_dialog(otp_secret=otp_secret, run_next=f)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.start_request_thread(wallet)
Clock.schedule_once(
lambda dt: window.show_error(_('Requesting account info from TrustedCoin server...') + '\n' +
_('Please try again.')))
return True
return False
|
py
|
1a576523c2c98343d3dc53a8b18a53505163caf8
|
# test classification dataset
from sklearn.datasets import make_classification
# define dataset
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
# summarize the dataset
print(X.shape, y.shape)
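# Optional follow-up (sketch, not in the original snippet): check the class balance
# of the generated labels.
from collections import Counter
print(Counter(y))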
|
py
|
1a576620b7d1bf47ed6cd572aadb6c6b64aa50e2
|
import unittest
import types
import os
import sys
import tempfile
import shutil
import subprocess
from openmdao.api import Problem
from openmdao.test_suite.components.sellar import SellarNoDerivatives
from openmdao.devtools import iprof_mem
@unittest.skip("interactive test, not to be run with test suite")
class TestProfileMemory(unittest.TestCase):
def test_sellar(self):
prob = Problem(SellarNoDerivatives()).setup()
with iprof_mem.memtrace(min_mem=0.1):
prob.run_model()
# expect output similar to the following:
# 0.11 (435 calls) </Users/banaylor/dev/blue/openmdao/utils/name_maps.py:124>.name2abs_name
# 0.11 (14 calls) ExplicitComponent._solve_nonlinear:(IndepVarComp)
# 0.11 (7 calls) NonlinearRunOnce.solve
# 0.11 (150 calls) Vector.__contains__:(DefaultVector)
# 0.12 (7 calls) Group._solve_nonlinear
# 0.13 (1 calls) Driver._update_voi_meta
# 0.14 (2 calls) DefaultTransfer._setup_transfers
# 0.16 (1 calls) NonlinearBlockGS._iter_initialize
# 0.16 (1 calls) NonlinearSolver._iter_initialize:(NonlinearBlockGS)
# 0.19 (24 calls) ExplicitComponent._apply_nonlinear:(ExecComp)
# 0.20 (1 calls) System._setup_vectors:(SellarNoDerivatives)
# 0.25 (105 calls) _IODict.__getitem__
# 0.26 (80 calls) Vector.__init__:(DefaultVector)
# 0.26 (21 calls) ExplicitComponent._solve_nonlinear:(ExecComp)
# 0.34 (45 calls) ExecComp.compute
# 0.39 (8 calls) NonlinearSolver._run_apply:(NonlinearBlockGS)
# 0.39 (8 calls) Group._apply_nonlinear:(SellarNoDerivatives)
# 0.57 (7 calls) NonlinearBlockGS._single_iteration
# 0.59 (1 calls) System._final_setup:(SellarNoDerivatives)
# 0.75 (1 calls) Problem.final_setup
# 1.07 (1 calls) NonlinearSolver.solve:(NonlinearBlockGS)
# 1.07 (1 calls) Solver._run_iterator:(NonlinearBlockGS)
# 1.07 (1 calls) System.run_solve_nonlinear:(SellarNoDerivatives)
# 1.07 (1 calls) Group._solve_nonlinear:(SellarNoDerivatives)
# 1.83 (1 calls) Problem.run_model
class TestCmdlineMemory(unittest.TestCase):
def setUp(self):
try:
import psutil
except ImportError:
raise unittest.SkipTest("psutil is not installed")
self.tstfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mem_model.py')
self.startdir = os.getcwd()
self.tempdir = tempfile.mkdtemp(prefix='TestDOEDriver-')
os.chdir(self.tempdir)
def tearDown(self):
os.chdir(self.startdir)
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
def _run_command(self, cmd):
try:
output = subprocess.check_output(cmd).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
msg = "Running command '{}' failed. " + \
"Output was: \n{}".format(cmd, err.output.decode('utf-8'))
self.fail(msg)
def test_mem(self):
self._run_command(['openmdao', 'mem', self.tstfile])
self._run_command(['openmdao', 'mempost', 'mem_trace.raw'])
def test_mem_tree(self):
self._run_command(['openmdao', 'mem', '-t', self.tstfile])
self._run_command(['openmdao', 'mempost', '-t', 'mem_trace.raw'])
if __name__ == "__main__":
unittest.main()
|
py
|
1a576658bcfce39e84ef8ff17eec7fdea32c1e12
|
# https://bitcoin.stackexchange.com/questions/67791/calculate-hash-of-block-header
from hashlib import sha256
import hashlib
import binascii
# https://www.blockchain.com/btc/block/0000000000000000000635bda771916ca727db53fea5441508f7161386e066be
# https://blockchain.info/block/0000000000000000000635bda771916ca727db53fea5441508f7161386e066be?format=hex
header = binascii.unhexlify("0000002066720b99e07d284bd4fe67ff8c49a5db1dd8514fcdab610000000000000000007829844f4c3a41a537b3131ca992643eaa9d093b2383e4cdc060ad7dc548118751eb505ac1910018de19b302")
print(binascii.hexlify(sha256(sha256(header).digest()).digest()[::-1]).decode())
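# Sketch (not in the original snippet): the 80-byte header unpacks into its six
# fields, which shows where the difficulty bits and nonce sit.
import struct
version, prev_block, merkle_root, timestamp, bits, nonce = struct.unpack('<I32s32sIII', header)
print(version, hex(bits), nonce)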
|
py
|
1a576841137485fbdd390a10175eeeb0a70f5da4
|
import yaml
from unittest import TestCase
from .utils import TEST_DATA_PATH
from foliant.meta.classes import Chapter
from foliant.meta.classes import Meta
from foliant.meta.classes import MetaChapterDoesNotExistError
from foliant.meta.classes import MetaDublicateIDError
from foliant.meta.classes import MetaSectionDoesNotExistError
from foliant.meta.classes import Section
from foliant.meta.generate import load_meta
class TestLoadMetaFromFile(TestCase):
maxDiff = None
def test_load_sample_file1(self):
meta = Meta()
with open(TEST_DATA_PATH / 'meta1.yml', encoding='utf8') as f:
source = yaml.load(f, yaml.Loader)
meta.load_meta_from_file(TEST_DATA_PATH / 'meta1.yml')
self.assertEqual(meta.dump(), source)
def test_load_sample_file2(self):
meta = Meta()
with open(TEST_DATA_PATH / 'meta2.yml', encoding='utf8') as f:
source = yaml.load(f, yaml.Loader)
meta.load_meta_from_file(TEST_DATA_PATH / 'meta2.yml')
self.assertEqual(meta.dump(), source)
def test_load_sample_file3(self):
meta = Meta()
with open(TEST_DATA_PATH / 'meta3.yml', encoding='utf8') as f:
source = yaml.load(f, yaml.Loader)
meta.load_meta_from_file(TEST_DATA_PATH / 'meta3.yml')
self.assertEqual(meta.dump(), source)
class TestProcessIds(TestCase):
def test_load_sample_file(self):
section1 = Section(level=0,
start=0,
end=100,
data={'id': 'id1'},
title='title1')
section2 = Section(level=1,
start=10,
end=100,
data={'id': 'id2'},
title='title2')
chapter1 = Chapter(filename='filename',
name='chapter_name',
main_section=None)
section1.add_child(section2)
chapter1.main_section = section1
section3 = Section(level=0,
start=0,
end=100,
data={'id': 'id3'},
title='title3')
section4 = Section(level=1,
start=10,
end=100,
data={'id': 'id4'},
title='title4')
chapter2 = Chapter(filename='filename2',
name='chapter_name2',
main_section=None)
section3.add_child(section4)
chapter2.main_section = section3
meta = Meta()
meta.add_chapter(chapter1)
meta.add_chapter(chapter2)
expected_ids = ['id1', 'id2', 'id3', 'id4']
meta.process_ids()
for section, expected_id in zip(meta.iter_sections(), expected_ids):
self.assertEqual(section.id, expected_id)
def test_dublicate_ids(self):
section1 = Section(level=0,
start=0,
end=100,
data={'id': 'id1'},
title='title1')
section2 = Section(level=1,
start=10,
end=100,
data={'id': 'id1'},
title='title2')
chapter1 = Chapter(filename='filename',
name='chapter_name',
main_section=None)
section1.add_child(section2)
chapter1.main_section = section1
meta = Meta()
meta.add_chapter(chapter1)
with self.assertRaises(MetaDublicateIDError):
meta.process_ids()
def test_generate_ids(self):
section1 = Section(level=0,
start=0,
end=100,
data={'id': 'id1'},
title='title1')
section2 = Section(level=1,
start=10,
end=100,
data={},
title='My Section Title (78)')
chapter1 = Chapter(filename='filename',
name='chapter_name',
main_section=None)
section1.add_child(section2)
chapter1.main_section = section1
section3 = Section(level=0,
start=0,
end=100,
data={'id': 'original'},
title='title3')
section4 = Section(level=1,
start=10,
end=100,
data={},
title='original')
chapter2 = Chapter(filename='filename2',
name='chapter_name2',
main_section=None)
section3.add_child(section4)
chapter2.main_section = section3
meta = Meta()
meta.add_chapter(chapter1)
meta.add_chapter(chapter2)
expected_ids = ['id1', 'my-section-title-78', 'original', 'original-2']
meta.process_ids()
for section, expected_id in zip(meta.iter_sections(), expected_ids):
self.assertEqual(section.id, expected_id)
class TestGetChapter(TestCase):
def setUp(self):
md_root = 'test/test_data/load_meta'
chapters = [
'chapter_only_yfm.md',
'chapter_with_meta.md',
'chapter_with_one_meta_tag.md',
'chapter_without_meta.md'
]
self.meta = load_meta(chapters, md_root)
def test_wrong_chapter(self):
with self.assertRaises(MetaChapterDoesNotExistError):
self.meta.get_chapter('wrong/chapter/path')
def test_relative_path(self):
filename = 'test/test_data/load_meta/chapter_with_meta.md'
chapter = self.meta.get_chapter(filename)
self.assertTrue(chapter.filename.endswith('chapter_with_meta.md'))
def test_absolute_path(self):
filename = TEST_DATA_PATH / 'load_meta/chapter_with_meta.md'
chapter = self.meta.get_chapter(filename)
self.assertTrue(chapter.filename.endswith('chapter_with_meta.md'))
class TestGetByID(TestCase):
def test_id_exists(self):
meta = Meta()
meta.load_meta_from_file(TEST_DATA_PATH / 'meta3.yml')
id_ = 'subsection'
section = meta.get_by_id(id_)
self.assertEqual(section.id, id_)
def test_id_doesnt_exist(self):
meta = Meta()
meta.load_meta_from_file(TEST_DATA_PATH / 'meta3.yml')
id_ = 'nonexistant_id'
with self.assertRaises(MetaSectionDoesNotExistError):
section = meta.get_by_id(id_)
|
py
|
1a576850c5965dabacf8b5601edcc5c978441ed5
|
from flask import Flask, request,render_template
app = Flask(__name__)
@app.route('/')
def index():
# return "<h1>Hello World!</h1>"
return render_template('index.html')
@app.route('/user/<name>')
def user(name):
# Code implemented directly (without a template)
'''
user_agent = request.headers.get('User-Agent')
s = '<h1>Hello,you are {}!</h1>'.format(name)
s = s+ '<p>Your browser is {}</p>'.format(user_agent)
return s
'''
# Code using a template
return render_template('user.html',name=name)
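# Minimal way to run this app locally (sketch, using Flask's built-in dev server):
if __name__ == '__main__':
    app.run(debug=True)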
|
py
|
1a57686fba1980cbdd56654e0e5d352ceac5840f
|
def add(x, y):
return x+y
def multiply(x, y):
return x*y
def multiply_float(x, y):
return float(x)*y
def divide_float(x, y):
return float(x)/y
def divide(x, y):
return x/y
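# Quick sanity checks (sketch, not part of the original module):
if __name__ == '__main__':
    assert add(2, 3) == 5
    assert multiply(2, 3) == 6
    assert multiply_float(2, 3) == 6.0
    assert divide_float(1, 2) == 0.5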
|
py
|
1a5768c19f33a30244b078200d0cd92fb69d04e1
|
from distutils.core import setup, Extension
module_device = Extension('device',
sources = ['device.cpp'],
library_dirs=[r"C:\Program Files (x86)\Windows Kits\10\Lib"]  # raw string keeps the backslashes literal
)
setup (name = 'WindowsDevices',
version = '1.0',
description = 'Get device list with DirectShow',
ext_modules = [module_device])
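# Typical build command for this extension (sketch): python setup.py build_ext --inplace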
|
py
|
1a576a19f99497188e555717a73056708b2d5adb
|
import numpy as np
from scipy.stats import norm
import unittest
import ray
import ray.rllib.algorithms.dqn as dqn
import ray.rllib.algorithms.pg as pg
import ray.rllib.algorithms.ppo as ppo
import ray.rllib.algorithms.sac as sac
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import check, framework_iterator
from ray.rllib.utils.numpy import one_hot, fc, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT
tf1, tf, tfv = try_import_tf()
def do_test_log_likelihood(
run,
config,
prev_a=None,
continuous=False,
layer_key=("fc", (0, 4), ("_hidden_layers.0.", "_logits.")),
logp_func=None,
):
config = config.copy()
# Run locally.
config["num_workers"] = 0
# Env setup.
if continuous:
env = "Pendulum-v1"
obs_batch = preprocessed_obs_batch = np.array([[0.0, 0.1, -0.1]])
else:
env = "FrozenLake-v1"
config["env_config"] = {"is_slippery": False, "map_name": "4x4"}
obs_batch = np.array([0])
# PG does not preprocess anymore by default.
preprocessed_obs_batch = (
one_hot(obs_batch, depth=16) if run is not pg.PG else obs_batch
)
prev_r = None if prev_a is None else np.array(0.0)
# Test against all frameworks.
for fw in framework_iterator(config):
algo = run(config=config, env=env)
policy = algo.get_policy()
vars = policy.get_weights()
# Sample n actions, then roughly check their logp against their
# counts.
num_actions = 1000 if not continuous else 50
actions = []
for _ in range(num_actions):
# Single action from single obs.
actions.append(
algo.compute_single_action(
obs_batch[0],
prev_action=prev_a,
prev_reward=prev_r,
explore=True,
# Do not unsquash actions
# (remain in normalized [-1.0; 1.0] space).
unsquash_action=False,
)
)
# Test all taken actions for their log-likelihoods vs expected values.
if continuous:
for idx in range(num_actions):
a = actions[idx]
if fw != "torch":
if isinstance(vars, list):
expected_mean_logstd = fc(
fc(obs_batch, vars[layer_key[1][0]]), vars[layer_key[1][1]]
)
else:
expected_mean_logstd = fc(
fc(
obs_batch,
vars["default_policy/{}_1/kernel".format(layer_key[0])],
),
vars["default_policy/{}_out/kernel".format(layer_key[0])],
)
else:
expected_mean_logstd = fc(
fc(
obs_batch,
vars["{}_model.0.weight".format(layer_key[2][0])],
framework=fw,
),
vars["{}_model.0.weight".format(layer_key[2][1])],
framework=fw,
)
mean, log_std = np.split(expected_mean_logstd, 2, axis=-1)
if logp_func is None:
expected_logp = np.log(norm.pdf(a, mean, np.exp(log_std)))
else:
expected_logp = logp_func(mean, log_std, a)
logp = policy.compute_log_likelihoods(
np.array([a]),
preprocessed_obs_batch,
prev_action_batch=np.array([prev_a]) if prev_a else None,
prev_reward_batch=np.array([prev_r]) if prev_r else None,
actions_normalized=True,
)
check(logp, expected_logp[0], rtol=0.2)
# Test all available actions for their logp values.
else:
for a in [0, 1, 2, 3]:
count = actions.count(a)
expected_prob = count / num_actions
logp = policy.compute_log_likelihoods(
np.array([a]),
preprocessed_obs_batch,
prev_action_batch=np.array([prev_a]) if prev_a else None,
prev_reward_batch=np.array([prev_r]) if prev_r else None,
)
check(np.exp(logp), expected_prob, atol=0.2)
class TestComputeLogLikelihood(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_dqn(self):
"""Tests, whether DQN correctly computes logp in soft-q mode."""
config = dqn.DEFAULT_CONFIG.copy()
# Soft-Q for DQN.
config["exploration_config"] = {"type": "SoftQ", "temperature": 0.5}
config["seed"] = 42
do_test_log_likelihood(dqn.DQN, config)
def test_pg_cont(self):
"""Tests PG's (cont. actions) compute_log_likelihoods method."""
config = pg.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["model"]["fcnet_hiddens"] = [10]
config["model"]["fcnet_activation"] = "linear"
prev_a = np.array([0.0])
do_test_log_likelihood(
pg.PG,
config,
prev_a,
continuous=True,
layer_key=("fc", (0, 2), ("_hidden_layers.0.", "_logits.")),
)
def test_pg_discr(self):
"""Tests PG's (cont. actions) compute_log_likelihoods method."""
config = pg.DEFAULT_CONFIG.copy()
config["seed"] = 42
prev_a = np.array(0)
do_test_log_likelihood(pg.PG, config, prev_a)
def test_ppo_cont(self):
"""Tests PPO's (cont. actions) compute_log_likelihoods method."""
config = ppo.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["model"]["fcnet_hiddens"] = [10]
config["model"]["fcnet_activation"] = "linear"
prev_a = np.array([0.0])
do_test_log_likelihood(ppo.PPO, config, prev_a, continuous=True)
def test_ppo_discr(self):
"""Tests PPO's (discr. actions) compute_log_likelihoods method."""
config = ppo.DEFAULT_CONFIG.copy()
config["seed"] = 42
prev_a = np.array(0)
do_test_log_likelihood(ppo.PPO, config, prev_a)
def test_sac_cont(self):
"""Tests SAC's (cont. actions) compute_log_likelihoods method."""
config = sac.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["policy_model_config"]["fcnet_hiddens"] = [10]
config["policy_model_config"]["fcnet_activation"] = "linear"
prev_a = np.array([0.0])
# SAC cont. actions use a squashed normal distribution. Implement its logp
# logic here in numpy for comparing results.
def logp_func(means, log_stds, values, low=-1.0, high=1.0):
stds = np.exp(np.clip(log_stds, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT))
unsquashed_values = np.arctanh((values - low) / (high - low) * 2.0 - 1.0)
log_prob_unsquashed = np.sum(
np.log(norm.pdf(unsquashed_values, means, stds)), -1
)
return log_prob_unsquashed - np.sum(
np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1
)
do_test_log_likelihood(
sac.SAC,
config,
prev_a,
continuous=True,
layer_key=(
"fc",
(0, 2),
("action_model._hidden_layers.0.", "action_model._logits."),
),
logp_func=logp_func,
)
def test_sac_discr(self):
"""Tests SAC's (discrete actions) compute_log_likelihoods method."""
config = sac.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["policy_model_config"]["fcnet_hiddens"] = [10]
config["policy_model_config"]["fcnet_activation"] = "linear"
prev_a = np.array(0)
do_test_log_likelihood(sac.SAC, config, prev_a)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
py
|
1a576a1d1adcc114ac03396dea2fdae74b19be7c
|
# JO-KEN-PO
from random import randint
from time import sleep
opçoes = ('PEDRA', 'PAPEL', 'TESOURA')
computador = randint(0, 2)
print('''\nFAÇA SUA ESCOLHA:
[0] PEDRA
[1] PAPEL
[2] TESOURA''')
jogador = int(input("Qual sua jogada? "))
print('JO')
sleep(1)
print('KEN')
sleep(1)
print("PO")
sleep(1)
print("-="*15)
print("Computador jogou {} ".format(opçoes[computador]))
print("Jogador jogou {}".format(opçoes[jogador]))
print("-="*15)
if computador == 0:
if jogador == 0:
print("EMPATE.")
elif jogador == 1:
print("JOGADOR VENCEU.")
elif jogador == 2:
print("COMPUTADOR VENCEU")
elif computador == 1:
if jogador == 0:
print("COMPUTADOR VENCEU")
elif jogador == 1:
print("EMPATE")
elif jogador == 2:
print("JOGADOR VENCEU")
elif computador == 2:
if jogador == 0:
print("JOGADOR VENCEU")
elif jogador == 1:
print("COMPUTADOR VENCEU")
elif jogador == 2:
print("EMPATE.")
|
py
|
1a576b3b9dfe6e7b44ee8eba07d3ab58acc883c3
|
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
from typing import Optional, Sequence
import torch.nn as nn
class ClassifierFCN(nn.Module):
def __init__(self, in_ch: int, num_classes: Optional[int], layers_description: Sequence[int]=(256,), dropout_rate: float = 0.1):
super().__init__()
layer_list = []
layer_list.append(nn.Conv2d(in_ch, layers_description[0], kernel_size=1, stride=1))
layer_list.append(nn.ReLU())
if dropout_rate is not None and dropout_rate > 0:
layer_list.append(nn.Dropout(p=dropout_rate))
last_layer_size = layers_description[0]
for curr_layer_size in layers_description[1:]:
layer_list.append(nn.Conv2d(last_layer_size, curr_layer_size, kernel_size=1, stride=1))
layer_list.append(nn.ReLU())
if dropout_rate is not None and dropout_rate > 0:
layer_list.append(nn.Dropout(p=dropout_rate))
last_layer_size = curr_layer_size
if num_classes is not None:
layer_list.append(nn.Conv2d(last_layer_size, num_classes, kernel_size=1, stride=1))
self.classifier = nn.Sequential(*layer_list)
def forward(self, x):
x = self.classifier(x)
return x
class ClassifierMLP(nn.Module):
def __init__(self, in_ch: int, num_classes: Optional[int], layers_description: Sequence[int]=(256,), dropout_rate: float = 0.1):
super().__init__()
layer_list = []
layer_list.append(nn.Linear(in_ch, layers_description[0]))
layer_list.append(nn.ReLU())
if dropout_rate is not None and dropout_rate > 0:
layer_list.append(nn.Dropout(p=dropout_rate))
last_layer_size = layers_description[0]
for curr_layer_size in layers_description[1:]:
layer_list.append(nn.Linear(last_layer_size, curr_layer_size))
layer_list.append(nn.ReLU())
if dropout_rate is not None and dropout_rate > 0:
layer_list.append(nn.Dropout(p=dropout_rate))
last_layer_size = curr_layer_size
if num_classes is not None:
layer_list.append(nn.Linear(last_layer_size, num_classes))
self.classifier = nn.Sequential(*layer_list)
def forward(self, x):
x = self.classifier(x)
return x
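# Usage sketch (not part of the original module): an MLP head over 16-dim features
# for 4 classes; shapes below assume a batch of 8.
if __name__ == '__main__':
    import torch
    head = ClassifierMLP(in_ch=16, num_classes=4)
    print(head(torch.randn(8, 16)).shape)  # torch.Size([8, 4])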
|
py
|
1a576d039065eaf6c70eef3a3026e0834b3e08ad
|
from pathlib import Path
import pytest
import torch.autograd
import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2p5d.py')
def eval(engine):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
for i in range(engine.schedule.num_steps):
output, label, loss = engine.step()
accumulated_loss += loss.detach().cpu().numpy()
output = _gather(
output[0],
ParallelMode.PARALLEL_2P5D_ROW,
1
)
output = _gather(
output,
ParallelMode.PARALLEL_2P5D_COL,
0,
)
output = _gather(
output,
ParallelMode.PARALLEL_2P5D_DEP,
0,
)
output = torch.argmax(output, dim=-1)
correct = torch.sum(label[0] == output)
correct_sum += correct
total_sum += label[0].size(0)
avg_loss = accumulated_loss / engine.schedule.num_steps
return correct_sum, total_sum, avg_loss
def train(engine):
engine.train()
accumulated_loss = 0
for i in range(engine.schedule.num_steps):
output, label, loss = engine.step()
accumulated_loss += loss.detach().cpu().numpy()
avg_loss = accumulated_loss / engine.schedule.num_steps
return avg_loss
@pytest.mark.dist
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2p5d_parallel_vision_transformer():
# init dist
model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
CONFIG_PATH)
logger = get_global_dist_logger()
engine = Engine(model=model,
train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
criterion=criterion,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
schedule=schedule)
logger.info('start training')
for epoch in range(gpc.config.num_epochs):
train_loss = train(engine)
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
correct_sum, total_sum, eval_loss = eval(engine)
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
f'correct: {correct_sum}, acc: {correct_sum / total_sum}')
if __name__ == '__main__':
test_2p5d_parallel_vision_transformer()
|
py
|
1a576d8404e0694da5fc28c89ce80267f655af12
|
"""
The processors exist in Pythia to make data processing pipelines in various
datasets as similar as possible while allowing code reuse.
The processors also help maintain proper abstractions to keep only what matters
inside the dataset's code. This allows us to keep the dataset ``get_item``
logic really clean, with no need to handle data-type concerns there.
Processors can work on both images and text due to their generic structure.
To create a new processor, follow these steps:
1. Inherit the ``BaseProcessor`` class.
2. Implement ``__call__`` function which takes in a dict and returns a dict with
same keys preprocessed as well as any extra keys that need to be returned.
3. Register the processor using ``@registry.register_processor('name')`` to
registry where 'name' will be used to refer to your processor later.
In a processor's config you can use the ``preprocessor`` option to specify
which kind of preprocessor you want in your dataset.
Let's break down processor's config inside a dataset (VQA2.0) a bit to understand
different moving parts.
Config::
dataset_attributes:
vqa2:
processors:
text_processor:
type: vocab
params:
max_length: 14
vocab:
type: intersected
embedding_name: glove.6B.300d
vocab_file: vocabs/vocabulary_100k.txt
answer_processor:
type: vqa_answer
params:
num_answers: 10
vocab_file: vocabs/answers_vqa.txt
preprocessor:
type: simple_word
params: {}
``BaseDataset`` will init the processors and they will be available inside your
dataset under the same attribute name as the key name, e.g. `text_processor` will
be available as `self.text_processor` inside your dataset. As with every module
in Pythia, a processor also accepts a ``ConfigNode`` with `type` and `params`
attributes. `params` defines the custom parameters for each of the processors.
By default, processor initialization will also init a `preprocessor` attribute,
which can be a processor config in itself. `preprocessor` can then be accessed
inside the processor's functions.
Example::
from pythia.common.registry import registry
from pythia.datasets.processors import BaseProcessor
class MyProcessor(BaseProcessor):
def __init__(self, config, *args, **kwargs):
return
def __call__(self, item, *args, **kwargs):
text = item['text']
text = [t.strip() for t in text.split(" ")]
return {"text": text}
"""
import logging
import warnings
from collections import defaultdict
import numpy as np
import torch
from sam.spatial_utils import build_graph_using_normalized_boxes
from tools.registry import registry
from ..phoc import build_phoc
from .textvqa_vocab import VocabDict
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
from easydict import EasyDict as edict
def _pad_tokens(tokens, PAD_TOKEN, max_length):
padded_tokens = [PAD_TOKEN] * max_length
token_length = min(len(tokens), max_length)
padded_tokens[:token_length] = tokens[:token_length]
token_length = torch.tensor(token_length, dtype=torch.long)
return padded_tokens, token_length
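# Illustration (not in the original):
#   _pad_tokens(['hello', 'world'], '<pad>', 4)
#   -> (['hello', 'world', '<pad>', '<pad>'], tensor(2))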
class WordToVectorDict:
def __init__(self, model):
self.model = model
def __getitem__(self, word):
# Check if mean for word split needs to be done here
return np.mean([self.model.get_word_vector(w) for w in word.split(" ")], axis=0)
class BaseProcessor:
"""Every processor in Pythia needs to inherit this class for compatability
with Pythia. End user mainly needs to implement ``__call__`` function.
Args:
config (ConfigNode): Config for this processor, containing `type` and
`params` attributes if available.
"""
def __init__(self, config, *args, **kwargs):
return
def __call__(self, item, *args, **kwargs):
"""Main function of the processor. Takes in a dict and returns back
a dict
Args:
item (Dict): Some item that needs to be processed.
Returns:
Dict: Processed dict.
"""
return item
class Processor:
"""Wrapper class used by Pythia to initialized processor based on their
``type`` as passed in configuration. It retrieves the processor class
registered in registry corresponding to the ``type`` key and initializes
with ``params`` passed in configuration. All functions and attributes of
the processor initialized are directly available via this class.
Args:
config (ConfigNode): ConfigNode containing ``type`` of the processor to
be initialized and ``params`` of that processor.
"""
def __init__(self, config, *args, **kwargs):
self.writer = registry.get("writer")
if not hasattr(config, "type"):
raise AttributeError(
"Config must have 'type' attribute to specify type of processor"
)
processor_class = registry.get_processor_class(config.type)
params = {}
if not hasattr(config, "params"):
self.writer.write(
"Config doesn't have 'params' attribute to "
"specify parameters of the processor "
"of type {}. Setting to default \{\}".format(config.type)
)
else:
params = config.params
self.processor = processor_class(params, *args, **kwargs)
self._dir_representation = dir(self)
def __call__(self, item, *args, **kwargs):
return self.processor(item, *args, **kwargs)
def __getattr__(self, name):
if name in self._dir_representation:
return getattr(self, name)
elif hasattr(self.processor, name):
return getattr(self.processor, name)
else:
raise AttributeError(name)
class FastTextProcessor:
"""FastText processor, similar to GloVe processor but returns FastText vectors.
Args:
config (ConfigNode): Configuration values for the processor.
"""
def __init__(self, config, *args, **kwargs):
self.max_length = config.max_length
self._load_fasttext_model("/srv/share/ykant3/pythia/vector_cache/wiki.en.bin")
self.PAD_INDEX = 0
self.PAD_TOKEN = "<pad>"
def _load_fasttext_model(self, model_file):
from fasttext import load_model
self.model = load_model(model_file)
# String to Vector
self.stov = WordToVectorDict(self.model)
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
output = torch.full(
(self.max_length, self.model.get_dimension()),
fill_value=self.PAD_INDEX,
dtype=torch.float,
)
for idx, token in enumerate(tokens):
output[idx] = torch.from_numpy(self.stov[token])
return output
def __call__(self, item):
# indices are padded
indices = self._map_strings_to_indices(item["tokens"])
# pad tokens
tokens, length = _pad_tokens(item["tokens"], self.PAD_TOKEN, self.max_length)
return {
"padded_token_indices": indices,
"padded_tokens": tokens,
"length": length,
}
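# Illustration (assumption: the 300-dimensional wiki.en vectors referenced above):
# with max_length=50, "padded_token_indices" is a (50, 300) float tensor and
# "length" is the token count clipped to max_length.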
class VQAAnswerProcessor(BaseProcessor):
"""Processor for generating answer scores for answers passed using VQA
accuracy formula. Using VocabDict class to represent answer vocabulary,
so parameters must specify "vocab_file". "num_answers" in parameter config
specify the max number of answers possible. Takes in dict containing
"answers" or "answers_tokens". "answers" are preprocessed to generate
"answers_tokens" if passed.
Args:
config (ConfigNode): Configuration for the processor
Attributes:
answer_vocab (VocabDict): Class representing answer vocabulary
"""
DEFAULT_NUM_ANSWERS = 10
def __init__(self, config, *args, **kwargs):
self.writer = registry.get("writer")
if not hasattr(config, "vocab_file"):
raise AttributeError(
"'vocab_file' argument required, but not "
"present in AnswerProcessor's config"
)
self.answer_vocab = VocabDict(config.vocab_file, *args, **kwargs)
self.preprocessor = None
if hasattr(config, "preprocessor"):
self.preprocessor = Processor(config.preprocessor)
if self.preprocessor is None:
raise ValueError(
"No processor named {} is defined.".format(config.preprocessor)
)
if hasattr(config, "num_answers"):
self.num_answers = config.num_answers
else:
self.num_answers = self.DEFAULT_NUM_ANSWERS
warnings.warn(
"'num_answers' not defined in the config. "
"Setting to default of {}".format(self.DEFAULT_NUM_ANSWERS)
)
def __call__(self, item):
"""Takes in dict with answers or answers_tokens, and returns back
a dict with answers (processed), "answers_indices" which point to
indices of the answers if present and "answers_scores" which represent
VQA style scores for the answers.
Args:
item (Dict): Dict containing answers or answers_tokens
Returns:
Dict: Processed answers, indices and scores.
"""
tokens = None
if not isinstance(item, dict):
raise TypeError("'item' passed to processor must be a dict")
if "answer_tokens" in item:
tokens = item["answer_tokens"]
elif "answers" in item:
if self.preprocessor is None:
raise AssertionError(
"'preprocessor' must be defined if you "
"don't pass 'answer_tokens'"
)
tokens = [
self.preprocessor({"text": answer})["text"]
for answer in item["answers"]
]
else:
raise AssertionError(
"'answers' or 'answer_tokens' must be passed"
" to answer processor in a dict"
)
tokens = self._increase_to_ten(tokens)
answers_indices = torch.zeros(self.DEFAULT_NUM_ANSWERS, dtype=torch.long)
answers_indices.fill_(self.answer_vocab.get_unk_index())
for idx, token in enumerate(tokens):
answers_indices[idx] = self.answer_vocab.word2idx(token)
answers_scores = self.compute_answers_scores(answers_indices)
return {
"answers": tokens,
"answers_indices": answers_indices,
"answers_scores": answers_scores,
}
def get_vocab_size(self):
"""Get vocab size of the answer vocabulary. Can also include
soft copy dynamic answer space size.
Returns:
int: size of the answer vocabulary
"""
return self.answer_vocab.num_vocab
def get_true_vocab_size(self):
"""True vocab size can be different from normal vocab size in some cases
such as soft copy where dynamic answer space is added.
Returns:
int: True vocab size.
"""
return self.answer_vocab.num_vocab
def word2idx(self, word):
"""Convert a word to its index according to vocabulary
Args:
word (str): Word to be converted to index.
Returns:
int: Index of the word.
"""
return self.answer_vocab.word2idx(word)
def idx2word(self, idx):
"""Index to word according to the vocabulary.
Args:
idx (int): Index to be converted to the word.
Returns:
str: Word corresponding to the index.
"""
return self.answer_vocab.idx2word(idx)
def compute_answers_scores(self, answers_indices):
"""Generate VQA based answer scores for answers_indices.
Args:
answers_indices (torch.LongTensor): tensor containing indices of the answers
Returns:
torch.FloatTensor: tensor containing scores.
"""
scores = torch.zeros(self.get_vocab_size(), dtype=torch.float)
gt_answers = list(enumerate(answers_indices))
unique_answers = set(answers_indices.tolist())
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.UNK_INDEX:
scores[answer] = avg_acc
return scores
def _increase_to_ten(self, tokens):
while len(tokens) < self.DEFAULT_NUM_ANSWERS:
tokens += tokens[: self.DEFAULT_NUM_ANSWERS - len(tokens)]
return tokens
class PhocProcessor:
"""
Compute PHOC features from text tokens
"""
def __init__(self, config, *args, **kwargs):
self.max_length = config.max_length
self.config = config
self.PAD_INDEX = 0
self.PAD_TOKEN = "<pad>"
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
phoc_dim = 604
output = torch.full(
(self.max_length, phoc_dim),
fill_value=self.PAD_INDEX,
dtype=torch.float,
)
for idx, token in enumerate(tokens):
output[idx] = torch.from_numpy(build_phoc(token))
return output
def __call__(self, item):
indices = self._map_strings_to_indices(item["tokens"])
tokens, length = _pad_tokens(item["tokens"], self.PAD_TOKEN, self.max_length)
return {
"padded_phoc_features": indices,
"padded_tokens": tokens,
"length": length,
}
class CopyProcessor(BaseProcessor):
"""
Copy boxes from numpy array
"""
def __init__(self, config, *args, **kwargs):
self.max_length = config.max_length
def __call__(self, item):
blob = item["blob"]
final_blob = np.zeros((self.max_length,) + blob.shape[1:], blob.dtype)
final_blob[: len(blob)] = blob[: len(final_blob)]
return {"blob": torch.from_numpy(final_blob)}
def SpatialProcessor(pad_obj_ocr_bboxes):
adj_matrix = build_graph_using_normalized_boxes(
pad_obj_ocr_bboxes, distance_threshold=registry.distance_threshold
)
return adj_matrix
class BertTokenizerProcessor:
"""
Tokenize a text string with BERT tokenizer, using Tokenizer passed to the dataset.
"""
def __init__(self, config, tokenizer):
self.max_length = config.max_length
self.bert_tokenizer = tokenizer
# self.bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
assert self.bert_tokenizer.encode(self.bert_tokenizer.pad_token) == [0]
def get_vocab_size(self):
return self.bert_tokenizer.vocab_size
def __call__(self, item):
# [PAD] in self.bert_tokenizer is zero (as checked in assert above)
token_inds = torch.zeros(self.max_length, dtype=torch.long)
indices = self.bert_tokenizer.encode(item["question"], add_special_tokens=True)
indices = indices[: self.max_length]
token_inds[: len(indices)] = torch.tensor(indices)
token_num = torch.tensor(len(indices), dtype=torch.long)
tokens_mask = torch.zeros(self.max_length, dtype=torch.long)
tokens_mask[: len(indices)] = 1
results = {
"token_inds": token_inds,
"token_num": token_num,
"tokens_mask": tokens_mask,
}
return results
class M4CAnswerProcessor:
"""
Process a TextVQA answer for iterative decoding in SAM4C.
# (YK): Modified to activate logits of the same word in ocr/vocabulary in targets.
"""
def __init__(self, config, *args, **kwargs):
if config.vocab_type == "5k":
self.answer_vocab = VocabDict(
registry["Vocabs"]["vocab5k"], *args, **kwargs
)
elif config.vocab_type == "5k_stvqa":
self.answer_vocab = VocabDict(
registry["Vocabs"]["vocab5k_stvqa"], *args, **kwargs
)
else:
raise ValueError
self.PAD_IDX = self.answer_vocab.word2idx("<pad>")
self.BOS_IDX = self.answer_vocab.word2idx("<s>")
self.EOS_IDX = self.answer_vocab.word2idx("</s>")
self.UNK_IDX = self.answer_vocab.UNK_INDEX
registry.PAD_IDX = self.answer_vocab.word2idx("<pad>")
registry.BOS_IDX = self.answer_vocab.word2idx("<s>")
registry.EOS_IDX = self.answer_vocab.word2idx("</s>")
registry.UNK_IDX = self.answer_vocab.UNK_INDEX
registry.answer_vocab = self.answer_vocab
# make sure PAD_IDX, BOS_IDX and EOS_IDX are valid (not <unk>)
assert self.PAD_IDX != self.answer_vocab.UNK_INDEX
assert self.BOS_IDX != self.answer_vocab.UNK_INDEX
assert self.EOS_IDX != self.answer_vocab.UNK_INDEX
assert self.PAD_IDX == 0
self.num_answers = config.num_answers
self.max_ocr_tokens = config.max_ocr_tokens
self.max_copy_steps = config.max_copy_steps
assert self.max_copy_steps >= 1
def match_answer_to_vocab_ocr_seq(
self, answer, vocab2idx_dict, ocr2inds_dict, max_match_num=20
):
"""
Match an answer to a list of sequences of indices
each index corresponds to either a fixed vocabulary or an OCR token
(in the index address space, the OCR tokens are after the fixed vocab)
"""
num_vocab = len(vocab2idx_dict)
answer_words = answer.split()
answer_word_matches = []
for word in answer_words:
# match answer word to fixed vocabulary
matched_inds = []
if word in vocab2idx_dict:
matched_inds.append(vocab2idx_dict.get(word))
# match answer word to OCR
# we put OCR after the fixed vocabulary in the answer index space
# so add num_vocab offset to the OCR index
matched_inds.extend([num_vocab + idx for idx in ocr2inds_dict[word]])
if len(matched_inds) == 0:
return []
answer_word_matches.append(matched_inds)
# expand per-word matched indices into the list of matched sequences
if len(answer_word_matches) == 0:
return []
idx_seq_list = [()]
for matched_inds in answer_word_matches:
idx_seq_list = [
seq + (idx,) for seq in idx_seq_list for idx in matched_inds
]
if len(idx_seq_list) > max_match_num:
idx_seq_list = idx_seq_list[:max_match_num]
return idx_seq_list
def get_vocab_size(self):
answer_vocab_nums = self.answer_vocab.num_vocab
answer_vocab_nums += self.max_ocr_tokens
return answer_vocab_nums
def __call__(self, item):
answers = item["answers"]
item["context_tokens"] = item["context_tokens"][: self.max_ocr_tokens]
assert len(answers) == self.num_answers
assert len(self.answer_vocab) == len(self.answer_vocab.word2idx_dict)
# Step 1: calculate the soft score of ground-truth answers
gt_answers = list(enumerate(answers))
unique_answers = sorted(set(answers))
unique_answer_scores = [0] * len(unique_answers)
for idx, unique_answer in enumerate(unique_answers):
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [
item for item in other_answers if item[1] == unique_answer
]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
unique_answer_scores[idx] = sum(accs) / len(accs)
unique_answer2score = {
a: s for a, s in zip(unique_answers, unique_answer_scores)
}
# Step 2: fill the first step soft scores for tokens
scores = torch.zeros(
self.max_copy_steps, self.get_vocab_size(), dtype=torch.float
)
# match answers to fixed vocabularies and OCR tokens.
ocr2inds_dict = defaultdict(list)
for idx, token in enumerate(item["context_tokens"]):
ocr2inds_dict[token].append(idx)
answer_dec_inds = [
self.match_answer_to_vocab_ocr_seq(
a, self.answer_vocab.word2idx_dict, ocr2inds_dict
)
for a in answers
]
# Collect all the valid decoding sequences for each answer.
# This part (idx_seq_list) was pre-computed in imdb (instead of online)
# to save time
all_idx_seq_list = []
for answer, idx_seq_list in zip(answers, answer_dec_inds):
all_idx_seq_list.extend(idx_seq_list)
# fill in the soft score for the first decoding step
score = unique_answer2score[answer]
for idx_seq in idx_seq_list:
score_idx = idx_seq[0]
# the scores for the decoding Step 0 will be the maximum
# among all answers starting with that vocab
# for example:
# if "red apple" has score 0.7 and "red flag" has score 0.8
# the score for "red" at Step 0 will be max(0.7, 0.8) = 0.8
                scores[0, score_idx] = max(scores[0, score_idx], score)
# train_prev_inds is the previous prediction indices in auto-regressive
# decoding
train_prev_inds = torch.zeros(self.max_copy_steps, dtype=torch.long)
# train_loss_mask records the decoding steps where losses are applied
train_loss_mask = torch.zeros(self.max_copy_steps, dtype=torch.float)
train_acc_mask = torch.zeros(self.max_copy_steps, dtype=torch.float)
if len(all_idx_seq_list) > 0:
# sample a random decoding answer sequence for teacher-forcing
idx_seq = all_idx_seq_list[np.random.choice(len(all_idx_seq_list))]
dec_step_num = min(1 + len(idx_seq), self.max_copy_steps)
train_loss_mask[:dec_step_num] = 1.0
train_acc_mask[: dec_step_num - 1] = 1.0
train_prev_inds[0] = self.BOS_IDX
for t in range(1, dec_step_num):
train_prev_inds[t] = idx_seq[t - 1]
score_idx = idx_seq[t] if t < len(idx_seq) else self.EOS_IDX
# if item["question_id"] == 35909:
# import pdb
# pdb.set_trace()
# this means step 1:N have only one non-zero index
# this means there will be no case with EOS_IDX_SCORE and OTHER score non-zero together!
# gather indices from both ocr/vocabulary for the same word!
all_indices = self.get_all_indices(
ocr2inds_dict, item["context_tokens"], score_idx
)
assert self.UNK_IDX not in all_indices
for idx in all_indices:
scores[t, idx] = 1.0
# scores[t, score_idx] = 1.
else:
idx_seq = ()
answer_info = {
"answers": answers,
"targets": scores,
# 'sampled_idx_seq': [train_prev_inds.new(idx_seq)],
"train_prev_inds": train_prev_inds,
"train_loss_mask": train_loss_mask,
"train_acc_mask": train_acc_mask,
}
return answer_info
def get_all_indices(self, ocr2indices, ocr_tokens, score_idx):
return_indices = [score_idx]
if score_idx >= len(self.answer_vocab):
word = ocr_tokens[score_idx - len(self.answer_vocab)]
assert word != "<pad>"
vocab_idx = self.answer_vocab.word2idx(word)
if vocab_idx != self.UNK_IDX:
return_indices.append(vocab_idx)
else:
word = self.answer_vocab.idx2word(score_idx)
ocr_indices = [x + len(self.answer_vocab) for x in ocr2indices[word]]
return_indices.extend(ocr_indices)
return return_indices
class Processors:
"""
    Contains the static processors used for processing question/OCR tokens and
    image/OCR features, and for decoding answers.
"""
def __init__(self, bert_tokenizer, vocab_type="4k", only_registry=False):
logger.info("Loading Processors")
logger.info(f"Vocab Type: {vocab_type}")
# decode-answers
answer_config = edict()
answer_config.max_copy_steps = 12
answer_config.num_answers = 10
answer_config.max_ocr_tokens = 50
answer_config.vocab_type = vocab_type
self.answer_processor = M4CAnswerProcessor(answer_config)
self.only_registry = only_registry
# Attach bert-tokenizer
registry["bert_tokenizer"] = bert_tokenizer
if only_registry:
logger.info("Only registry processor initialized")
return
# question
question_config = edict()
question_config.max_length = 20
self.bert_processor = BertTokenizerProcessor(question_config, bert_tokenizer)
# ocr-tokens
ocr_config = edict()
ocr_config.max_length = 50
self.fasttext_processor = FastTextProcessor(ocr_config)
self.phoc_processor = PhocProcessor(ocr_config)
@staticmethod
def word_cleaner(word):
word = word.lower()
word = word.replace(",", "").replace("?", "").replace("'s", " 's")
return word.strip()
@staticmethod
def word_cleaner_lower(word):
word = word.lower()
return word.strip()
|
py
|
1a576e398f683a788b39cd282447302867080fd5
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to layer/model functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.conv_utils import convert_kernel
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.get_source_inputs')
def get_source_inputs(tensor, layer=None, node_index=None):
"""Returns the list of input tensors necessary to compute `tensor`.
Output will always be a list of tensors
(potentially with 1 element).
Arguments:
tensor: The tensor to start from.
layer: Origin layer of the tensor. Will be
determined via tensor._keras_history if not provided.
node_index: Origin node index of the tensor.
Returns:
List of input tensors.
"""
if not hasattr(tensor, '_keras_history'):
return tensor
if layer is None or node_index:
layer, node_index, _ = tensor._keras_history
if not layer._inbound_nodes:
return [tensor]
else:
node = layer._inbound_nodes[node_index]
if not node.inbound_layers:
# Reached an Input layer, stop recursion.
return nest.flatten(node.input_tensors)
else:
source_tensors = []
for layer, node_index, _, tensor in node.iterate_inbound():
previous_sources = get_source_inputs(tensor, layer, node_index)
# Avoid input redundancy.
for x in previous_sources:
if all(x is not t for t in source_tensors):
source_tensors.append(x)
return source_tensors
def validate_string_arg(input_data,
allowable_strings,
layer_name,
arg_name,
allow_none=False,
allow_callables=False):
"""Validates the correctness of a string-based arg."""
if allow_none and input_data is None:
return
elif allow_callables and callable(input_data):
return
elif isinstance(input_data,
six.string_types) and input_data in allowable_strings:
return
else:
allowed_args = '`None`, ' if allow_none else ''
allowed_args += 'a `Callable`, ' if allow_callables else ''
allowed_args += 'or one of the following values: %s' % allowable_strings
raise ValueError(("%s's %s arg received an invalid value %s. " +
'Allowed values are %s.') %
(layer_name, arg_name, input_data, allowed_args))
def count_params(weights):
"""Count the total number of scalars composing the weights.
Arguments:
weights: An iterable containing the weights on which to compute params
Returns:
The total number of scalars composing the weights
"""
unique_weights = object_identity.ObjectIdentitySet(weights)
weight_shapes = [w.shape.as_list() for w in unique_weights]
standardized_weight_shapes = [
[0 if w_i is None else w_i for w_i in w] for w in weight_shapes
]
return int(sum(np.prod(p) for p in standardized_weight_shapes))
def print_summary(model, line_length=None, positions=None, print_fn=None):
"""Prints a summary of a model.
Arguments:
model: Keras model instance.
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements in each line.
If not provided, defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
It defaults to `print` (prints to stdout).
"""
if print_fn is None:
print_fn = print
if model.__class__.__name__ == 'Sequential':
sequential_like = True
elif not model._is_graph_network:
# We treat subclassed models as a simple sequence of layers, for logging
# purposes.
sequential_like = True
else:
sequential_like = True
nodes_by_depth = model._nodes_by_depth.values()
nodes = []
for v in nodes_by_depth:
if (len(v) > 1) or (len(v) == 1 and
len(nest.flatten(v[0].inbound_layers)) > 1):
# if the model has multiple nodes
# or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
nodes += v
if sequential_like:
# search for shared layers
for layer in model.layers:
flag = False
for node in layer._inbound_nodes:
if node in nodes:
if flag:
sequential_like = False
break
else:
flag = True
if not sequential_like:
break
if sequential_like:
line_length = line_length or 65
positions = positions or [.45, .85, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #']
else:
line_length = line_length or 98
positions = positions or [.33, .55, .67, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
def print_row(fields, positions):
line = ''
for i in range(len(fields)):
if i > 0:
line = line[:-1] + ' '
line += str(fields[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print_fn(line)
print_fn('Model: "{}"'.format(model.name))
print_fn('_' * line_length)
print_row(to_display, positions)
print_fn('=' * line_length)
def print_layer_summary(layer):
"""Prints a summary for a single layer.
Arguments:
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
except RuntimeError: # output_shape unknown in Eager mode.
output_shape = '?'
name = layer.name
cls_name = layer.__class__.__name__
fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
print_row(fields, positions)
def print_layer_summary_with_connections(layer):
"""Prints a summary for a single layer (including topological connections).
Arguments:
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
connections = []
for node in layer._inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
# node is not part of the current network
continue
for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():
connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index,
tensor_index))
name = layer.name
cls_name = layer.__class__.__name__
if not connections:
first_connection = ''
else:
first_connection = connections[0]
fields = [
name + ' (' + cls_name + ')', output_shape,
layer.count_params(), first_connection
]
print_row(fields, positions)
if len(connections) > 1:
for i in range(1, len(connections)):
fields = ['', '', '', connections[i]]
print_row(fields, positions)
layers = model.layers
for i in range(len(layers)):
if sequential_like:
print_layer_summary(layers[i])
else:
print_layer_summary_with_connections(layers[i])
if i == len(layers) - 1:
print_fn('=' * line_length)
else:
print_fn('_' * line_length)
model._check_trainable_weights_consistency()
if hasattr(model, '_collected_trainable_weights'):
trainable_count = count_params(model._collected_trainable_weights)
else:
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
print_fn('Trainable params: {:,}'.format(trainable_count))
print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
print_fn('_' * line_length)
def gather_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected trainable weights/variables.
"""
if not trainable:
return []
weights = []
for layer in sub_layers:
weights += layer.trainable_weights
trainable_extra_variables = [
v for v in extra_variables if v.trainable]
return weights + trainable_extra_variables
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the non-trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected non-trainable weights/variables.
"""
trainable_extra_variables = []
non_trainable_extra_variables = []
for v in extra_variables:
if v.trainable:
trainable_extra_variables.append(v)
else:
non_trainable_extra_variables.append(v)
weights = []
for layer in sub_layers:
weights += layer.non_trainable_weights
if not trainable:
trainable_weights = []
for layer in sub_layers:
trainable_weights += layer.trainable_weights
return (trainable_weights + trainable_extra_variables
+ weights + non_trainable_extra_variables)
return weights + non_trainable_extra_variables
@keras_export('keras.utils.convert_all_kernels_in_model')
def convert_all_kernels_in_model(model):
"""Converts all convolution kernels in a model from Theano to TensorFlow.
Also works from TensorFlow to Theano.
Arguments:
model: target model for the conversion.
"""
# Note: SeparableConvolution not included
# since only supported by TF.
conv_classes = {
'Conv1D',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
}
to_assign = []
for layer in model.layers:
if layer.__class__.__name__ in conv_classes:
original_kernel = K.get_value(layer.kernel)
converted_kernel = convert_kernel(original_kernel)
to_assign.append((layer.kernel, converted_kernel))
K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
previous_feature_map_shape,
target_data_format='channels_first'):
"""Utility useful when changing a convnet's `data_format`.
When porting the weights of a convnet from one data format to the other,
if the convnet includes a `Flatten` layer
(applied to the last convolutional feature map)
followed by a `Dense` layer, the weights of that `Dense` layer
should be updated to reflect the new dimension ordering.
Arguments:
dense: The target `Dense` layer.
previous_feature_map_shape: A shape tuple of 3 integers,
e.g. `(512, 7, 7)`. The shape of the convolutional
feature map right before the `Flatten` layer that
came before the target `Dense` layer.
target_data_format: One of "channels_last", "channels_first".
Set it "channels_last"
if converting a "channels_first" model to "channels_last",
or reciprocally.
"""
assert target_data_format in {'channels_last', 'channels_first'}
kernel, bias = dense.get_weights()
for i in range(kernel.shape[1]):
if target_data_format == 'channels_first':
c, h, w = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1)) # last -> first
else:
h, w, c = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0)) # first -> last
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
def is_builtin_layer(layer):
if not getattr(layer, '_keras_api_names', None):
return False
# Subclasses of `Layer` that are not exported inherit the export name
# of the base layer class.
return (layer._keras_api_names != ('keras.layers.Layer',) and
layer._keras_api_names_v1 != ('keras.layers.Layer',))
|
py
|
1a576e7c4ff7aac4226ca7d6ee4502f71189e0e9
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.xml.dom
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.xml.dom import XElement as XElement
setattr(XElement, '__ooo_ns__', 'com.sun.star.xml.dom')
setattr(XElement, '__ooo_full_ns__', 'com.sun.star.xml.dom.XElement')
setattr(XElement, '__ooo_type_name__', 'interface')
else:
from ....lo.xml.dom.x_element import XElement as XElement
__all__ = ['XElement']
|
py
|
1a576f8032e7c4362ca0a1059b79e398e87f19fd
|
import vcf
import argparse
from pyfaidx import Fasta
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import MutableSeq
from Bio.Seq import Seq
parser = argparse.ArgumentParser(description='Extract ref sequence and variants for a cluster')
parser.add_argument('-f', help='the reference genome fasta', required=True)
parser.add_argument('-v', help='the input VCF file.', required=True)
parser.add_argument('-t', help='the TSV with cluster information.', required=True)
parser.add_argument('-c', help='the cluster to extract.', required=True)
parser.add_argument('-ov', help='the output VCF file', required=True)
parser.add_argument('-of', help='the output FASTA file', required=True)
args = parser.parse_args()
## extract cluster information from tsv
tsv_in = open(args.t, 'r')
chr_name = ''
start_pos = 0
end_pos = 0
svids = []
for line in tsv_in:
line = line.rstrip().split('\t')
if(line[0] == args.c):
chr_name = line[1]
start_pos = int(line[2])
end_pos = int(line[3])
svids = line[4].split(',')
# retrieve reference sequence of the region
# Open reference fasta
ref = Fasta(args.f)
reg_seq = ref[chr_name][start_pos:end_pos]
reg_seq = reg_seq.seq
# read vcf
vcfi = open(args.v, 'r')
vcf_reader = vcf.Reader(vcfi)
vcf_out = []
for record in vcf_reader:
# skip if not in variants of interest
if(record.ID not in svids):
continue
# make a VCF record
var_pos = record.POS - start_pos
rec = [chr_name, str(var_pos), record.ID, str(record.REF), str(record.ALT[0]),
'.', '.', '.']
rec = '\t'.join(rec)
vcf_out.append(rec)
vcfi.close()
# write VCF
# VCF header
vcf_h = '##fileformat=VCFv4.2\n'
vcf_h += '##contig=<ID={},length={}>\n'.format(chr_name, len(reg_seq))
vcf_h += '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n'
with open(args.ov, 'w') as outf:
outf.write(vcf_h + '\n'.join(vcf_out))
# write FASTA with the reference sequence
fa_rec = SeqRecord(MutableSeq(reg_seq.upper()), id=chr_name,
description='cl_' + args.c)
# write fasta
SeqIO.write(fa_rec, args.of, "fasta")
|
py
|
1a5770bad2e06e77370eece87f55cfc0eede61a8
|
#!/usr/bin/env conda run -n py27Env python2.7
# -*- coding: utf-8 -*-
"""
Hyperalign on one half of a hyperscanning task and look
for improvements in leave-one-out ISC in the other half.
"""
import numpy as np
import pickle
from mvpa2.suite import *
import time
import glob
import sys
sys.path.append('/dartfs-hpc/rc/lab/W/WheatleyT/f00589z/hyperscanning/support_scripts/')
from phaseScramble_2 import *
from scipy import stats
def main():
print('\nlets hyperalign\n')
# define hyperscanning task descriptions
taskDescrips = ['storytelling_independent',
'storytelling_joint',
'listening',
'reading']
# parameters
debug = False
task = 4 # see task descriptions above
radius = 3 # number of voxels in hyperalignment searchlight radius
sparse_radius = 3 # number of voxels between neighboring searchlight spheres
nproc = 10 # number of parallel processes to feed into hyperalignment function
# set dataset labels
dsLabels = ['train','test']
# set base folder
baseFolder = '/dartfs-hpc/rc/lab/W/WheatleyT/f00589z/hyperscanning/preprocessing/hyperalignment/'
if debug:
datasetFile = baseFolder + 'datasets/debug_' + taskDescrips[task]
else:
datasetFile = baseFolder + 'datasets/' + taskDescrips[task]
# load training and testing data
ds_all = h5load(datasetFile)
# get training and testing sample indices (half and half)
order = 0 # 0 = train on first half, test on second, 1 = the opposite
halfSampleNum = np.round(ds_all[0].nsamples / 2)
sampleInds = [[]] * 2
if order == 0:
sampleInds[0] = np.arange(halfSampleNum) # training sample indices
sampleInds[1] = np.arange(halfSampleNum,ds_all[0].nsamples,1) # testing sample indices
else:
sampleInds[0] = np.arange(halfSampleNum, ds_all[0].nsamples, 1) # training sample indices
sampleInds[1] = np.arange(halfSampleNum) # testing sample indices
# get number of subjects in full dataset
numSubs = len(ds_all)
# split up into training and testing datasets
ds = [[]] * 2 # initialize
for DS in range(len(ds)): # for each data set (0=training, 1=testing)
ds[DS] = [[]] * numSubs # initialize
for SUB in range(numSubs): # for each subject
ds[DS][SUB] = ds_all[SUB][sampleInds[DS],:]
ds[DS][SUB].samples = stats.zscore(ds[DS][SUB].samples, axis=0)
# verify that subject ID lists are identical between training and testing sets
# for each dataset...
EPIdata = [[]] * 2
corrData = [[]] * 2
medCorr = [[]] * 2
for DS in range(2):
# get number of subjects
numSubs = len(ds[DS])
# get EPI dimensions (samples x voxels)
dims = np.array(ds[DS][0].__array__().shape)
# initialize raw EPI data array
EPIdata[DS] = np.empty([dims[0], dims[1], len(ds[DS])])
# initialize pre-hyperalignment ISC coefficient array (subs x voxels)
corrData[DS] = np.empty([numSubs, dims[1]])
# for each subject...
for SUB in range(numSubs):
# get EPI data
EPIdata[DS][:,:,SUB] = ds[DS][SUB].__array__()
# for each subject...
for SUB in range(numSubs):
# get mean of data from all participants EXCEPT the current participant
otherSubs = np.arange(0, numSubs)
otherSubs = np.delete(otherSubs, SUB)
groupMean = np.mean(EPIdata[DS][:,:,otherSubs], axis=2)
# get correlation between current participant and groupMean
corrData[DS][SUB, :] = fastColumnCorr(EPIdata[DS][:,:,SUB], groupMean)
# get median ISC across participants
medCorr[DS] = np.median(corrData[DS], axis=0)
print('mean (across voxels) median (across subs) corr in ' + dsLabels[DS] + ' set BEFORE hyperalignment: ' + str(np.round(np.mean(medCorr[DS]),3)))
# we call SearchlightHyperalignment mostly with default values:
# each sphere has a radius of 3 voxels, sphere centers are also 3 voxels apart,
# all voxels in a given sphere are used for alignment
slhyper = SearchlightHyperalignment(radius=radius,
sparse_radius=sparse_radius,
nproc=nproc)
# call the hyperalignment object with the full dataset we have,
# resulting mappers will be stored in slhypmaps
slhyperStart = time.time()
slhypmaps = slhyper(ds[0])
print('\nHyperalignment took ' + str(time.time() - slhyperStart) + ' secs')
# compute post-hyperalignment metrics
ds_hyper = [[]] * 2
EPIdata_hyper = [[]] * 2
corrData_hyper = [[]] * 2
medCorr_hyper = [[]] * 2
for DS in range(2):
# Applying hyperalignment parameters is similar to applying any mapper in
# PyMVPA. We apply the hyperalignment parameters by running the dataset
# through the forward() function of the mapper.
ds_hyper[DS] = [h.forward(sd) for h, sd in zip(slhypmaps, ds[DS])]
# get EPI dimensions (samples x voxels)
dims = np.array(ds_hyper[DS][0].__array__().shape)
# initialize raw EPI data array
EPIdata_hyper[DS] = np.empty([dims[0], dims[1], len(ds_hyper[DS])])
# initialize pre-hyperalignment ISC coefficient array (subs x voxels)
corrData_hyper[DS] = np.empty([numSubs, dims[1]])
# for each subject...
for SUB in range(numSubs):
# get EPI data
EPIdata_hyper[DS][:, :, SUB] = ds_hyper[DS][SUB].__array__()
# for each subject...
for SUB in range(numSubs):
# get mean of data from all participants EXCEPT the current participant
otherSubs = np.arange(0, numSubs)
otherSubs = np.delete(otherSubs, SUB)
groupMean = np.mean(EPIdata_hyper[DS][:, :, otherSubs], axis=2)
# get correlation between current participant and groupMean
corrData_hyper[DS][SUB, :] = fastColumnCorr(EPIdata_hyper[DS][:, :, SUB], groupMean)
# get median ISC across participants
medCorr_hyper[DS] = np.median(corrData_hyper[DS], axis=0)
        print('mean (across voxels) median (across subs) corr in ' + dsLabels[DS] + ' set AFTER hyperalignment: ' + str(np.round(np.mean(medCorr_hyper[DS]), 3)))
# save name
if task == 3:
saveFile = baseFolder + 'results/listening_5050'
else:
saveFile = baseFolder + 'results/reading_5050'
if debug:
saveFile = saveFile + '_debug'
print('saving files to: ')
print(saveFile + '_med_corr_pre_hyp')
print(saveFile + '_med_corr_post_hyp')
# save median correlation array
np.save(saveFile + '_med_corr_pre_hyp', medCorr)
np.save(saveFile + '_med_corr_post_hyp',medCorr_hyper)
# print('saving output...')
# with open(saveFile + '.pkl', 'wb') as f:
# pickle.dump([medCorr, medCorr_hyper], f, protocol=2)
# print('output saved here: ' + saveFile + '.pkl')
# save mapping
h5save(saveFile + '_hyperMappings.hd5', slhypmaps)
print('yay')
if __name__ == '__main__':
main()
|
py
|
1a5770c7ec7c6b2623b795c211b72090e8b9ad31
|
from datetime import date
from typing import Optional
from passlib.hash import pbkdf2_sha256
from pydantic import BaseModel, ValidationError, validator, root_validator, EmailStr, SecretStr
from pydantic.dataclasses import dataclass
from db import database, Users
from settings import SYMBOLS
class LoginValidationModel(BaseModel):
email: EmailStr
password: SecretStr
@root_validator
def password_length(cls, values):
email, password = values.get("email"), values.get("password")
user = database.fetch_one(query=Users.select().where(Users.c.email == email))
        if not user or not pbkdf2_sha256.verify(password.get_secret_value(), user.password):
raise ValueError(
"Please enter a correct username and password. Note that both fields may be case-sensitive."
)
return values
class RatesQueryValidationModel(BaseModel):
base: str = "EUR"
date: Optional[date]
symbols: Optional[list]
@validator("base")
def base_validation(cls, base):
if base not in list(SYMBOLS):
raise ValueError(f"Base currency {base} is not supported.")
return base
@validator("symbols", pre=True, whole=True)
def symbols_validation(cls, symbols):
symbols = symbols.split(",")
diff = list(set(symbols) - set(list(SYMBOLS)))
if diff:
raise ValueError(f"Symbols {', '.join(diff)} are not supported.")
return symbols
class RegistrationValidationModel(BaseModel):
email: EmailStr
password: SecretStr
@validator("password")
def password_length(cls, v):
if len(v.get_secret_value()) < 6:
raise ValueError("Password must be at least 6 characters long.")
return v
class VATValidationModel(BaseModel):
vat_number: str
@validator("vat_number")
def format_validation(cls, v):
# TODO: implement Regex validators
return v
class VatRatesModel(BaseModel):
vat_nu: str
|
py
|
1a5770eeba5926a069508f8f361791a96e9a5897
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Experimental Keras MNIST Example.
To test on CPU:
python mnist.py --use_tpu=False [--fake_data=true]
To test on TPU:
python mnist.py --use_tpu=True [--tpu=$TPU_NAME]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import distribute as contrib_distribute
# TODO(sourabhbajaj): Remove the need for this flag.
flags.DEFINE_bool('use_tpu', True,
'Ignored: preserved for backward compatibility.')
flags.DEFINE_string('tpu', '', 'Name of the TPU to use.')
flags.DEFINE_string(
'model_dir', None,
('The directory where the model and training/evaluation summaries '
'are stored. If unset, no summaries will be stored.'))
flags.DEFINE_bool('fake_data', False, 'Use fake data to test functionality.')
# Batch size should satify two properties to be able to run in cloud:
# num_eval_samples % batch_size == 0
# batch_size % 8 == 0
BATCH_SIZE = 200
NUM_CLASSES = 10
EPOCHS = 15
# input image dimensions
IMG_ROWS, IMG_COLS = 28, 28
FLAGS = flags.FLAGS
def mnist_model(input_shape):
"""Creates a MNIST model."""
model = tf.keras.models.Sequential()
model.add(
tf.keras.layers.Conv2D(
32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))
return model
def run():
"""Run the model training and return evaluation output."""
resolver = contrib_cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
contrib_distribute.initialize_tpu_system(resolver)
strategy = contrib_distribute.TPUStrategy(resolver, steps_per_run=100)
if FLAGS.fake_data:
print('Using fake data')
x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS))
y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32)
x_test, y_test = x_train, y_train
else:
# the data, split between train and test sets
print('Using real data')
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
  x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)
  x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)
  input_shape = (IMG_ROWS, IMG_COLS, 1)
  x_train = x_train.astype('float32')
  x_test = x_test.astype('float32')
  x_train /= 255
  x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)
with strategy.scope():
model = mnist_model(input_shape)
model.compile(
loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),
metrics=['accuracy'])
callbacks = []
if FLAGS.model_dir:
callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
callbacks=callbacks,
epochs=EPOCHS,
verbose=1,
validation_data=(x_test, y_test))
return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=1)
def main(unused_dev):
score = run()
print('Loss for final step: %s;' % score[0])
print('Accuracy: %s;' % score[1])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
|
py
|
1a57713c7918e402956837e5d19d36525d768732
|
# Enter script code
keyboard.send_keys("<ctrl>+f")
|
py
|
1a5771a30a24bcdd8d034543a224a9cfb9bcc19f
|
from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['target', 'attachment', 'texture', 'level'])
def glFramebufferTextureOES(target, attachment, texture, level):
pass
|
py
|
1a5772974feb91e13b7cf5e01399e74d8c172b5c
|
'''
Extract info from a call to the clean task from CASA logs.
'''
import re
import warnings
from itertools import izip
from datetime import datetime
from astropy import units as u
import numpy as np
from astropy.table import Table
# Define some strings for re
all_time_date = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s"
casa_datetime_format = r'%Y-%m-%d %H:%M:%S'
info = r"INFO\s"
warn = r"WARN\s"
err = r"ERROR\s"
numbers = r"[-+]?\d*\.\d+|\d+"
def collect_clean_results(log_files, filename=None, format='ascii.csv',
show_in_browser=False):
'''
Loop through the list of given log files, extract results from the clean
calls, and save as a csv file.
Parameters
----------
log_files : list or np.ndarray
List or array of the log file names.
filename : str, optional
Name of file to save with clean results. If None is given, no file is
saved.
format : str of filetype
Filetype to save the table as. See the list of writers available for
`~astropy.table` here:
`http://docs.astropy.org/en/stable/io/unified.html#built-in-readers-writers`_
show_in_browser : bool, optional
Displays the table in a web browser.
'''
results_dict = {"Name": [],
"Reached Threshold": [],
"Max Residual": [],
"Iterations": [],
"Time Elapsed": []}
for i, log in enumerate(log_files):
results = CleanResults(log)
try:
results.run_all()
# Extract units
bright_unit = results.max_residuals.unit
time_unit = results.time_elapsed.unit
results_dict["Name"].append(log.rstrip(".log"))
results_dict["Reached Threshold"].append(results.finished)
results_dict["Max Residual"].append(results.max_residuals.value)
results_dict["Iterations"].append(results.niters)
results_dict["Time Elapsed"].append(results.time_elapsed.value)
except Warning as e:
print("Failed for log: " + log)
print(e)
results_dict["Name"].append(log.rstrip(".log"))
results_dict["Reached Threshold"].append(False)
results_dict["Max Residual"].append(np.NaN)
results_dict["Iterations"].append(0)
results_dict["Time Elapsed"].append(np.NaN)
# Add units back on
results_dict["Max Residual"] *= bright_unit
results_dict["Time Elapsed"] *= time_unit
# Now gather into a table.
t = Table(results_dict.values(), names=results_dict.keys())
if filename is not None:
t.write(filename, format=format)
if show_in_browser:
t.show_in_browser()
class CleanResults(object):
"""
Read the results of running clean from a log file.
Parameters
----------
filename : str
Name of the log file to search.
"""
def __init__(self, filename):
self.filename = filename
self._lines = load_log(filename)
self._line_ranges = None
self._max_residuals = None
self._time_elapsed = None
@property
def lines(self):
return self._lines
def search_log(self, expression, view=None, return_linenum=True):
'''
Search through the log for a given expression.
Return the matched lines.
'''
re_express = re.compile(expression)
if view is None:
view = slice(None)
view = fill_in_slice(view, len(self.lines))
linenum_gen = xrange(view.start, view.stop, view.step)
matched_lines = []
matched_line_nums = []
for i, line in izip(linenum_gen, self.lines[view]):
search = re_express.search(line)
if search:
matched_lines.append(line)
if return_linenum:
matched_line_nums.append(i)
if not matched_lines:
Warning("No matches found.")
if len(matched_lines) == 1:
if return_linenum:
return matched_lines[0], matched_line_nums[0]
return matched_lines[0]
if return_linenum:
return zip(matched_lines, matched_line_nums)
return matched_lines
@property
def finished(self):
'''
Did CLEAN reach the given threshold?
'''
return self._finished_calls
def get_finished(self):
finish_re = all_time_date + info + \
"*MFMSCleanImageSkyModel::solve\s*Reached*"
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
finish_match = \
self.search_log(finish_re, view=slice(start, stop))
self._finished_calls = False if not finish_match else True
else:
finished_calls = []
for clean_range in self.line_ranges:
start, stop = clean_range
finish_match = self.search_log(finish_re,
view=slice(start, stop))
if not finish_match:
finished_calls.append(False)
else:
finished_calls.append(True)
self._finished_calls = finished_calls
@property
def line_ranges(self):
return self._line_ranges
def get_line_ranges(self):
'''
Find the beginning and end of CLEAN.
'''
start_re = all_time_date + info + \
"*clean::::.\s####.*Begin Task: clean.*"
stop_re = all_time_date + info + "*clean::::\s####.*End Task: clean.*"
start_search = self.search_log(start_re)
if start_search:
start_lines = start_search[1]
self._error = False
else:
raise Warning("Could not find CASA clean call in log.")
self._error = True
stop_search = self.search_log(stop_re)
if stop_search:
stop_lines = stop_search[1]
self._error = False
else:
Warning("Could not find end to clean call. "
"An error likely occurred in CASA. "
"Setting the end to the final log line.")
stop_lines = len(self.lines) - 1
self._error = True
# If they aren't equal, there was an error (no end line)
# Must be the last clean call, since casa always crashes
# in my experience.
try:
if len(start_lines) != len(stop_lines):
Warning("One of the CLEAN class failed.")
self._error = True
start_lines.pop(-1)
self._line_ranges = zip(start_lines, stop_lines)
except TypeError:
self._line_ranges = [start_lines, stop_lines]
self._error = False
@property
def error(self):
return self._error
@property
def time_elapsed(self):
return self._time_elapsed
def get_time_elapsed(self, output_unit=u.min):
'''
Find the time needed for CLEAN to run.
'''
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
start_time = datetime.strptime(casa_time(self.lines[start]),
casa_datetime_format)
stop_time = datetime.strptime(casa_time(self.lines[stop]),
casa_datetime_format)
self._time_elapsed = \
time_difference(start_time, stop_time, output_unit=output_unit)
else:
self._time_elapsed = []
for clean_range in self.line_ranges:
start, stop = clean_range
start_time = datetime.strptime(casa_time(self.lines[start]),
casa_datetime_format)
stop_time = datetime.strptime(casa_time(self.lines[stop]),
casa_datetime_format)
diff_time = \
time_difference(start_time, stop_time,
output_unit=output_unit)
self._time_elapsed.append(diff_time)
@property
def max_residuals(self):
return self._max_residuals
def get_max_residuals(self):
res_re = all_time_date + info + \
"*MFMSCleanImageSkyModel::solve\s*Final maximum*"
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
res_match = \
self.search_log(res_re, view=slice(start, stop))
if not res_match:
Warning("Could not find final residual value.")
self._max_residuals = np.NaN * u.Jy / u.beam
else:
self._max_residuals = \
float(re.findall(numbers, res_match[
0])[-1]) * u.Jy / u.beam
else:
self._max_residuals = []
for clean_range in self.line_ranges:
start, stop = clean_range
res_match = \
self.search_log(res_re, view=slice(start, stop))
if not res_match:
Warning("Could not find final residual value.")
self._max_residuals.append(np.NaN * u.Jy / u.beam)
else:
residual = \
float(re.findall(numbers, res_match)
[-1]) * u.Jy / u.beam
self._max_residuals.append(residual)
@property
def niters(self):
return self._niters
def get_niters(self):
iter_re = all_time_date + info + \
"*MFMSCleanImageSkyModel::solve\s*Clean used*"
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
iter_match = \
self.search_log(iter_re, view=slice(start, stop),
return_linenum=False)
if not iter_match:
Warning("Could not find number of iterations used.")
self._niters = np.NaN
else:
# Take the last one, since it is printed out for each
# major cycle.
if isinstance(iter_match, list):
last_match = iter_match[-1]
else:
last_match = iter_match
self._niters = \
int(re.findall(numbers, last_match)[-1])
else:
self._niters = []
for clean_range in self.line_ranges:
start, stop = clean_range
iter_match = \
self.search_log(iter_re, view=slice(start, stop),
return_linenum=False)
if not iter_match:
Warning("Could not find number of iterations used.")
self._niters.append(np.NaN)
else:
if isinstance(iter_match, list):
last_match = iter_match[-1]
else:
last_match = iter_match
iters = \
int(re.findall(numbers, last_match)[-1])
                    self._niters.append(iters)
def run_all(self, time_unit=u.min):
self.get_line_ranges()
self.get_finished()
self.get_max_residuals()
self.get_time_elapsed(output_unit=time_unit)
self.get_niters()
def info_dict(self):
if isinstance(self.line_ranges[0], int):
return {"Finished": self.finished,
"Max Residual": self.max_residuals,
"Time Elapsed": self.time_elapsed,
"Iterations": self.niters}
else:
results_dicts = []
            for i in xrange(len(self.line_ranges)):
results_dicts.append(
{"Finished": self.finished[i],
"Max Residual": self.max_residuals[i],
"Time Elapsed": self.time_elapsed[i],
"Iterations": self.niters[i]})
return results_dicts
def __repr__(self):
if isinstance(self.line_ranges[0], int):
return "Finished: " + str(self.finished) + "\nMax Residual: " + \
str(self.max_residuals) + "\nIterations: " + \
str(self.niters) + "\nTime Elapsed: " + \
str(self.time_elapsed.round(3))
else:
            for i in xrange(len(self.line_ranges)):
return "Clean " + str(i + 1) + " Finished: " + \
str(self.finished[i]) + "\n Max Residual: " + \
str(self.max_residuals[i]) + "\n Iterations: " + \
str(self.niters[i]) + "\n Time Elapsed: " + \
str(self.time_elapsed[i].round(3))
def load_log(logfile):
'''
Load the lines of a log file in.
'''
with open(logfile) as f:
lines = f.readlines()
return lines
def fill_in_slice(view, list_len):
'''
To keep track of lines in the log, fill in
undefined slice parameters with defaults.
'''
if not view.start:
start = 0
else:
start = view.start
if not view.stop:
stop = list_len
else:
stop = view.stop
if not view.step:
step = 1
else:
step = view.step
return slice(start, stop, step)
def casa_time(line):
return line.split("\t")[0]
def time_difference(time1, time2, output_unit=u.min):
diff = time2 - time1
seconds_diff = diff.total_seconds() * u.s
return seconds_diff.to(output_unit)
|
py
|
1a5772d365aa53682e7a5bae579b5de5fb388bba
|
# Copyright 2019 ducandu GmbH, All Rights Reserved
# (this is a modified version of the Apache 2.0 licensed RLgraph file of the same name).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import tensorflow_probability as tfp
from surreal.components.distributions.distribution import Distribution
from surreal.utils import util
class Categorical(Distribution):
"""
    A categorical distribution object defined by n values {p0, p1, ...} that add up to 1,
    the probabilities of picking one of the n categories.
"""
def parameterize_distribution(self, parameters):
return tfp.distributions.Categorical(logits=parameters, dtype=util.convert_dtype("int"))
def _sample_deterministic(self, distribution):
return tf.argmax(input=distribution.probs, axis=-1, output_type=util.convert_dtype("int"))
# IMPORTANT NOTE:
# Categorical.entropy calculates the Shannon entropy (- SUM(i) pi * log(pi)), but with the natural log (ln),
# rather than log2! This is documented incorrectly in the tfp documentation for the Categorical distribution.
|
py
|
1a5773407cdb709441e15873c48984d69ce900fc
|
################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
from __future__ import unicode_literals
import unittest
from unittest import TestCase
from ties.exceptions import _jsonschema_error_message
class _TestJsonSchemaValidationError(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
self.__setattr__(key, value)
class ExceptionsTests(TestCase):
def test_jsonschema_error_message_type_null_single_type(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='type', validator_value='string', instance=None, relative_path=['foo']))
self.assertEqual(error_message, 'property foo with null value should be of type string')
def test_jsonschema_error_message_type_single_type(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='type', validator_value='string', instance=1, relative_path=['foo']))
self.assertEqual(error_message, 'property type integer for property foo is not the allowed type: string')
def test_jsonschema_error_message_type_null_multiple_types(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='type', validator_value=['integer', 'string'], instance=None, relative_path=['foo']))
self.assertEqual(error_message, 'property foo with null value should be one of the allowed types: [integer, string]')
def test_jsonschema_error_message_type_multiple_types(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='type', validator_value=['integer', 'string'], instance=[], relative_path=['foo']))
self.assertEqual(error_message, 'property type array for property foo is not one of the allowed types: [integer, string]')
def test_jsonschema_error_message_required_single_property(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='required', validator_value=['foo', 'bar'], instance={'foo': None}, relative_path=[]))
self.assertEqual(error_message, 'required property bar is missing')
def test_jsonschema_error_message_required_multiple_properties(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='required', validator_value=['foo', 'bar'], instance={}, relative_path=[]))
self.assertEqual(error_message, 'required properties [bar, foo] are missing')
def test_jsonschema_error_message_additionalProperties_single_property(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='additionalProperties', schema={'properties': {'foo': None}}, instance={'foo': None, 'bar': None}, relative_path=[]))
self.assertEqual(error_message, 'additional property bar is not allowed')
def test_jsonschema_error_message_additionalProperties_multiple_properties(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='additionalProperties', schema={'properties': {}}, instance={'foo': None, 'bar': None}, relative_path=[]))
self.assertEqual(error_message, 'additional properties [bar, foo] are not allowed')
def test_jsonschema_error_message_minimum(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='minimum', validator_value=1, instance=0, relative_path=['foo']))
self.assertEqual(error_message, 'property value 0 for foo property is less than the minimum value of 1')
def test_jsonschema_error_message_maximum(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='maximum', validator_value=0, instance=1, relative_path=['foo']))
self.assertEqual(error_message, 'property value 1 for foo property is greater than the maximum value of 0')
def test_jsonschema_error_message_minLength(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='minLength', validator_value=1, instance='', relative_path=['foo']))
self.assertEqual(error_message, "property value '' for foo property is too short, minimum length 1")
def test_jsonschema_error_message_maxLength(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='maxLength', validator_value=0, instance=' ', relative_path=['foo']))
self.assertEqual(error_message, "property value ' ' for foo property is too long, maximum length 0")
def test_jsonschema_error_message_pattern(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='pattern', validator_value='^a$', instance='', relative_path=['foo']))
self.assertEqual(error_message, "property value '' for foo property does not match the pattern '^a$'")
def test_jsonschema_error_message_minItems(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='minItems', validator_value=1, instance=[], relative_path=['foo']))
self.assertEqual(error_message, 'array property foo with 0 items is too small, minimum size 1')
def test_jsonschema_error_message_maxItems(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='maxItems', validator_value=0, instance=[None], relative_path=['foo']))
self.assertEqual(error_message, 'array property foo with 1 items is too large, maximum size 0')
def test_jsonschema_error_message_uniqueItems(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='uniqueItems', instance=[None, None], relative_path=['foo']))
self.assertEqual(error_message, 'array property foo has duplicate items at index [0, 1]')
def test_jsonschema_error_message_unknown(self):
error_message = _jsonschema_error_message(_TestJsonSchemaValidationError(validator='UNKNOWN', message='an error message', relative_path=[]))
self.assertEqual(error_message, 'an error message')
if __name__ == '__main__':
unittest.main()
|
py
|
1a577349daba647b39a499bdc66be960cb74dc7c
|
def _max_len(choices):
"""Given a list of char field choices, return the field max length"""
lengths = [len(choice) for choice, _ in choices]
return max(lengths)
|
py
|
1a5773e71e7b840dfd23fe616c556bdbd6c223ef
|
from datetime import datetime
from pathlib import Path
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.handlers import Checkpoint, DiskSaver
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = f"stop-on-{config['stop_iteration']}"
folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info(f"Output path: {config['output_path']}")
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_clearml"]:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
}
    # We define two evaluators as they won't have exactly the same roles:
# - `evaluator` will save the best model based on validation score
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models={"model": model},
metric_name="accuracy",
n_saved=3,
trainer=trainer,
tag="test",
)
# In order to check training resuming we can stop training on a given iteration
if config["stop_iteration"] is not None:
@trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
def _():
logger.info(f"Stop training on {trainer.state.iteration} iteration")
trainer.terminate()
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
import traceback
print(traceback.format_exc())
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_path="/tmp/cifar10",
output_path="/tmp/output-cifar10/",
model="resnet18",
batch_size=512,
momentum=0.9,
weight_decay=1e-4,
num_workers=12,
num_epochs=24,
learning_rate=0.4,
num_warmup_epochs=4,
validate_every=3,
checkpoint_every=200,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
stop_iteration=None,
with_clearml=False,
**spawn_kwargs,
):
"""Main entry to train an model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10/".
model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_clearml (bool): if True, experiment ClearML logger is setup. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
# - Get train/test datasets
if idist.get_rank() > 0:
# Ensure that only rank 0 downloads the dataset; the other ranks wait at this barrier
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_rank() == 0:
# Rank 0 has finished downloading; release the ranks waiting at the first barrier
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
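# Note on the barrier pattern above: every rank except 0 blocks at the first
# idist.barrier() while rank 0 downloads the data; once rank 0 reaches the
# second barrier, all ranks are released with the dataset already on disk.
# A minimal sketch of the same idiom (download_or_load() is a hypothetical helper):
#
#     if idist.get_rank() > 0:
#         idist.barrier()            # non-zero ranks wait here
#     dataset = download_or_load()   # only rank 0 actually downloads
#     if idist.get_rank() == 0:
#         idist.barrier()            # release the waiting ranks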
def initialize(config):
model = utils.get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
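# The milestones above give a piecewise-linear schedule: the learning rate ramps
# from 0 to config["learning_rate"] over the warm-up epochs, then decays linearly
# back to 0 by the last epoch. For example (assuming a hypothetical 100 iterations
# per epoch with the defaults num_warmup_epochs=4, num_epochs=24, learning_rate=0.4):
#     milestones_values = [(0, 0.0), (400, 0.4), (2400, 0.0)]
# i.e. a 400-iteration warm-up followed by a 2000-iteration linear decay to zero.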
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {int(elapsed)} - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
logger.info(f"Train {config['model']} on CIFAR10")
logger.info(f"- PyTorch version: {torch.__version__}")
logger.info(f"- Ignite version: {ignite.__version__}")
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info(f"\t{key}: {value}")
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info(f"\tbackend: {idist.backend()}")
logger.info(f"\tworld size: {idist.get_world_size()}")
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - `RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
# Supervised part
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Avoid fetching loss.item() on every iteration: on XLA devices this forces a synchronization and can slow training down
if config["log_every_iters"] > 0 and (engine.state.iteration - 1) % config["log_every_iters"] == 0:
batch_loss = loss.item()
engine.state.saved_batch_loss = batch_loss
else:
batch_loss = engine.state.saved_batch_loss
return {
"batch loss": batch_loss,
}
trainer = Engine(train_step)
trainer.state.saved_batch_loss = -1.0
trainer.state_dict_user_keys.append("saved_batch_loss")
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def get_save_handler(config):
if config["with_clearml"]:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
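# Example invocation via python-fire (hypothetical script name and paths; the flags
# map directly onto the keyword arguments of run() above):
#     python main.py run --backend="nccl" --nproc_per_node=2 \
#         --data_path=/tmp/cifar10 --output_path=/tmp/output-cifar10/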
|
py
|
1a57742da52feacd7434c503ebb45b96c529943e
|
"""
initialise.py file. Part of the StoryTechnologies project.
February 25, 2017
Brett Alistair Kromkamp ([email protected])
"""
import configparser
import os
from topicdb.core.store.topicstore import TopicStore
SETTINGS_FILE_PATH = os.path.join(os.path.dirname(__file__), "../settings.ini")
USER_IDENTIFIER_1 = 1
config = configparser.ConfigParser()
config.read(SETTINGS_FILE_PATH)
database_username = config["DATABASE"]["Username"]
database_password = config["DATABASE"]["Password"]
database_name = config["DATABASE"]["Database"]
database_host = config["DATABASE"]["Host"]
database_port = config["DATABASE"]["Port"]
# Instantiate and open topic store, create and subsequently populate topic maps
with TopicStore(
database_username,
database_password,
host=database_host,
port=database_port,
dbname=database_name,
) as store:
store.set_topic_map(
USER_IDENTIFIER_1,
"Bacon Ipsum Dolor",
"Bacon ipsum dolor amet in ham esse sirloin turducken kevin occaecat qui kielbasa eiusmod cow anim andouille proident pig. Laborum tail id tempor voluptate.",
)
# Populate topic maps (with pre-defined topics) for 'USER_IDENTIFIER_1'
for topic_map in store.get_topic_maps(USER_IDENTIFIER_1):
store.initialise_topic_map(topic_map.identifier)
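# The settings.ini read above is expected to provide a [DATABASE] section with the
# keys used in this script. A minimal sketch with placeholder values (not the real
# credentials or server details):
#
#     [DATABASE]
#     Username = storytech
#     Password = changeme
#     Database = storytechnologies
#     Host = localhost
#     Port = 5432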
|
py
|
1a57748d23166c6a162b2be0c71ecd7f1b2ba8b8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ************************************
# @Time : 2019/7/3 22:34
# @Author : Xiang Ling
# @Lab : nesa.zju.edu.cn
# @File : cfg_train.py
# ************************************
import numpy as np
import os
import torch
from datetime import datetime
from sklearn.metrics import auc, roc_curve
from cfg_config import cfg_args
from data import CFGDataset
from model.DenseGraphMatching import MultiLevelGraphMatchNetwork
from utils import create_dir_if_not_exists, write_log_file
from utils import generate_epoch_pair
class CFGTrainer(object):
def __init__(self, node_init_dims, data_dir, device, log_file, best_model_file, args):
super(CFGTrainer, self).__init__()
# training parameters
self.max_epoch = args.epochs
self.batch_size = args.batch_size
self.lr = args.lr
self.device = device
self.log_file = log_file
self.best_model_path = best_model_file
self.model = MultiLevelGraphMatchNetwork(node_init_dims=node_init_dims, arguments=args, device=device).to(device)
write_log_file(self.log_file, str(self.model))
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
cfg = CFGDataset(data_dir=data_dir, batch_size=self.batch_size)
self.graph_train = cfg.graph_train
self.classes_train = cfg.classes_train
self.epoch_data_valid = cfg.valid_epoch
self.epoch_data_test = cfg.test_epoch
init_val_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)  # AUC of the initial model on the validation dataset
write_log_file(self.log_file, "Initial Validation AUC = {0} @ {1}".format(init_val_auc, datetime.now()))
def fit(self):
best_val_auc = None
for i in range(1, self.max_epoch + 1):
# train
loss_avg = self.train_one_epoch(model=self.model, optimizer=self.optimizer, graphs=self.graph_train, classes=self.classes_train, batch_size=self.batch_size,
device=self.device, load_data=None)
write_log_file(self.log_file, "EPOCH {0}/{1}:\tMSE loss = {2} @ {3}".format(i, self.max_epoch, loss_avg, datetime.now()))
# validation
valid_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)
write_log_file(self.log_file, "Validation AUC = {0} @ {1}".format(valid_auc, datetime.now()))
# save the model with the best validation AUC
if best_val_auc is None or best_val_auc < valid_auc:
write_log_file(self.log_file, 'Validation AUC increased ({} ---> {}), and saving the model ... '.format(best_val_auc, valid_auc))
best_val_auc = valid_auc
torch.save(self.model.state_dict(), self.best_model_path)
write_log_file(self.log_file, 'Best Validation auc = {} '.format(best_val_auc))
return best_val_auc
def testing(self):
# load the last checkpoint with the best model
self.model.load_state_dict(torch.load(self.best_model_path))
self.model.eval()
# double-check the saved best checkpoint on the validation dataset
double_val_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)
# evaluate on the test dataset
final_test_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_test)
write_log_file(self.log_file, "\nDouble check for the saved best checkpoint model for validation {} ".format(double_val_auc))
write_log_file(self.log_file, "Finally, testing auc = {} @ {}".format(final_test_auc, datetime.now()))
return final_test_auc
@staticmethod
def train_one_epoch(model, optimizer, graphs, classes, batch_size, device, load_data=None):
model.train()
if load_data is None:
epoch_data = generate_epoch_pair(graphs, classes, batch_size)
else:
epoch_data = load_data
perm = np.random.permutation(len(epoch_data)) # Random shuffle
cum_loss = 0.0
num = 0
for index in perm:
cur_data = epoch_data[index]
x1, x2, adj1, adj2, y = cur_data
batch_output = model(batch_x_p=x1, batch_x_h=x2, batch_adj_p=adj1, batch_adj_h=adj2)
y = torch.FloatTensor(y).to(device)
mse_loss = torch.nn.functional.mse_loss(batch_output, y)
optimizer.zero_grad()
mse_loss.backward()
optimizer.step()
cum_loss += mse_loss.item()  # accumulate a float rather than a graph-holding tensor
if num % max(1, len(perm) // 10) == 0:  # max(1, ...) guards against division by zero for tiny epochs
print('\tTraining: {}/{}: index = {} loss = {}'.format(num, len(epoch_data), index, mse_loss))
num = num + 1
return cum_loss / len(perm)
@staticmethod
def eval_auc_epoch(model, eval_epoch_data):
model.eval()
with torch.no_grad():
tot_diff = []
tot_truth = []
for cur_data in eval_epoch_data:
x1, x2, adj1, adj2, y = cur_data
batch_output = model(batch_x_p=x1, batch_x_h=x2, batch_adj_p=adj1, batch_adj_h=adj2)
tot_diff += list(batch_output.data.cpu().numpy())
tot_truth += list(y > 0)
diff = np.array(tot_diff) * -1
truth = np.array(tot_truth)
fpr, tpr, _ = roc_curve(truth, (1 - diff) / 2)
model_auc = auc(fpr, tpr)
return model_auc
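# Note on the AUC computation above: the raw model outputs are negated into `diff`,
# so (1 - diff) / 2 == (1 + output) / 2. Assuming the model emits a similarity-style
# score in [-1, 1], this maps it onto [0, 1] before roc_curve compares it against
# the binarised labels (y > 0).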
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg_args.gpu_index)  # must be set before CUDA is first queried
d = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
main_data_dir = cfg_args.data_dir
graph_name = cfg_args.dataset
graph_min = cfg_args.graph_size_min
graph_max = cfg_args.graph_size_max
graph_init_dim = cfg_args.graph_init_dim
# <-><-><-> logging-only paths; delete this block before open-sourcing
title = '{}_Min{}_Max{}'.format(graph_name, graph_min, graph_max)
main_log_dir = cfg_args.log_path + '{}_Min{}_Max{}_InitDims{}_Task_{}/'.format(graph_name, graph_min, graph_max, graph_init_dim, cfg_args.task)
create_log_str = create_dir_if_not_exists(main_log_dir)
best_model_dir = main_log_dir + 'BestModels_{}_{}_Repeat_{}/'.format(cfg_args.match_agg, cfg_args.global_agg, cfg_args.repeat_run)
create_BestModel_dir = create_dir_if_not_exists(best_model_dir)
LOG_FILE = main_log_dir + 'repeat_{}_'.format(cfg_args.repeat_run) + title + '.txt'
BestModel_FILE = best_model_dir + title + '.BestModel'
CSV_FILE = main_log_dir + title + '.csv'
# <-><-><-> end of the logging-only paths block
sub_data_dir = '{}_{}ACFG_min{}_max{}'.format(graph_name, graph_init_dim, graph_min, graph_max)
cfg_data_dir = os.path.join(main_data_dir, sub_data_dir) if 'ffmpeg' in sub_data_dir else os.path.join(main_data_dir, sub_data_dir, 'acfgSSL_6')
assert os.path.exists(cfg_data_dir), "the path {} does not exist!".format(cfg_data_dir)
if cfg_args.only_test is True:
model_save_path = cfg_args.model_path
LOG_FILE = main_log_dir + 'OnlyTest_repeat_{}_'.format(cfg_args.repeat_run) + title + '.txt'
write_log_file(LOG_FILE, create_log_str)
write_log_file(LOG_FILE, create_BestModel_dir)
write_log_file(LOG_FILE, str(cfg_args))
cfg_trainer = CFGTrainer(node_init_dims=graph_init_dim, data_dir=cfg_data_dir, device=d, log_file=LOG_FILE, best_model_file=model_save_path, args=cfg_args)
ret_final_test_auc = cfg_trainer.testing()
else:
write_log_file(LOG_FILE, create_log_str)
write_log_file(LOG_FILE, create_BestModel_dir)
write_log_file(LOG_FILE, str(cfg_args))
cfg_trainer = CFGTrainer(node_init_dims=graph_init_dim, data_dir=cfg_data_dir, device=d, log_file=LOG_FILE, best_model_file=BestModel_FILE, args=cfg_args)
ret_best_val_auc = cfg_trainer.fit()
ret_final_test_auc = cfg_trainer.testing()
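# Example invocation (hypothetical; the exact flag names are defined in cfg_config.py,
# which is not shown here, but the attributes read above suggest something like):
#     python cfg_train.py --data_dir=/path/to/data --dataset=ffmpeg \
#         --graph_size_min=10 --graph_size_max=200 --graph_init_dim=6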
|
py
|
1a5775f7f49adb12eee643c6bac07ba52e283ffa
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DirectRunner, executing on the local machine.
The DirectRunner is a runner implementation that executes the entire
graph of transformations belonging to a pipeline on the local machine.
"""
from __future__ import absolute_import
import collections
import logging
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.runners.direct.bundle_factory import BundleFactory
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.utils.pipeline_options import DirectOptions
from apache_beam.utils.value_provider import RuntimeValueProvider
class DirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine."""
def __init__(self):
self._cache = None
def apply_CombinePerKey(self, transform, pcoll):
# TODO: Move imports to top. The Pipeline <-> Runner dependency causes problems
# with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.helper_transforms import LiftedCombinePerKey
try:
return pcoll | LiftedCombinePerKey(
transform.fn, transform.args, transform.kwargs)
except NotImplementedError:
return transform.expand(pcoll)
def run(self, pipeline):
"""Execute the entire pipeline and returns an DirectPipelineResult."""
# TODO: Move imports to top. The Pipeline <-> Runner dependency causes problems
# with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import \
ConsumerTrackingPipelineVisitor
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.executor import Executor
from apache_beam.runners.direct.transform_evaluator import \
TransformEvaluatorRegistry
MetricsEnvironment.set_metrics_supported(True)
logging.info('Running pipeline with DirectRunner.')
self.visitor = ConsumerTrackingPipelineVisitor()
pipeline.visit(self.visitor)
evaluation_context = EvaluationContext(
pipeline.options,
BundleFactory(stacked=pipeline.options.view_as(DirectOptions)
.direct_runner_use_stacked_bundle),
self.visitor.root_transforms,
self.visitor.value_to_consumers,
self.visitor.step_names,
self.visitor.views)
evaluation_context.use_pvalue_cache(self._cache)
executor = Executor(self.visitor.value_to_consumers,
TransformEvaluatorRegistry(evaluation_context),
evaluation_context)
# Start the executor. This is a non-blocking call, it will start the
# execution in background threads and return.
if pipeline.options:
RuntimeValueProvider.set_runtime_options(pipeline.options._options_id, {})
executor.start(self.visitor.root_transforms)
result = DirectPipelineResult(executor, evaluation_context)
if self._cache:
# We are running in eager mode, block until the pipeline execution
# completes in order to have full results in the cache.
result.wait_until_finish()
self._cache.finalize()
# Unset runtime options after the pipeline finishes.
# TODO: Move this to a post finish hook and clean for all cases.
if pipeline.options:
RuntimeValueProvider.unset_runtime_options(pipeline.options._options_id)
return result
@property
def cache(self):
if not self._cache:
self._cache = BufferingInMemoryCache()
return self._cache.pvalue_cache
class BufferingInMemoryCache(object):
"""PValueCache wrapper for buffering bundles until a PValue is fully computed.
BufferingInMemoryCache keeps an in-memory cache keyed by
(applied_ptransform, tag) tuples. It accepts appends to existing cache
entries until it is finalized. finalize() makes all the existing cached
entries visible to the underlying PValueCache in their entirety, clears the
in-memory cache and stops accepting new cache entries.
"""
def __init__(self):
self._cache = collections.defaultdict(list)
self._pvalue_cache = PValueCache()
self._finalized = False
@property
def pvalue_cache(self):
return self._pvalue_cache
def append(self, applied_ptransform, tag, elements):
assert not self._finalized
assert elements is not None
self._cache[(applied_ptransform, tag)].extend(elements)
def finalize(self):
"""Make buffered cache elements visible to the underlying PValueCache."""
assert not self._finalized
for key, value in self._cache.items():  # items() keeps this compatible with Python 3
applied_ptransform, tag = key
self._pvalue_cache.cache_output(applied_ptransform, tag, value)
self._cache = None
class DirectPipelineResult(PipelineResult):
"""A DirectPipelineResult provides access to info about a pipeline."""
def __init__(self, executor, evaluation_context):
super(DirectPipelineResult, self).__init__(PipelineState.RUNNING)
self._executor = executor
self._evaluation_context = evaluation_context
def _is_in_terminal_state(self):
return self._state is not PipelineState.RUNNING
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if duration:
raise NotImplementedError(
'DirectRunner does not support duration argument.')
try:
self._executor.await_completion()
self._state = PipelineState.DONE
except: # pylint: disable=broad-except
self._state = PipelineState.FAILED
raise
return self._state
def aggregated_values(self, aggregator_or_name):
return self._evaluation_context.get_aggregator_values(aggregator_or_name)
def metrics(self):
return self._evaluation_context.metrics()
class EagerRunner(DirectRunner):
is_eager = True
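# A minimal usage sketch (assuming an apache_beam SDK of roughly the same vintage as
# this module; the element values are placeholders):
#
#     import apache_beam as beam
#
#     p = beam.Pipeline(runner=DirectRunner())
#     _ = p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * x)
#     result = p.run()
#     result.wait_until_finish()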
|
py
|
1a57765acc93de400133bbe1aa4f922455e42a69
|
# @date 17.05.2021.0
# @clock 22.50
# @author onur55-tr
from turtle import *
def my_goto(x,y):
penup()
goto(x,y)
pendown()
def gozler():
fillcolor('#ffffff')
begin_fill()
tracer(False)
a = 2.5
for i in range(120):
if 0 <= i < 30 or 60 <= i < 90:
a -= 0.05
lt(3)
fd(a)
else:
a += 0.05
lt(3)
fd(a)
tracer(True)
end_fill()
def sakal():
#left
my_goto(-27,135)
seth(165) # heading angle
fd(50) # distance
my_goto(-27,125)
seth(180)
fd(62)
my_goto(-27,115)
seth(195)
fd(50)
#right
my_goto(35,135)
seth(15)
fd(60)
my_goto(35,125)
seth(0)
fd(72)
my_goto(35,115)
seth(-12)
fd(60)
def mouth():
my_goto(5,148)
seth(270)
fd(100)
seth(0)
circle(110,50)
seth(230)
circle(-110,100)
def scarf():
fillcolor('#e70010')
begin_fill()
seth(0)
fd(190)
circle(-5,90)
fd(10)
circle(-5,90)
fd(207)
circle(-5,90)
fd(10)
circle(-5,90)
end_fill()
def nose():
my_goto(-10, 158)
seth(315)
fillcolor('#e70010')
begin_fill()
circle(20)
end_fill()
def siyah_gozler():
seth(0)
my_goto(-20,195)
fillcolor('#000000')
begin_fill()
circle(13)
end_fill()
pensize(6)
my_goto(20,205)
seth(75)
circle(-10,150)
pensize(3)
my_goto(-17,200)
seth(0)
fillcolor('#ffffff')
begin_fill()
circle(5)
end_fill()
my_goto(0,0)
def face():
fd(183)
lt(45)
fillcolor('#ffffff')
begin_fill()
circle(120, 100)
seth(180)
# print(pos())
fd(121)
pendown()
seth(210)
circle(120, 100)
end_fill()
my_goto(63.56,218.24)
seth(90)
gozler()
seth(180)
penup()
fd(60)
pendown()
seth(90)
gozler()
penup()
seth(180)
fd(64)
def head():
penup()
circle(150,40)
pendown()
fillcolor('#00a0de')
begin_fill()
circle(140,280)
end_fill()
def Doraemon():
head()
scarf()
face()
nose()
mouth()
sakal()
# body
my_goto(0, 0)
seth(0)
penup()
circle(150, 50)
pendown()
seth(30)
fd(40)
seth(70)
circle(-30, 270)
fillcolor('#00a0de')
begin_fill()
seth(230)
fd(80)
seth(90)
circle(1000, 1)
seth(-89)
circle(-1000, 10)
# print(pos())
seth(180)
fd(70)
seth(90)
circle(30, 180)
seth(180)
fd(70)
# print(pos())
seth(100)
circle(-1000, 9)
seth(-86)
circle(1000, 2)
seth(230)
fd(40)
# print(pos())
circle(-30, 230)
seth(45)
fd(81)
seth(0)
fd(203)
circle(5, 90)
fd(10)
circle(5, 90)
fd(7)
seth(40)
circle(150, 10)
seth(30)
fd(40)
end_fill()
seth(70)
fillcolor('#ffffff')
begin_fill()
circle(-30)
end_fill()
my_goto(103.74, -182.59)
seth(0)
fillcolor('#ffffff')
begin_fill()
fd(15)
circle(-15, 180)
fd(90)
circle(-15, 180)
fd(10)
end_fill()
my_goto(-96.26, -182.59)
seth(180)
fillcolor('#ffffff')
begin_fill()
fd(15)
circle(15, 180)
fd(90)
circle(15, 180)
fd(10)
end_fill()
my_goto(-133.97, -91.81)
seth(50)
fillcolor('#ffffff')
begin_fill()
circle(30)
end_fill()
my_goto(-103.42, 15.09)
seth(0)
fd(38)
seth(230)
begin_fill()
circle(90, 260)
end_fill()
my_goto(5, -40)
seth(0)
fd(70)
seth(-90)
circle(-70, 180)
seth(0)
fd(70)
my_goto(-103.42, 15.09)
fd(90)
seth(70)
fillcolor('#ffd200')
begin_fill()
circle(-20)
end_fill()
seth(170)
fillcolor('#ffd200')
begin_fill()
circle(-2, 180)
seth(10)
circle(-100, 22)
circle(-2, 180)
seth(180-10)
circle(100, 22)
end_fill()
goto(-13.42, 15.09)
seth(250)
circle(20, 110)
seth(90)
fd(15)
dot(10)
my_goto(0, -150)
siyah_gozler()
if __name__ == '__main__':
screensize(1000,600, "#f0f0f0")
pensize(4)
speed(8)
Doraemon()
my_goto(100,-300)
write('by onur55-tr', font=("Bradley Hand ITC", 30, "bold"))
mainloop()
|
py
|
1a5777398cbd149c4fb09164106d5eadaa10160f
|
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='odlt',
version='0.1',
description='Automation for importing and exporting packages of datasets, dashboards, and views in OmniSci Core / Immerse',
url='http://github.com/omnisci/odlt',
author='JP Harvey',
author_email='[email protected]',
license='Apache',
packages=['odlt'],
install_requires=[
'pymapd',
'pytest',
'markdown',
'boto3',
],
zip_safe=False)
|
py
|
1a5777c611a13526411c4333750d81534c581539
|
from .helpers import get_pylint_output, write_output
from ..automation_tools import read_json
import os, sys
# https://docs.pylint.org/features.html#general-options
def find(items, filename, coreonly):
enabled = ','.join(items)
print('Generating %s in all of pygsti%s. This should take less than a minute' %
(enabled, " (core only)" if coreonly else ""))
config = read_json('config/pylint_config.json')
commands = [config['pylint-version'],
'--disable=all',
'--enable=%s' % enabled,
'--rcfile=%s' % config['config-file'],
'--reports=n'] + (['pygsti'] if coreonly else config['packages'])
output = get_pylint_output(commands, filename) # implicitly puts to screen/saves to file
def look_for(args, coreonly=True):
if len(args) == 0:
print('Please supply a filename and list of things to check for. (see https://docs.pylint.org/features.html#general-options)')
sys.exit(1)
# If only one argument is supplied, assume it is both the filename and the item name.
# (args is expected to mirror sys.argv[1:], so it is used consistently below.)
elif len(args) == 1:
args.append(args[0])
find(args[1:], args[0], coreonly)
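# This helper is presumably driven by a small wrapper that forwards the command-line
# arguments, e.g. (an assumption; the wrapper itself is not part of this module):
#     look_for(sys.argv[1:])   # sys.argv[1] = output filename, the rest = checks to enable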
|
py
|
1a577854087d8ed49d83c5bd1d4c0683315c68ab
|
#!/usr/bin/env python
from setuptools import setup
setup(name='django-s3file', use_scm_version=True)
|
py
|
1a5778a17cf45375e94d971d170a62e0d9ebf0d5
|
import collections
import copy
import glob
import logging
import os
import pickle
import sys
import tarfile
import time
from io import BytesIO
from dxtbx.model.experiment_list import (
Experiment,
ExperimentList,
ExperimentListFactory,
)
from libtbx.phil import parse
from libtbx.utils import Abort, Sorry
import dials.util
from dials.array_family import flex
from dials.util import log
logger = logging.getLogger("dials.command_line.stills_process")
help_message = """
DIALS script for processing still images. Import, index, refine, and integrate are all done for each image
separately.
"""
control_phil_str = """
input {
file_list = None
.type = path
.help = Path to a text file with a list of images
glob = None
.type = str
.help = For large, multi-file datasets, specify the paths using wildcards (e.g. *.cbf)
.multiple = True
image_tag = None
.type = str
.multiple = True
.help = Only process images with these tag(s). For single-image files (like CBFs or SMVs), the image \
tag for each file is the file name. For multi-image files like HDF5, the image tag is \
filename_imagenumber (including leading zeros). Use show_image_tags=True to see the list \
of image tags that will be used for a dataset.
show_image_tags = False
.type = bool
.help = Show the set of image tags that would be used during processing. To process subsets of image \
files, use these tags with the image_tag parameter.
max_images = None
.type = int
.help = Limit total number of processed images to max_images
ignore_gain_mismatch = False
.type = bool
.expert_level = 3
.help = Detector gain should be set on the detector models loaded from the images or in the \
processing parameters, not both. Override the check that this is true with this flag. \
}
dispatch {
pre_import = False
.type = bool
.expert_level = 2
.help = If True, before processing import all the data. Needed only if processing \
multiple multi-image files at once (not a recommended use case)
process_percent = None
.type = int(value_min=1, value_max=100)
.help = Percent of events to process
refine = False
.expert_level = 2
.type = bool
.help = If True, after indexing, refine the experimental models
squash_errors = True
.expert_level = 2
.type = bool
.help = If True, if an image fails to process, continue to the next image. \
otherwise, halt processing and show the error.
find_spots = True
.expert_level = 2
.type = bool
.help = Whether to do spotfinding. Needed for indexing/integration
index = True
.expert_level = 2
.type = bool
.help = Attempt to index images. find_spots also needs to be True for this to work
integrate = True
.expert_level = 2
.type = bool
.help = Integrate indexed images. Ignored if index=False or find_spots=False
coset = False
.expert_level = 2
.type = bool
.help = Within the integrate dispatcher, integrate a sublattice coset intended to represent \
negative control spots with no Bragg diffraction.
hit_finder{
enable = True
.type = bool
.help = Whether to do hitfinding. hit_finder=False: process all images
minimum_number_of_reflections = 16
.type = int
.help = If the number of strong reflections on an image is less than this, and \
the hitfinder is enabled, discard this image.
maximum_number_of_reflections = None
.type = int
.help = If specified, ignores images with more than this many number of reflections
}
}
output {
output_dir = .
.type = str
.help = Directory output files will be placed
composite_output = True
.type = bool
.help = If True, save one set of experiment/reflection files per process, where each is a \
concatenated list of all the successful events examined by that process. \
If False, output a separate experiment/reflection file per image (generates a \
lot of files).
logging_dir = None
.type = str
.help = Directory output log files will be placed
experiments_filename = None
.type = str
.help = The filename for output experiments. For example, %s_imported.expt
strong_filename = None
.type = str
.help = The filename for strong reflections from spot finder output. For example: \
%s_strong.refl
indexed_filename = %s_indexed.refl
.type = str
.help = The filename for indexed reflections.
refined_experiments_filename = %s_refined.expt
.type = str
.help = The filename for saving refined experimental models
integrated_filename = %s_integrated.refl
.type = str
.help = The filename for final integrated reflections.
integrated_experiments_filename = %s_integrated.expt
.type = str
.help = The filename for saving final experimental models.
coset_filename = %s_coset%d.refl
.type = str
.help = The filename for final coset reflections.
coset_experiments_filename = %s_coset%d.expt
.type = str
.help = The filename for saving final coset experimental models.
profile_filename = None
.type = str
.help = The filename for output reflection profile parameters
integration_pickle = int-%d-%s.pickle
.type = str
.help = Filename for cctbx.xfel-style integration pickle files
}
mp {
method = *multiprocessing sge lsf pbs mpi
.type = choice
.help = "The multiprocessing method to use"
nproc = 1
.type = int(value_min=1)
.help = "The number of processes to use."
composite_stride = None
.type = int
.help = For MPI, if using composite mode, specify how many ranks to \
aggregate data from. For example, if you have 100 processes, \
composite mode will output N*100 files, where N is the number \
of file types (expt, refl, etc). If you specify stride = 25, \
then each group of 25 process will send their results to 4 \
processes and only N*4 files will be created. Ideally, match \
stride to the number of processors per node.
debug
.expert_level = 2
{
cProfile = False
.type = bool
.help = Enable code profiling. Profiling file will be available in \
the debug folder. Use (for example) runsnake to visualize \
processing performance
output_debug_logs = True
.type = bool
.help = Whether to write debugging information for every image \
processed
}
}
"""
dials_phil_str = """
input {
reference_geometry = None
.type = str
.help = Provide an models.expt file with exactly one detector model. Data processing will use \
that geometry instead of the geometry found in the image headers.
sync_reference_geom = True
.type = bool
.help = ensures the reference hierarchy agrees with the image format
}
output {
shoeboxes = True
.type = bool
.help = Save the raw pixel values inside the reflection shoeboxes during spotfinding.
}
include scope dials.util.options.geometry_phil_scope
include scope dials.algorithms.spot_finding.factory.phil_scope
include scope dials.algorithms.indexing.indexer.phil_scope
indexing {
include scope dials.algorithms.indexing.lattice_search.basis_vector_search_phil_scope
}
include scope dials.algorithms.refinement.refiner.phil_scope
include scope dials.algorithms.integration.integrator.phil_scope
include scope dials.algorithms.profile_model.factory.phil_scope
include scope dials.algorithms.spot_prediction.reflection_predictor.phil_scope
include scope dials.algorithms.integration.stills_significance_filter.phil_scope
indexing {
stills {
method_list = None
.type = strings
.help = List of indexing methods. If indexing fails with first method, indexing will be \
attempted with the next, and so forth
}
}
integration {
include scope dials.algorithms.integration.kapton_correction.absorption_phil_scope
coset {
transformation = 6
.type = int(value_min=0, value_max=6)
.multiple = False
.help = The index number(s) of the modulus=2 sublattice transformation(s) used to produce distinct coset results. \
0=Double a, 1=Double b, 2=Double c, 3=C-face centering, 4=B-face centering, 5=A-face centering, 6=Body centering \
See Sauter and Zwart, Acta D (2009) 65:553
}
integration_only_overrides {
trusted_range = None
.type = floats(size=2)
.help = "Override the panel trusted range (underload and saturation) during integration."
.short_caption = "Panel trusted range"
}
}
profile {
gaussian_rs {
parameters {
sigma_b_cutoff = 0.1
.type = float
.help = Maximum sigma_b before the image is rejected
}
}
}
"""
program_defaults_phil_str = """
indexing {
method = fft1d
}
refinement {
parameterisation {
auto_reduction {
min_nref_per_parameter = 1
action = fix
}
beam.fix = all
detector.fix = all
}
reflections {
weighting_strategy.override = stills
outlier.algorithm = null
}
}
integration {
integrator = stills
profile.fitting = False
background {
algorithm = simple
simple {
outlier.algorithm = plane
model.algorithm = linear2d
}
}
}
profile.gaussian_rs.min_spots.overall = 0
"""
phil_scope = parse(control_phil_str + dials_phil_str, process_includes=True).fetch(
parse(program_defaults_phil_str)
)
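# phil_scope is the master scope (control + dials definitions) with the stills-specific
# program defaults fetched on top, so e.g. indexing.method defaults to fft1d and
# integration.integrator to stills unless a user phil file or command-line assignment
# overrides them again later in the parser.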
def do_import(filename, load_models=True):
logger.info("Loading %s", os.path.basename(filename))
experiments = ExperimentListFactory.from_filenames([filename], load_models=False)
if len(experiments) == 0:
try:
experiments = ExperimentListFactory.from_json_file(filename)
except ValueError:
raise Abort(f"Could not load {filename}")
if len(experiments) == 0:
raise Abort(f"Could not load {filename}")
from dxtbx.imageset import ImageSetFactory
all_experiments = ExperimentList()
for experiment in experiments:
# Convert from ImageSequence to ImageSet, if needed
imageset = ImageSetFactory.imageset_from_anyset(experiment.imageset)
for i in range(len(imageset)):
# Preserve the original models if they were available (they will not be for a
# raw image file, but they may be for a previously processed experiment list)
expt = Experiment(
imageset=imageset[i : i + 1],
detector=experiment.detector,
beam=experiment.beam,
scan=experiment.scan,
goniometer=experiment.goniometer,
crystal=experiment.crystal,
)
if load_models:
expt.load_models()
all_experiments.append(expt)
return all_experiments
def sync_geometry(src, dest):
dest.set_local_frame(
src.get_local_fast_axis(), src.get_local_slow_axis(), src.get_local_origin()
)
if not src.is_panel():
for src_child, dest_child in zip(src, dest):
sync_geometry(src_child, dest_child)
class Script:
"""A class for running the script."""
def __init__(self):
"""Initialise the script."""
from dials.util.options import OptionParser
# The script usage
usage = "usage: dials.stills_process [options] [param.phil] filenames"
self.tag = None
self.reference_detector = None
# Create the parser
self.parser = OptionParser(usage=usage, phil=phil_scope, epilog=help_message)
def load_reference_geometry(self):
if self.params.input.reference_geometry is None:
return
try:
ref_experiments = ExperimentListFactory.from_json_file(
self.params.input.reference_geometry, check_format=False
)
except Exception:
try:
import dxtbx
img = dxtbx.load(self.params.input.reference_geometry)
except Exception:
raise Sorry(
"Couldn't load geometry file %s"
% self.params.input.reference_geometry
)
else:
self.reference_detector = img.get_detector()
else:
assert len(ref_experiments.detectors()) == 1
self.reference_detector = ref_experiments.detectors()[0]
def run(self, args=None):
"""Execute the script."""
from libtbx import easy_mp
try:
from mpi4py import MPI
except ImportError:
rank = 0
size = 1
else:
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
size = comm.Get_size() # size: number of processes running in this job
if rank == 0:
# Parse the command line
params, options, all_paths = self.parser.parse_args(
args, show_diff_phil=False, return_unhandled=True, quick_parse=True
)
if params.input.glob:
all_paths.extend(params.input.glob)
globbed = []
for p in all_paths:
globbed.extend(glob.glob(p))
all_paths = globbed
if not all_paths and params.input.file_list is not None:
all_paths.extend(
[path.strip() for path in open(params.input.file_list).readlines()]
)
if size > 1:
if rank == 0:
transmitted_info = params, options, all_paths
else:
transmitted_info = None
params, options, all_paths = comm.bcast(transmitted_info, root=0)
# Check we have some filenames
if not all_paths:
self.parser.print_help()
return
if params.mp.debug.cProfile:
import cProfile
self.pr = cProfile.Profile()
self.pr.enable()
print(f"Have {len(all_paths)} files")
# Mask validation
for mask_path in params.spotfinder.lookup.mask, params.integration.lookup.mask:
if mask_path is not None and not os.path.isfile(mask_path):
raise Sorry(f"Mask {mask_path} not found")
# Save the options
self.options = options
self.params = params
st = time.time()
if params.mp.method == "mpi":
# Configure the logging
if params.output.logging_dir is None:
logfile = None
else:
log_path = os.path.join(
params.output.logging_dir, "log_rank%04d.out" % rank
)
error_path = os.path.join(
params.output.logging_dir, "error_rank%04d.out" % rank
)
print(f"Redirecting stdout to {log_path}")
print(f"Redirecting stderr to {error_path}")
sys.stdout = open(log_path, "a")
sys.stderr = open(error_path, "a")
print("Should be redirected now")
logfile = os.path.join(
params.output.logging_dir, "info_rank%04d.out" % rank
)
log.config(verbosity=options.verbose, logfile=logfile)
else:
# Configure logging
log.config(verbosity=options.verbose, logfile="dials.process.log")
bad_phils = [f for f in all_paths if os.path.splitext(f)[1] == ".phil"]
if len(bad_phils) > 0:
self.parser.print_help()
logger.error(
"Error: the following phil files were not understood: %s",
", ".join(bad_phils),
)
return
# Log the diff phil
diff_phil = self.parser.diff_phil.as_str()
if diff_phil != "":
logger.info("The following parameters have been modified:\n")
logger.info(diff_phil)
for abs_params in self.params.integration.absorption_correction:
if abs_params.apply:
if not (
self.params.integration.debug.output
and not self.params.integration.debug.separate_files
):
raise Sorry(
"Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
+ "Set integration.debug.output=True, integration.debug.separate_files=False and "
+ "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
)
self.load_reference_geometry()
from dials.command_line.dials_import import ManualGeometryUpdater
update_geometry = ManualGeometryUpdater(params)
# Import stuff
logger.info("Loading files...")
pre_import = params.dispatch.pre_import or len(all_paths) == 1
if pre_import:
# Handle still imagesets by breaking them apart into multiple experiments
# Further handle single file still imagesets (like HDF5) by tagging each
# frame using its index
experiments = ExperimentList()
for path in sorted(all_paths):
experiments.extend(do_import(path, load_models=False))
indices = []
basenames = []
basename_counts = {}
split_experiments = []
for i, imageset in enumerate(experiments.imagesets()):
assert len(imageset) == 1
paths = imageset.paths()
indices.append(i)
basename = os.path.splitext(os.path.basename(paths[0]))[0]
basenames.append(basename)
if basename in basename_counts:
basename_counts[basename] += 1
else:
basename_counts[basename] = 1
split_experiments.append(experiments[i : i + 1])
tags = []
split_experiments2 = []
for i, basename in zip(indices, basenames):
if basename_counts[basename] > 1:
tag = "%s_%05d" % (basename, i)
else:
tag = basename
if (
not self.params.input.image_tag
or tag in self.params.input.image_tag
):
tags.append(tag)
split_experiments2.append(split_experiments[i])
split_experiments = split_experiments2
# Wrapper function
def do_work(i, item_list, processor=None, finalize=True):
if not processor:
processor = Processor(
copy.deepcopy(params), composite_tag="%04d" % i, rank=i
)
for item in item_list:
tag = item[0]
experiments = split_experiments[item[1]]
try:
assert len(experiments) == 1
experiment = experiments[0]
experiment.load_models()
imageset = experiment.imageset
update_geometry(imageset)
experiment.beam = imageset.get_beam()
experiment.detector = imageset.get_detector()
except RuntimeError as e:
logger.warning("Error updating geometry on item %s, %s", tag, e)
continue
if self.reference_detector is not None:
experiment = experiments[0]
if self.params.input.sync_reference_geom:
imageset = experiment.imageset
sync_geometry(
self.reference_detector.hierarchy(),
imageset.get_detector().hierarchy(),
)
experiment.detector = imageset.get_detector()
else:
experiment.detector = copy.deepcopy(self.reference_detector)
processor.process_experiments(tag, experiments)
imageset.clear_cache()
if finalize:
processor.finalize()
return processor
iterable = list(zip(tags, range(len(split_experiments))))
else:
basenames = collections.defaultdict(int)
sorted_paths = sorted(all_paths)
for filename in sorted_paths:
basename = os.path.splitext(os.path.basename(filename))[0]
basenames[basename] += 1
tags = []
all_paths2 = []
for i, (basename, count) in enumerate(basenames.items()):
if count > 1:
tag = "%s_%05d" % (basename, i)
else:
tag = basename
if (
not self.params.input.image_tag
or tag in self.params.input.image_tag
):
tags.append(tag)
all_paths2.append(sorted_paths[i])
all_paths = all_paths2
# Wrapper function
def do_work(i, item_list, processor=None, finalize=True):
if not processor:
processor = Processor(
copy.deepcopy(params), composite_tag="%04d" % i, rank=i
)
for item in item_list:
tag, filename = item
experiments = do_import(filename, load_models=True)
imagesets = experiments.imagesets()
if len(imagesets) == 0 or len(imagesets[0]) == 0:
logger.info("Zero length imageset in file: %s", filename)
return
if len(imagesets) > 1:
raise Abort(f"Found more than one imageset in file: {filename}")
if len(imagesets[0]) > 1:
raise Abort(
"Found a multi-image file. Run again with pre_import=True"
)
try:
update_geometry(imagesets[0])
experiment = experiments[0]
experiment.beam = imagesets[0].get_beam()
experiment.detector = imagesets[0].get_detector()
except RuntimeError as e:
logger.warning("Error updating geometry on item %s, %s", tag, e)
continue
if self.reference_detector is not None:
if self.params.input.sync_reference_geom:
imageset = experiments[0].imageset
sync_geometry(
self.reference_detector.hierarchy(),
imageset.get_detector().hierarchy(),
)
experiments[0].detector = imageset.get_detector()
else:
experiments[0].detector = copy.deepcopy(
self.reference_detector
)
processor.process_experiments(tag, experiments)
if finalize:
processor.finalize()
return processor
iterable = list(zip(tags, all_paths))
if params.input.max_images:
iterable = iterable[: params.input.max_images]
if params.input.show_image_tags:
print("Showing image tags for this dataset and exiting")
for tag, item in iterable:
print(tag)
return
# prepare fractions of process_percent, if given
process_fractions = None
if params.dispatch.process_percent:
import fractions
percent = params.dispatch.process_percent / 100
process_fractions = fractions.Fraction(percent).limit_denominator(100)
def process_this_event(nevent):
# nevent modulo the denominator gives us which fraction we're in
n_mod_denom = nevent % process_fractions.denominator
# compare the 0-indexed modulo against the 1-indexed numerator (intentionally not <=)
n_accept = n_mod_denom < process_fractions.numerator
return n_accept
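# Worked example (hypothetical value): with process_percent=40 the fraction is limited
# to 2/5, so in every block of 5 events the ones with nevent % 5 in {0, 1} are accepted
# and the other three are skipped, yielding 40% of the stream without knowing its length.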
# Process the data
if params.mp.method == "mpi":
if size <= 2: # client/server only makes sense for n>2
subset = [
item for i, item in enumerate(iterable) if (i + rank) % size == 0
]
do_work(rank, subset)
else:
processor = Processor(
copy.deepcopy(params), composite_tag="%04d" % rank, rank=rank
)
if rank == 0:
# server process
for item_num, item in enumerate(iterable):
if process_fractions and not process_this_event(item_num):
continue
print("Getting next available process")
rankreq = comm.recv(source=MPI.ANY_SOURCE)
print(f"Process {rankreq} is ready, sending {item[0]}\n")
comm.send(item, dest=rankreq)
# send a stop command to each process
print("MPI DONE, sending stops\n")
for rankreq in range(size - 1):
rankreq = comm.recv(source=MPI.ANY_SOURCE)
print("Sending stop to %d\n" % rankreq)
comm.send("endrun", dest=rankreq)
print("All stops sent.")
else:
# client process
while True:
# inform the server this process is ready for an event
print("Rank %d getting next task" % rank)
comm.send(rank, dest=0)
print("Rank %d waiting for response" % rank)
item = comm.recv(source=0)
if item == "endrun":
print("Rank %d received endrun" % rank)
break
print("Rank %d beginning processing" % rank)
try:
processor = do_work(rank, [item], processor, finalize=False)
except Exception as e:
print(
"Rank %d unhandled exception processing event" % rank,
str(e),
)
print("Rank %d event processed" % rank)
processor.finalize()
else:
from dxtbx.command_line.image_average import splitit
if params.mp.nproc == 1:
do_work(0, iterable)
else:
result = list(
easy_mp.multi_core_run(
myfunction=do_work,
argstuples=list(enumerate(splitit(iterable, params.mp.nproc))),
nproc=params.mp.nproc,
)
)
error_list = [r[2] for r in result]
if error_list.count(None) != len(error_list):
print(
"Some processes failed execution. Not all images may have processed. Error messages:"
)
for error in error_list:
if error is None:
continue
print(error)
# Total Time
logger.info("")
logger.info("Total Time Taken = %f seconds", time.time() - st)
if params.mp.debug.cProfile:
self.pr.disable()
self.pr.dump_stats(
os.path.join(
self.params.output.output_dir, "debug", "cpu_%d.prof" % comm.rank
)
)
class Processor:
def __init__(self, params, composite_tag=None, rank=0):
self.params = params
self.composite_tag = composite_tag
# The convention is to put %s in the phil parameter to add a tag to
# each output datafile. Save the initial templates here.
self.experiments_filename_template = params.output.experiments_filename
self.strong_filename_template = params.output.strong_filename
self.indexed_filename_template = params.output.indexed_filename
self.refined_experiments_filename_template = (
params.output.refined_experiments_filename
)
self.integrated_filename_template = params.output.integrated_filename
self.integrated_experiments_filename_template = (
params.output.integrated_experiments_filename
)
if params.dispatch.coset:
self.coset_filename_template = params.output.coset_filename
self.coset_experiments_filename_template = (
params.output.coset_experiments_filename
)
debug_dir = os.path.join(params.output.output_dir, "debug")
if not os.path.exists(debug_dir):
try:
os.makedirs(debug_dir)
except OSError:
pass # due to multiprocessing, makedirs can sometimes fail
assert os.path.exists(debug_dir)
self.debug_file_path = os.path.join(debug_dir, "debug_%d.txt" % rank)
write_newline = os.path.exists(self.debug_file_path)
if write_newline: # needed if there was a crash
self.debug_write("")
if params.output.composite_output:
assert composite_tag is not None
self.all_imported_experiments = ExperimentList()
self.all_strong_reflections = flex.reflection_table()
self.all_indexed_experiments = ExperimentList()
self.all_indexed_reflections = flex.reflection_table()
self.all_integrated_experiments = ExperimentList()
self.all_integrated_reflections = flex.reflection_table()
self.all_int_pickle_filenames = []
self.all_int_pickles = []
self.all_coset_experiments = ExperimentList()
self.all_coset_reflections = flex.reflection_table()
self.setup_filenames(composite_tag)
def setup_filenames(self, tag):
# before processing, set output paths according to the templates
if (
self.experiments_filename_template is not None
and "%s" in self.experiments_filename_template
):
self.params.output.experiments_filename = os.path.join(
self.params.output.output_dir,
self.experiments_filename_template % ("idx-" + tag),
)
if (
self.strong_filename_template is not None
and "%s" in self.strong_filename_template
):
self.params.output.strong_filename = os.path.join(
self.params.output.output_dir,
self.strong_filename_template % ("idx-" + tag),
)
if (
self.indexed_filename_template is not None
and "%s" in self.indexed_filename_template
):
self.params.output.indexed_filename = os.path.join(
self.params.output.output_dir,
self.indexed_filename_template % ("idx-" + tag),
)
if (
self.refined_experiments_filename_template is not None
and "%s" in self.refined_experiments_filename_template
):
self.params.output.refined_experiments_filename = os.path.join(
self.params.output.output_dir,
self.refined_experiments_filename_template % ("idx-" + tag),
)
if (
self.integrated_filename_template is not None
and "%s" in self.integrated_filename_template
):
self.params.output.integrated_filename = os.path.join(
self.params.output.output_dir,
self.integrated_filename_template % ("idx-" + tag),
)
if (
self.integrated_experiments_filename_template is not None
and "%s" in self.integrated_experiments_filename_template
):
self.params.output.integrated_experiments_filename = os.path.join(
self.params.output.output_dir,
self.integrated_experiments_filename_template % ("idx-" + tag),
)
if (
self.params.dispatch.coset
and self.coset_filename_template is not None
and "%s" in self.coset_filename_template
):
self.params.output.coset_filename = os.path.join(
self.params.output.output_dir,
self.coset_filename_template
% ("idx-" + tag, self.params.integration.coset.transformation),
)
if (
self.params.dispatch.coset
and self.coset_experiments_filename_template is not None
and "%s" in self.coset_experiments_filename_template
):
self.params.output.coset_experiments_filename = os.path.join(
self.params.output.output_dir,
self.coset_experiments_filename_template
% ("idx-" + tag, self.params.integration.coset.transformation),
)
def debug_start(self, tag):
if not self.params.mp.debug.output_debug_logs:
return
import socket
self.debug_str = f"{socket.gethostname()},{tag}"
self.debug_str += ",%s,%s,%s\n"
self.debug_write("start")
def debug_write(self, string, state=None):
if not self.params.mp.debug.output_debug_logs:
return
from xfel.cxi.cspad_ana import cspad_tbx # XXX move to common timestamp format
ts = cspad_tbx.evt_timestamp() # Now
debug_file_handle = open(self.debug_file_path, "a")
if string == "":
debug_file_handle.write("\n")
else:
if state is None:
state = " "
debug_file_handle.write(self.debug_str % (ts, state, string))
debug_file_handle.close()
def process_experiments(self, tag, experiments):
if not self.params.output.composite_output:
self.setup_filenames(tag)
self.tag = tag
self.debug_start(tag)
if self.params.output.experiments_filename:
if self.params.output.composite_output:
self.all_imported_experiments.extend(experiments)
else:
experiments.as_json(self.params.output.experiments_filename)
# Do the processing
try:
self.pre_process(experiments)
except Exception as e:
print("Error in pre-process", tag, str(e))
self.debug_write("preprocess_exception", "fail")
if not self.params.dispatch.squash_errors:
raise
return
try:
if self.params.dispatch.find_spots:
self.debug_write("spotfind_start")
observed = self.find_spots(experiments)
else:
print("Spot Finding turned off. Exiting")
self.debug_write("data_loaded", "done")
return
except Exception as e:
print("Error spotfinding", tag, str(e))
self.debug_write("spotfinding_exception", "fail")
if not self.params.dispatch.squash_errors:
raise
return
try:
if self.params.dispatch.index:
if (
self.params.dispatch.hit_finder.enable
and len(observed)
< self.params.dispatch.hit_finder.minimum_number_of_reflections
):
print("Not enough spots to index", tag)
self.debug_write(f"not_enough_spots_{len(observed)}", "stop")
return
if (
self.params.dispatch.hit_finder.maximum_number_of_reflections
is not None
):
if (
self.params.dispatch.hit_finder.enable
and len(observed)
> self.params.dispatch.hit_finder.maximum_number_of_reflections
):
print("Too many spots to index - Possibly junk", tag)
self.debug_write(f"too_many_spots_{len(observed)}", "stop")
return
self.debug_write("index_start")
experiments, indexed = self.index(experiments, observed)
else:
print("Indexing turned off. Exiting")
self.debug_write(f"spotfinding_ok_{len(observed)}", "done")
return
except Exception as e:
print("Couldn't index", tag, str(e))
if not self.params.dispatch.squash_errors:
raise
self.debug_write(f"indexing_failed_{len(observed)}", "stop")
return
self.debug_write("refine_start")
try:
experiments, indexed = self.refine(experiments, indexed)
except Exception as e:
print("Error refining", tag, str(e))
self.debug_write(f"refine_failed_{len(indexed)}", "fail")
if not self.params.dispatch.squash_errors:
raise
return
try:
if self.params.dispatch.integrate:
self.debug_write("integrate_start")
integrated = self.integrate(experiments, indexed)
else:
print("Integration turned off. Exiting")
self.debug_write(f"index_ok_{len(indexed)}", "done")
return
except Exception as e:
print("Error integrating", tag, str(e))
self.debug_write(f"integrate_failed_{len(indexed)}", "fail")
if not self.params.dispatch.squash_errors:
raise
return
self.debug_write(f"integrate_ok_{len(integrated)}", "done")
def pre_process(self, experiments):
"""Add any pre-processing steps here"""
if not self.params.input.ignore_gain_mismatch:
g1 = self.params.spotfinder.threshold.dispersion.gain
g2 = self.params.integration.summation.detector_gain
gain = g1 if g1 is not None else g2
if gain is not None and gain != 1.0:
for detector in experiments.detectors():
for panel in detector:
if panel.get_gain() != 1.0 and panel.get_gain() != gain:
raise RuntimeError(
"""
The detector is reporting a gain of %f but you have also supplied a gain of %f. Since the detector gain is not 1.0, your supplied gain will be multiplicatively applied in addition to the detector's gain, which is unlikely to be correct. Please re-run, removing spotfinder.dispersion.gain and integration.summation.detector_gain from your parameters. You can override this exception by setting input.ignore_gain_mismatch=True."""
% (panel.get_gain(), gain)
)
def find_spots(self, experiments):
st = time.time()
logger.info("*" * 80)
logger.info("Finding Strong Spots")
logger.info("*" * 80)
# Find the strong spots
observed = flex.reflection_table.from_observations(
experiments, self.params, is_stills=True
)
# Reset z coordinates for dials.image_viewer; see Issues #226 for details
xyzobs = observed["xyzobs.px.value"]
for i in range(len(xyzobs)):
xyzobs[i] = (xyzobs[i][0], xyzobs[i][1], 0)
bbox = observed["bbox"]
for i in range(len(bbox)):
bbox[i] = (bbox[i][0], bbox[i][1], bbox[i][2], bbox[i][3], 0, 1)
if self.params.output.composite_output:
n = len(self.all_strong_reflections.experiment_identifiers())
for i, experiment in enumerate(experiments):
refls = observed.select(observed["id"] == i)
refls["id"] = flex.int(len(refls), n)
del refls.experiment_identifiers()[i]
refls.experiment_identifiers()[n] = experiment.identifier
self.all_strong_reflections.extend(refls)
n += 1
else:
# Save the reflections to file
logger.info("\n" + "-" * 80)
if self.params.output.strong_filename:
self.save_reflections(observed, self.params.output.strong_filename)
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return observed
def index(self, experiments, reflections):
from dials.algorithms.indexing.indexer import Indexer
st = time.time()
logger.info("*" * 80)
logger.info("Indexing Strong Spots")
logger.info("*" * 80)
params = copy.deepcopy(self.params)
# don't do scan-varying refinement during indexing
params.refinement.parameterisation.scan_varying = False
if hasattr(self, "known_crystal_models"):
known_crystal_models = self.known_crystal_models
else:
known_crystal_models = None
if params.indexing.stills.method_list is None:
idxr = Indexer.from_parameters(
reflections,
experiments,
known_crystal_models=known_crystal_models,
params=params,
)
idxr.index()
else:
indexing_error = None
for method in params.indexing.stills.method_list:
params.indexing.method = method
try:
idxr = Indexer.from_parameters(
reflections, experiments, params=params
)
idxr.index()
except Exception as e:
logger.info("Couldn't index using method %s", method)
if indexing_error is None:
if e is None:
e = Exception(f"Couldn't index using method {method}")
indexing_error = e
else:
indexing_error = None
break
if indexing_error is not None:
raise indexing_error
indexed = idxr.refined_reflections
experiments = idxr.refined_experiments
if known_crystal_models is not None:
filtered = flex.reflection_table()
for idx in set(indexed["miller_index"]):
sel = indexed["miller_index"] == idx
if sel.count(True) == 1:
filtered.extend(indexed.select(sel))
logger.info(
"Filtered duplicate reflections, %d out of %d remaining",
len(filtered),
len(indexed),
)
print(
"Filtered duplicate reflections, %d out of %d remaining"
% (len(filtered), len(indexed))
)
indexed = filtered
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return experiments, indexed
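# A minimal sketch of the method_list fallback handled above, assuming a phil
# file with illustrative method names (consult the dials indexing
# documentation for the supported values):
#
#   indexing {
#     stills {
#       method_list = fft1d real_space_grid_search
#     }
#   }
#
# Each listed method is tried in order and the first one that indexes without
# raising is kept; only if every method fails is the first recorded error
# re-raised.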
def refine(self, experiments, centroids):
if self.params.dispatch.refine:
from dials.algorithms.refinement import RefinerFactory
st = time.time()
logger.info("*" * 80)
logger.info("Refining Model")
logger.info("*" * 80)
refiner = RefinerFactory.from_parameters_data_experiments(
self.params, centroids, experiments
)
refiner.run()
experiments = refiner.get_experiments()
predicted = refiner.predict_for_indexed()
centroids["xyzcal.mm"] = predicted["xyzcal.mm"]
centroids["entering"] = predicted["entering"]
centroids = centroids.select(refiner.selection_used_for_refinement())
# Re-estimate mosaic estimates
from dials.algorithms.indexing.nave_parameters import NaveParameters
nv = NaveParameters(
params=self.params,
experiments=experiments,
reflections=centroids,
refinery=refiner,
graph_verbose=False,
)
nv()
acceptance_flags_nv = nv.nv_acceptance_flags
centroids = centroids.select(acceptance_flags_nv)
if self.params.output.composite_output:
if (
self.params.output.refined_experiments_filename
or self.params.output.indexed_filename
):
assert (
self.params.output.refined_experiments_filename is not None
and self.params.output.indexed_filename is not None
)
n = len(self.all_indexed_experiments)
self.all_indexed_experiments.extend(experiments)
for i, experiment in enumerate(experiments):
refls = centroids.select(centroids["id"] == i)
refls["id"] = flex.int(len(refls), n)
del refls.experiment_identifiers()[i]
refls.experiment_identifiers()[n] = experiment.identifier
self.all_indexed_reflections.extend(refls)
n += 1
else:
# Dump experiments to disk
if self.params.output.refined_experiments_filename:
experiments.as_json(self.params.output.refined_experiments_filename)
if self.params.output.indexed_filename:
self.save_reflections(centroids, self.params.output.indexed_filename)
if self.params.dispatch.refine:
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return experiments, centroids
def integrate(self, experiments, indexed):
st = time.time()
logger.info("*" * 80)
logger.info("Integrating Reflections")
logger.info("*" * 80)
indexed, _ = self.process_reference(indexed)
if self.params.integration.integration_only_overrides.trusted_range:
for detector in experiments.detectors():
for panel in detector:
panel.set_trusted_range(
self.params.integration.integration_only_overrides.trusted_range
)
if self.params.dispatch.coset:
from xfel.util.sublattice_helper import integrate_coset
integrate_coset(self, experiments, indexed)
# Get the integrator from the input parameters
logger.info("Configuring integrator from input parameters")
from dials.algorithms.integration.integrator import create_integrator
from dials.algorithms.profile_model.factory import ProfileModelFactory
# Compute the profile model
# Predict the reflections
# Match the predictions with the reference
# Create the integrator
experiments = ProfileModelFactory.create(self.params, experiments, indexed)
new_experiments = ExperimentList()
new_reflections = flex.reflection_table()
for expt_id, expt in enumerate(experiments):
if (
self.params.profile.gaussian_rs.parameters.sigma_b_cutoff is None
or expt.profile.sigma_b()
< self.params.profile.gaussian_rs.parameters.sigma_b_cutoff
):
refls = indexed.select(indexed["id"] == expt_id)
refls["id"] = flex.int(len(refls), len(new_experiments))
# refls.reset_ids()
del refls.experiment_identifiers()[expt_id]
refls.experiment_identifiers()[len(new_experiments)] = expt.identifier
new_reflections.extend(refls)
new_experiments.append(expt)
else:
logger.info(
"Rejected expt %d with sigma_b %f"
% (expt_id, expt.profile.sigma_b())
)
experiments = new_experiments
indexed = new_reflections
if len(experiments) == 0:
raise RuntimeError("No experiments after filtering by sigma_b")
logger.info("")
logger.info("=" * 80)
logger.info("")
logger.info("Predicting reflections")
logger.info("")
predicted = flex.reflection_table.from_predictions_multi(
experiments,
dmin=self.params.prediction.d_min,
dmax=self.params.prediction.d_max,
margin=self.params.prediction.margin,
force_static=self.params.prediction.force_static,
)
predicted.match_with_reference(indexed)
logger.info("")
integrator = create_integrator(self.params, experiments, predicted)
# Integrate the reflections
integrated = integrator.integrate()
# correct integrated intensities for absorption correction, if necessary
for abs_params in self.params.integration.absorption_correction:
if abs_params.apply:
if abs_params.algorithm == "fuller_kapton":
from dials.algorithms.integration.kapton_correction import (
multi_kapton_correction,
)
elif abs_params.algorithm == "kapton_2019":
from dials.algorithms.integration.kapton_2019_correction import (
multi_kapton_correction,
)
experiments, integrated = multi_kapton_correction(
experiments, integrated, abs_params.fuller_kapton, logger=logger
)()
if self.params.significance_filter.enable:
from dials.algorithms.integration.stills_significance_filter import (
SignificanceFilter,
)
sig_filter = SignificanceFilter(self.params)
filtered_refls = sig_filter(experiments, integrated)
accepted_expts = ExperimentList()
accepted_refls = flex.reflection_table()
logger.info(
"Removed %d reflections out of %d when applying significance filter",
len(integrated) - len(filtered_refls),
len(integrated),
)
for expt_id, expt in enumerate(experiments):
refls = filtered_refls.select(filtered_refls["id"] == expt_id)
if len(refls) > 0:
accepted_expts.append(expt)
refls["id"] = flex.int(len(refls), len(accepted_expts) - 1)
accepted_refls.extend(refls)
else:
logger.info(
"Removed experiment %d which has no reflections left after applying significance filter",
expt_id,
)
if len(accepted_refls) == 0:
raise Sorry("No reflections left after applying significance filter")
experiments = accepted_expts
integrated = accepted_refls
# Delete the shoeboxes used for intermediate calculations, if requested
if self.params.integration.debug.delete_shoeboxes and "shoebox" in integrated:
del integrated["shoebox"]
if self.params.output.composite_output:
if (
self.params.output.integrated_experiments_filename
or self.params.output.integrated_filename
):
assert (
self.params.output.integrated_experiments_filename is not None
and self.params.output.integrated_filename is not None
)
n = len(self.all_integrated_experiments)
self.all_integrated_experiments.extend(experiments)
for i, experiment in enumerate(experiments):
refls = integrated.select(integrated["id"] == i)
refls["id"] = flex.int(len(refls), n)
del refls.experiment_identifiers()[i]
refls.experiment_identifiers()[n] = experiment.identifier
self.all_integrated_reflections.extend(refls)
n += 1
else:
# Dump experiments to disk
if self.params.output.integrated_experiments_filename:
experiments.as_json(self.params.output.integrated_experiments_filename)
if self.params.output.integrated_filename:
# Save the reflections
self.save_reflections(
integrated, self.params.output.integrated_filename
)
self.write_integration_pickles(integrated, experiments)
from dials.algorithms.indexing.stills_indexer import (
calc_2D_rmsd_and_displacements,
)
rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed)
log_str = f"RMSD indexed (px): {rmsd_indexed:f}\n"
for i in range(6):
bright_integrated = integrated.select(
(
integrated["intensity.sum.value"]
/ flex.sqrt(integrated["intensity.sum.variance"])
)
>= i
)
if len(bright_integrated) > 0:
rmsd_integrated, _ = calc_2D_rmsd_and_displacements(bright_integrated)
else:
rmsd_integrated = 0
log_str += (
"N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n"
% (i, len(bright_integrated), rmsd_integrated)
)
for crystal_model in experiments.crystals():
if hasattr(crystal_model, "get_domain_size_ang"):
log_str += ". Final ML model: domain size angstroms: {:f}, half mosaicity degrees: {:f}".format(
crystal_model.get_domain_size_ang(),
crystal_model.get_half_mosaicity_deg(),
)
logger.info(log_str)
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return integrated
def write_integration_pickles(self, integrated, experiments, callback=None):
"""
Write a serialized python dictionary with integrated intensities and other information
suitable for use by cxi.merge or prime.postrefine.
@param integrated Reflection table with integrated intensities
@param experiments Experiment list. One integration pickle for each experiment will be created.
@param callback Deriving classes can use callback to make further modifications to the dictionary
before it is serialized. Callback should be a function with this signature:
def functionname(params, outfile, frame), where params is the phil scope, outfile is the path
to the pickle that will be saved, and frame is the python dictionary to be serialized.
"""
if not hasattr(self.params.output, "integration_pickle"):
return
if self.params.output.integration_pickle is not None:
from xfel.command_line.frame_extractor import ConstructFrame
# Split everything into separate experiments for pickling
for e_number, experiment in enumerate(experiments):
e_selection = integrated["id"] == e_number
reflections = integrated.select(e_selection)
frame = ConstructFrame(reflections, experiment).make_frame()
frame["pixel_size"] = experiment.detector[0].get_pixel_size()[0]
if not hasattr(self, "tag") or self.tag is None:
try:
# if the data was a file on disc, get the path
event_timestamp = os.path.splitext(
experiments[0].imageset.paths()[0]
)[0]
except NotImplementedError:
# if the data is in memory only, check if the reader set a timestamp on the format object
event_timestamp = (
experiment.imageset.reader().get_format(0).timestamp
)
event_timestamp = os.path.basename(event_timestamp)
if event_timestamp.find("shot-") == 0:
event_timestamp = os.path.splitext(event_timestamp)[
0
] # micromanage the file name
else:
event_timestamp = self.tag
if hasattr(self.params.output, "output_dir"):
outfile = os.path.join(
self.params.output.output_dir,
self.params.output.integration_pickle
% (e_number, event_timestamp),
)
else:
outfile = os.path.join(
os.path.dirname(self.params.output.integration_pickle),
self.params.output.integration_pickle
% (e_number, event_timestamp),
)
if callback is not None:
callback(self.params, outfile, frame)
if self.params.output.composite_output:
self.all_int_pickle_filenames.append(os.path.basename(outfile))
self.all_int_pickles.append(frame)
else:
with open(outfile, "wb") as fh:
pickle.dump(frame, fh, protocol=pickle.HIGHEST_PROTOCOL)
def process_reference(self, reference):
"""Load the reference spots."""
if reference is None:
return None, None
st = time.time()
assert "miller_index" in reference
assert "id" in reference
logger.info("Processing reference reflections")
logger.info(" read %d strong spots", len(reference))
mask = reference.get_flags(reference.flags.indexed)
rubbish = reference.select(~mask)
if mask.count(False) > 0:
reference.del_selected(~mask)
logger.info(" removing %d unindexed reflections", mask.count(True))
if len(reference) == 0:
raise Sorry(
"""
Invalid input for reference reflections.
Expected > %d indexed spots, got %d
"""
% (0, len(reference))
)
mask = reference["miller_index"] == (0, 0, 0)
if mask.count(True) > 0:
rubbish.extend(reference.select(mask))
reference.del_selected(mask)
logger.info(" removing %d reflections with hkl (0,0,0)", mask.count(True))
mask = reference["id"] < 0
if mask.count(True) > 0:
raise Sorry(
"""
Invalid input for reference reflections.
%d reference spots have an invalid experiment id
"""
% mask.count(True)
)
logger.info(" using %d indexed reflections", len(reference))
logger.info(" found %d junk reflections", len(rubbish))
logger.info(" time taken: %g", time.time() - st)
return reference, rubbish
def save_reflections(self, reflections, filename):
"""Save the reflections to file."""
st = time.time()
logger.info("Saving %d reflections to %s", len(reflections), filename)
reflections.as_file(filename)
logger.info(" time taken: %g", time.time() - st)
def finalize(self):
"""Perform any final operations"""
if self.params.output.composite_output:
if self.params.mp.composite_stride is not None:
assert self.params.mp.method == "mpi"
stride = self.params.mp.composite_stride
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
size = comm.Get_size() # size: number of processes running in this job
comm.barrier()
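# Worked example of the stride-based gather below: with mp.composite_stride=4
# and 8 MPI ranks, ranks 0 and 4 act as receivers; rank 0 collects from
# subranks [1, 2, 3] and rank 4 from [5, 6, 7], while every other rank sends
# its accumulated composite data to (rank // stride) * stride and then clears
# its local copies.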
if rank % stride == 0:
subranks = [rank + i for i in range(1, stride) if rank + i < size]
for i in range(len(subranks)):
logger.info("Rank %d waiting for sender", rank)
(
sender,
imported_experiments,
strong_reflections,
indexed_experiments,
indexed_reflections,
integrated_experiments,
integrated_reflections,
coset_experiments,
coset_reflections,
int_pickles,
int_pickle_filenames,
) = comm.recv(source=MPI.ANY_SOURCE)
logger.info("Rank %d received data from rank %d", rank, sender)
def extend_with_bookkeeping(
src_expts, src_refls, dest_expts, dest_refls
):
n = len(dest_refls.experiment_identifiers())
src_refls["id"] += n
idents = src_refls.experiment_identifiers()
keys = idents.keys()
values = idents.values()
for key in keys:
del idents[key]
for i, key in enumerate(keys):
idents[key + n] = values[i]
dest_expts.extend(src_expts)
dest_refls.extend(src_refls)
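# extend_with_bookkeeping shifts every incoming reflection id (and its
# experiment_identifiers key) by the number of identifiers already present in
# the destination table, so ids remain unique after merging. For example, if
# dest_refls already tracks 3 experiments, incoming reflections with ids 0 and
# 1 are renumbered to 3 and 4 before both tables are extended.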
if len(imported_experiments) > 0:
extend_with_bookkeeping(
imported_experiments,
strong_reflections,
self.all_imported_experiments,
self.all_strong_reflections,
)
if len(indexed_experiments) > 0:
extend_with_bookkeeping(
indexed_experiments,
indexed_reflections,
self.all_indexed_experiments,
self.all_indexed_reflections,
)
if len(integrated_experiments) > 0:
extend_with_bookkeeping(
integrated_experiments,
integrated_reflections,
self.all_integrated_experiments,
self.all_integrated_reflections,
)
if len(coset_experiments) > 0:
extend_with_bookkeeping(
coset_experiments,
coset_reflections,
self.all_coset_experiments,
self.all_coset_reflections,
)
self.all_int_pickles.extend(int_pickles)
self.all_int_pickle_filenames.extend(int_pickle_filenames)
else:
destrank = (rank // stride) * stride
logger.info(
"Rank %d sending results to rank %d",
rank,
(rank // stride) * stride,
)
comm.send(
(
rank,
self.all_imported_experiments,
self.all_strong_reflections,
self.all_indexed_experiments,
self.all_indexed_reflections,
self.all_integrated_experiments,
self.all_integrated_reflections,
self.all_coset_experiments,
self.all_coset_reflections,
self.all_int_pickles,
self.all_int_pickle_filenames,
),
dest=destrank,
)
self.all_imported_experiments = (
self.all_strong_reflections
) = (
self.all_indexed_experiments
) = (
self.all_indexed_reflections
) = (
self.all_integrated_experiments
) = (
self.all_integrated_reflections
) = (
self.all_coset_experiments
) = (
self.all_coset_reflections
) = self.all_int_pickles = self.all_int_pickle_filenames = []
# Dump composite files to disk
if (
len(self.all_imported_experiments) > 0
and self.params.output.experiments_filename
):
self.all_imported_experiments.as_json(
self.params.output.experiments_filename
)
if (
len(self.all_strong_reflections) > 0
and self.params.output.strong_filename
):
self.save_reflections(
self.all_strong_reflections, self.params.output.strong_filename
)
if (
len(self.all_indexed_experiments) > 0
and self.params.output.refined_experiments_filename
):
self.all_indexed_experiments.as_json(
self.params.output.refined_experiments_filename
)
if (
len(self.all_indexed_reflections) > 0
and self.params.output.indexed_filename
):
self.save_reflections(
self.all_indexed_reflections, self.params.output.indexed_filename
)
if (
len(self.all_integrated_experiments) > 0
and self.params.output.integrated_experiments_filename
):
self.all_integrated_experiments.as_json(
self.params.output.integrated_experiments_filename
)
if (
len(self.all_integrated_reflections) > 0
and self.params.output.integrated_filename
):
self.save_reflections(
self.all_integrated_reflections,
self.params.output.integrated_filename,
)
if self.params.dispatch.coset:
if (
len(self.all_coset_experiments) > 0
and self.params.output.coset_experiments_filename
):
self.all_coset_experiments.as_json(
self.params.output.coset_experiments_filename
)
if (
len(self.all_coset_reflections) > 0
and self.params.output.coset_filename
):
self.save_reflections(
self.all_coset_reflections, self.params.output.coset_filename
)
# Create a tar archive of the integration dictionary pickles
if len(self.all_int_pickles) > 0 and self.params.output.integration_pickle:
tar_template_integration_pickle = (
self.params.output.integration_pickle.replace("%d", "%s")
)
outfile = (
os.path.join(
self.params.output.output_dir,
tar_template_integration_pickle % ("x", self.composite_tag),
)
+ ".tar"
)
tar = tarfile.TarFile(outfile, "w")
for i, (fname, d) in enumerate(
zip(self.all_int_pickle_filenames, self.all_int_pickles)
):
string = BytesIO(pickle.dumps(d, protocol=2))
info = tarfile.TarInfo(name=fname)
info.size = string.getbuffer().nbytes
info.mtime = time.time()
tar.addfile(tarinfo=info, fileobj=string)
tar.close()
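# A read-back sketch for the archive written above, using only the standard
# library (the file name is a placeholder; the real name follows the
# integration_pickle template plus the composite tag):
#
#   import pickle, tarfile
#   with tarfile.open("int_x_composite.tar") as tar:
#       for member in tar.getmembers():
#           frame = pickle.loads(tar.extractfile(member).read())
#
# Each recovered frame is one of the dictionaries produced by
# write_integration_pickles.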
@dials.util.show_mail_handle_errors()
def run(args=None):
script = Script()
script.run(args)
if __name__ == "__main__":
run()
|
py
|
1a5779b25327b884487f5baedef5f43b9400358b
|
from django.contrib.auth import views as auth_views
from django.urls import path
from prometheus_client import Gauge
import vote.views
from management import views
from management.models import ElectionManager
from vote.models import Election, Session
app_name = 'management'
election_gauge = Gauge('wahlfang_election_count', 'Wahlfang Number of Elections')
election_gauge.set_function(lambda: Election.objects.all().count())
election_manager_gauge = Gauge('wahlfang_election_manager_count', 'Wahlfang Number of Election Managers')
election_manager_gauge.set_function(lambda: ElectionManager.objects.all().count())
session_gauge = Gauge('wahlfang_session_count', 'Wahlfang Number of Sessions')
session_gauge.set_function(lambda: Session.objects.all().count())
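# Gauge.set_function registers a callable that prometheus_client evaluates at
# collection time, so the counts above reflect the database state at the
# moment the metrics endpoint is scraped rather than at import. A minimal
# sketch of rendering them (the value shown is illustrative):
#
#   from prometheus_client import generate_latest
#   print(generate_latest().decode())  # contains e.g. "wahlfang_election_count 42.0"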
urlpatterns = [
path('', views.index, name='index'),
path('help', views.help_page, name='help'),
# Session
path('meeting/<int:pk>', views.session_detail, name='session'),
path('meeting/<int:pk>/settings', views.session_settings, name='session_settings'),
path('meeting/<int:pk>/delete_session', views.delete_session, name='delete_session'),
path('meeting/<int:pk>/add_voters', views.add_voters, name='add_voters'),
path('meeting/<int:pk>/add_tokens', views.add_tokens, name='add_tokens'),
path('meeting/<int:pk>/add_election', views.add_election, name='add_election'),
path('meeting/<int:pk>/print_token', views.print_token, name='print_token'),
path('meeting/<int:pk>/import_csv', views.import_csv, name='import_csv'),
path('meeting/<int:pk>/spectator', views.spectator, name='spectator'),
# Election
path('election/<int:pk>/add_application', views.election_upload_application, name='add_application'),
path('election/<int:pk>/edit/<int:application_id>', views.election_upload_application, name='edit_application'),
path('election/<int:pk>/edit/<int:application_id>/delete_application', views.election_delete_application,
name='delete_application'),
path('election/<int:pk>', views.election_detail, name='election'),
path('election/<int:pk>/delete_voter', views.delete_voter, name='delete_voter'),
path('election/<int:pk>/delete_election', views.delete_election, name='delete_election'),
path('election/<int:pk>/export_csv', views.export_csv, name='export_csv'),
# account management stuff
path('login', views.LoginView.as_view(), name='login'),
path('logout', auth_views.LogoutView.as_view(
next_page='management:login',
), name='logout')
]
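# Because app_name = 'management', these routes are reversed with the
# namespace prefix, e.g.:
#
#   from django.urls import reverse
#   reverse('management:session', kwargs={'pk': 1})  # -> ".../meeting/1"
#
# The exact prefix depends on how this urlconf is included in the project's
# ROOT_URLCONF, which is outside this file.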
|
py
|
1a577b4eaf3772f59bd6f7c8f7f9c19cfaa42d7a
|
#
# PySNMP MIB module MOBANetClocksV2-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MOBANetClocksV2-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:13:37 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ObjectIdentity, Bits, ModuleIdentity, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Gauge32, MibIdentifier, iso, Counter64, TimeTicks, Integer32, Unsigned32, IpAddress, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Bits", "ModuleIdentity", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Gauge32", "MibIdentifier", "iso", "Counter64", "TimeTicks", "Integer32", "Unsigned32", "IpAddress", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
mbnscMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 13842, 6, 100))
mbnscMIB.setRevisions(('2014-06-17 12:02',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: mbnscMIB.setRevisionsDescriptions(('initial version of this module',))
if mibBuilder.loadTexts: mbnscMIB.setLastUpdated('201406171202Z')
if mibBuilder.loadTexts: mbnscMIB.setOrganization('Moser-Baer AG')
if mibBuilder.loadTexts: mbnscMIB.setContactInfo('Moser-Baer AG Spitalstr. 7 3454 Sumiswald [email protected]')
if mibBuilder.loadTexts: mbnscMIB.setDescription('This mib definition is used for all MOBATime network slave clocks.')
class MOBAAlarm64(TextualConvention, OctetString):
description = 'The alarm list is an array of bytes. The first byte defines the mask for the alarm bits 0..7. The last byte defines the mask for the alarm bits 56..63. Every byte has the range 0 ... 255 (binary).'
status = 'current'
displayHint = '1x1x1x1x.1x1x1x1x'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class MOBAFlags64(TextualConvention, OctetString):
description = 'The flags are an array of bytes. The first byte defines the mask for the status bits 0..7. The last byte defines the mask for the status bits 56..63. Every byte has the range 0 ... 255 (binary).'
status = 'current'
displayHint = '1x1x1x1x.1x1x1x1x'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class MOBANetworkName(TextualConvention, OctetString):
description = 'Fully qualified domain names or IP addresses in ASCII. IPv4 xxx.xxx.xxx.xxx IPV6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx name string max 40 chars'
status = 'current'
displayHint = '255a'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(1, 40)
mobatime = MibIdentifier((1, 3, 6, 1, 4, 1, 13842))
mobaNetClocks = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6))
mobaNetClocksV2 = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2))
mbnscNet = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1))
mbnscNetGen = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1))
mbnscNetGenMAC = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetGenMAC.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenMAC.setDescription('MAC-Address (xx xx xx xx xx xx)')
mbnscNetGenIPMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("both", 0), ("ipv4only", 1), ("ipv6only", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenIPMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenIPMode.setDescription('IP Mode: 0: IPv4 and IPv6 1: IPv4 only 2: IPv6 only')
mbnscNetGenIPNameserver = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenIPNameserver.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenIPNameserver.setDescription('IPv4 or IPv6 address (xxx.xxx.xxx.xxx or xxxx::xxxx:xxxx) of the nameserver')
mbnscNetGenTZClientPort = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenTZClientPort.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenTZClientPort.setDescription('Timezone client port. Default 65534')
mbnscNetGenConfigPort = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenConfigPort.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenConfigPort.setDescription('Configuration port. Default 65532')
mbnscNetGenSnmpMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetGenSnmpMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenSnmpMode.setDescription('SNMP mode disabled 0: SNMP enabled (default) 1: SNMP disabled')
mbnscNetGenMulticastMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenMulticastMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenMulticastMode.setDescription('Multicast mode disabled 0: Multicast enabled (default) 1: Multicast disabled')
mbnscNetGenHostname = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenHostname.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenHostname.setDescription('Host name of the device')
mbnscNetGenCommMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetGenCommMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenCommMode.setDescription('Communication / synchronisation mode 0=unicast 1=multicast (only for digital clocks)')
mbnscNetGenMCastGrpIP = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 10), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenMCastGrpIP.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenMCastGrpIP.setDescription('IP network address (xxx.xxx.xxx.xxx) of the multicast group IP e.g. 239.192.54.1 (only for digital clocks)')
mbnscNetGenConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetGenConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenConfigCmd.setDescription('Network configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscNetGenConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 1, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetGenConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscNetGenConfigChangedTime.setDescription('Time of the last configuration change.')
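# A query sketch, assuming the device answers SNMP with a community string of
# 'public' and the placeholder address 192.0.2.10 (both are illustrative; see
# mbnscNetGenSnmpMode above for whether SNMP is enabled at all):
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#   errInd, errStat, errIdx, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('192.0.2.10', 161)), ContextData(),
#       ObjectType(ObjectIdentity('MOBANetClocksV2-MIB', 'mbnscNetGenHostname', 0))))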
mbnscNetIPv4 = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 2))
mbnscNetIPv4Addr = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 2, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv4Addr.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv4Addr.setDescription('IP network address (xxx.xxx.xxx.xxx) of the network slave clock')
mbnscNetIPv4Mask = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 2, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv4Mask.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv4Mask.setDescription('IP subnet mask (xxx.xxx.xxx.xxx)')
mbnscNetIPv4Gateway = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 2, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv4Gateway.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv4Gateway.setDescription('IP network address (xxx.xxx.xxx.xxx) for the gateway')
mbnscNetIPv4DHCPMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 2, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv4DHCPMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv4DHCPMode.setDescription('DHCP mode 0: DHCP disabled 1: DHCP enabled (default)')
mbnscNetIPv4ConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 2, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv4ConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv4ConfigCmd.setDescription('Network configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscNetIPv4ConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 2, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetIPv4ConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv4ConfigChangedTime.setDescription('Time of the last configuration change.')
mbnscNetIPv6 = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3))
mbnscNetIPv6AddrLocal = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetIPv6AddrLocal.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6AddrLocal.setDescription('IP V6 link local network address (xxxx:xxxx::xxxx)')
mbnscNetIPv6AddrAuto = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetIPv6AddrAuto.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6AddrAuto.setDescription('IP V6 network address by autoconfig (xxxx:xxxx::xxxx)')
mbnscNetIPv6AddrDHCP = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetIPv6AddrDHCP.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6AddrDHCP.setDescription('IP V6 network address by DHCPv6 (xxxx:xxxx::xxxx)')
mbnscNetIPv6AddrFix = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 4), MOBANetworkName().subtype(subtypeSpec=ValueSizeConstraint(40, 40)).setFixedLength(40)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv6AddrFix.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6AddrFix.setDescription('IP V6 network address manual set (xxxx:xxxx::xxxx)')
mbnscNetIPv6Prefix = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv6Prefix.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6Prefix.setDescription('IPv6 subnet prefix for manual set IP address')
mbnscNetIPv6Gateway = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetIPv6Gateway.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6Gateway.setDescription('IP V6 network address of gateway (next hop) (xxxx:xxxx::xxxx)')
mbnscNetIPv6Config = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("both", 0), ("autoonly", 1), ("dhcpv6only", 2), ("none", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv6Config.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6Config.setDescription('Mode of IPv6 address configuration 0: auto and fixed 1: autoconfig only 2: DHCPv6 only 3: none')
mbnscNetIPv6ConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNetIPv6ConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6ConfigCmd.setDescription('Network configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscNetIPv6ConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 1, 3, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNetIPv6ConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscNetIPv6ConfigChangedTime.setDescription('Time of the last configuration change.')
mbnscTime = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2))
mbnscTimeNTP1 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeNTP1.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeNTP1.setDescription('String with IPv4 or IPv6 address (192.168.3.4 or FD03:2345:2345:2345::231) or DNS name to get NTP time information.')
mbnscTimeNTP2 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeNTP2.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeNTP2.setDescription('String with IPv4 or IPv6 address (192.168.3.4 or FD03:2345:2345:2345::231) or DNS name to get NTP time information.')
mbnscTimeNTP3 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeNTP3.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeNTP3.setDescription('String with IPv4 or IPv6 address (192.168.3.4 or FD03:2345:2345:2345::231) or DNS name to get NTP time information.')
mbnscTimeNTP4 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeNTP4.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeNTP4.setDescription('String with IPv4 or IPv6 address (192.168.3.4 or FD03:2345:2345:2345::231) or DNS name to get NTP time information.')
mbnscTimeNTPcurrent = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeNTPcurrent.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeNTPcurrent.setDescription('IPv4 or IPv6 address of current NTP server')
mbnscTimeNTPpollIntervall = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(10, 999))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeNTPpollIntervall.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeNTPpollIntervall.setDescription('NTP request interval 10..999 sec')
mbnscTimeDeviceTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 7), Unsigned32()).setUnits('Seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeDeviceTime.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeDeviceTime.setDescription('Actual device time (UTC). Seconds since 1.1.1970 00:00:00')
mbnscTimeLocOffset = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-720, 720))).setUnits('minutes (min)').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeLocOffset.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeLocOffset.setDescription('Offset to local time in minutes (-720..720)')
mbnscTimeLastReception = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 9), Unsigned32()).setUnits('Seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeLastReception.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeLastReception.setDescription('Last received time (UTC). Seconds since 1.1.1970 00:00:00')
mbnscTimeConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeConfigCmd.setDescription('Time configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscTimeConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 2, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeConfigChangedTime.setDescription('Time of the last configuration change.')
mbnscTimeZone = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3))
mbnscTimeZoneVersion = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneVersion.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneVersion.setDescription('Actual version internal time zone table')
mbnscTimeZoneNumber = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeZoneNumber.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneNumber.setDescription('Time zone 0: UTC 1..64: See user manual, time zone table 65..128: user defined time zone 129..143: time zone server 1..15 254: automatic time zone - e.g. UTC 255: internal user defined time zone (SET only used for digital clocks)')
mbnscTimeZoneEntry1 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneEntry1.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneEntry1.setDescription('Time Zone entry -> see special definition')
mbnscTimeZoneEntry2 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneEntry2.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneEntry2.setDescription('Time Zone entry -> see special definition')
mbnscTimeZoneEntry3 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneEntry3.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneEntry3.setDescription('Time Zone entry -> see special definition')
mbnscTimeZoneEntry4 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneEntry4.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneEntry4.setDescription('Time Zone entry -> see special definition')
mbnscTimeZoneEntry5 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneEntry5.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneEntry5.setDescription('Time Zone entry -> see special definition')
mbnscTimeZoneEntry6 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneEntry6.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneEntry6.setDescription('Time Zone entry -> see special definition')
mbnscTimeZoneEntry7 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneEntry7.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneEntry7.setDescription('Time Zone entry -> see special definition')
mbnscTimeZoneConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscTimeZoneConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneConfigCmd.setDescription('Time zone configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscTimeZoneConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 3, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscTimeZoneConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscTimeZoneConfigChangedTime.setDescription('Time of the last configuration change.')
mbnscMode = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4))
mbnscModeSwitchInfo = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscModeSwitchInfo.setStatus('current')
if mibBuilder.loadTexts: mbnscModeSwitchInfo.setDescription('Current switch settings (only analogue movements)')
mbnscModeDisplayBrightness = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeDisplayBrightness.setStatus('current')
if mibBuilder.loadTexts: mbnscModeDisplayBrightness.setDescription('Display brightness for digital clocks 1..30, A (Auto)')
mbnscModeDisplayFormat = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeDisplayFormat.setStatus('current')
if mibBuilder.loadTexts: mbnscModeDisplayFormat.setDescription('Display format 12h/24h for digital clocks')
mbnscModeDisplayAlternate = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeDisplayAlternate.setStatus('current')
if mibBuilder.loadTexts: mbnscModeDisplayAlternate.setDescription('Display alternating mode for digital clocks 1: time continuously 2: date cont. 3: temperature cont. (sequence 6 sec. time, 3 sec. date for ECO-DC) 4: stopwatch cont. (only DC clock) 5: sequence 6 sec. time, 3 sec. date (only DC clock) 6: sequence 8 sec. time, 3 sec. date, 3 sec. temperature (only DC clock)')
mbnscModeNTP = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2)))
if mibBuilder.loadTexts: mbnscModeNTP.setStatus('obsolete')
if mibBuilder.loadTexts: mbnscModeNTP.setDescription('NTP synchronisation mode for digital clocks')
mbnscModeIRlock = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeIRlock.setStatus('current')
if mibBuilder.loadTexts: mbnscModeIRlock.setDescription('IR autolock time for digital clocks (only DC clock) 1..60, U (autolock off)')
mbnscModeTimeDispZeros = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeTimeDispZeros.setStatus('current')
if mibBuilder.loadTexts: mbnscModeTimeDispZeros.setDescription('Leading zeros time display for digital clocks (only DC clock) 1: time with leading zero 2: time without leading zero')
mbnscModeDateDispZeros = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeDateDispZeros.setStatus('current')
if mibBuilder.loadTexts: mbnscModeDateDispZeros.setDescription('Leading zeros date display for digital clocks (only DC clock) 1: date with leading zero 2: date without leading zero')
mbnscModeTempUnit = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeTempUnit.setStatus('current')
if mibBuilder.loadTexts: mbnscModeTempUnit.setDescription('Temperature unit for digital clocks (only DC clock) C: Celsius F: Fahrenheit')
mbnscModeClockOpMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeClockOpMode.setStatus('current')
if mibBuilder.loadTexts: mbnscModeClockOpMode.setDescription('Operation mode for digital clocks (only DC clock) 0: normal mode 1: special mode 1 2: special mode 2')
mbnscModeNWParam = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2)))
if mibBuilder.loadTexts: mbnscModeNWParam.setStatus('obsolete')
if mibBuilder.loadTexts: mbnscModeNWParam.setDescription('Network parameter setting mode for digital clocks')
mbnscModeDispDerating = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeDispDerating.setStatus('current')
if mibBuilder.loadTexts: mbnscModeDispDerating.setDescription('Display current deration setting for digital clocks (see service menu definition)')
mbnscModeLightCorr = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeLightCorr.setStatus('current')
if mibBuilder.loadTexts: mbnscModeLightCorr.setDescription('Light measurement correction for digital clocks (see service menu definition)')
mbnscAdditionalDigitalClockModes = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30))
mbnscSensors = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 1))
mbnscSensorsTempActivation = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSensorsTempActivation.setStatus('current')
if mibBuilder.loadTexts: mbnscSensorsTempActivation.setDescription('Temperature sensor activation 1: DS18B20 3: TME (only sensor 1) 4: TME (Sensors 1 and 2)')
mbnscSensorsTemp1IPAddr = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSensorsTemp1IPAddr.setStatus('current')
if mibBuilder.loadTexts: mbnscSensorsTemp1IPAddr.setDescription('IP network address (xxx.xxx.xxx.xxx) of the sensor 1 (SET only used for digital clocks)')
mbnscSensorsTemp2IPAddr = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSensorsTemp2IPAddr.setStatus('current')
if mibBuilder.loadTexts: mbnscSensorsTemp2IPAddr.setDescription('IP network address (xxx.xxx.xxx.xxx) of the sensor 2 (SET only used for digital clocks)')
mbnscSensorsConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 1, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSensorsConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscSensorsConfigCmd.setDescription('Mode configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscSensorsConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 1, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscSensorsConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscSensorsConfigChangedTime.setDescription('Time of the last configuration change.')
mbnscDA = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 2))
mbnscDASecondCircleDisplay = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDASecondCircleDisplay.setStatus('current')
if mibBuilder.loadTexts: mbnscDASecondCircleDisplay.setDescription('The second circle display mode 1: Accumulating ring of seconds marker 2: Take actual second from full shining circle step by step 3: Full shining circle, actual second off 4: Three shining seconds, the first is showing actual second 5: Two shining seconds, the first is showing actual second')
mbnscDAConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 2, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDAConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscDAConfigCmd.setDescription('Mode configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscDAConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 2, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscDAConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscDAConfigChangedTime.setDescription('Time of the last configuration change.')
mbnscDK = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3))
mbnscDKFirstLanguage = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKFirstLanguage.setStatus('current')
if mibBuilder.loadTexts: mbnscDKFirstLanguage.setDescription('First language selection 1: Czech 2: Slovak 3: English 4: German 5: ... (see manual for full language list)')
mbnscDKSecondLanguage = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKSecondLanguage.setStatus('current')
if mibBuilder.loadTexts: mbnscDKSecondLanguage.setDescription('Second language selection 1: Czech 2: Slovak 3: English 4: German 5: ... (see manual for full language list)')
mbnscDKThirdLanguage = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKThirdLanguage.setStatus('current')
if mibBuilder.loadTexts: mbnscDKThirdLanguage.setDescription('Third language selection 1: Czech 2: Slovak 3: English 4: German 5: ... (see manual for full language list)')
mbnscDKTempUnitSecondLang = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKTempUnitSecondLang.setStatus('current')
if mibBuilder.loadTexts: mbnscDKTempUnitSecondLang.setDescription('Temperature units for second selected language C: Celsius F: Fahrenheit')
mbnscDKTempUnitThirdLang = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKTempUnitThirdLang.setStatus('current')
if mibBuilder.loadTexts: mbnscDKTempUnitThirdLang.setDescription('Temperature units for third selected language C: Celsius F: Fahrenheit')
mbnscDKAutoLangSwitchOver = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKAutoLangSwitchOver.setStatus('current')
if mibBuilder.loadTexts: mbnscDKAutoLangSwitchOver.setDescription('Language switch mode for automatic language switching over in one display alternating cycle A: all languages S: simple language')
mbnscDKNumOfCharsForWeekday = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKNumOfCharsForWeekday.setStatus('current')
if mibBuilder.loadTexts: mbnscDKNumOfCharsForWeekday.setDescription('Number of characters used for weekday names 2: two characters 3: three characters')
mbnscDKNamesFormatDisplay = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKNamesFormatDisplay.setStatus('current')
if mibBuilder.loadTexts: mbnscDKNamesFormatDisplay.setDescription('Weekday and month display format 1: first capital 2: all capitals')
mbnscDKTemp1DescriptEnable = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKTemp1DescriptEnable.setStatus('current')
if mibBuilder.loadTexts: mbnscDKTemp1DescriptEnable.setDescription('Display of description for first measured temperature N/Y')
mbnscDKTemp1Description = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKTemp1Description.setStatus('current')
if mibBuilder.loadTexts: mbnscDKTemp1Description.setDescription('Description text for first measured temperature')
mbnscDKTemp2DescriptEnable = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKTemp2DescriptEnable.setStatus('current')
if mibBuilder.loadTexts: mbnscDKTemp2DescriptEnable.setDescription('Display of description for second measured temperature N/Y')
mbnscDKTemp2Description = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKTemp2Description.setStatus('current')
if mibBuilder.loadTexts: mbnscDKTemp2Description.setDescription('Description text for second measured temperature')
mbnscDKWorldTimeZone1 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone1.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone1.setDescription('Index of time zone for world time zone 1 0: UTC 1..64: See user manual, time zone table 65..128: user defined time zone 129..143: time zone server 1..15 255: none (SET only used for digital clocks)')
mbnscDKWorldTimeZone1Description = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone1Description.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone1Description.setDescription('Description text for World time zone 1')
mbnscDKWorldTimeZone2 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone2.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone2.setDescription('Index of time zone for world time zone 2 0: UTC 1..64: See user manual, time zone table 65..128: user defined time zone 129..143: time zone server 1..15 255: none (SET only used for digital clocks)')
mbnscDKWorldTimeZone2Description = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 16), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone2Description.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone2Description.setDescription('Description text for World time zone 2')
mbnscDKWorldTimeZone3 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone3.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone3.setDescription('Index of time zone for world time zone 3 0: UTC 1..64: See user manual, time zone table 65..128: user defined time zone 129..143: time zone server 1..15 255: none (SET only used for digital clocks)')
mbnscDKWorldTimeZone3Description = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 18), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone3Description.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone3Description.setDescription('Description text for World time zone 3')
mbnscDKWorldTimeZone4 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone4.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone4.setDescription('Index of time zone for world time zone 4 0: UTC 1..64: See user manual, time zone table 65..128: user defined time zone 129..143: time zone server 1..15 255: none (SET only used for digital clocks)')
mbnscDKWorldTimeZone4Description = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 20), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone4Description.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone4Description.setDescription('Description text for World time zone 4')
mbnscDKWorldTimeZone5 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone5.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone5.setDescription('Index of time zone for world time zone 5 0: UTC 1..64: See user manual, time zone table 65..128: user defined time zone 129..143: time zone server 1..15 255: none (SET only used for digital clocks)')
mbnscDKWorldTimeZone5Description = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 22), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKWorldTimeZone5Description.setStatus('current')
if mibBuilder.loadTexts: mbnscDKWorldTimeZone5Description.setDescription('Description text for World time zone 5')
mbnscDKAutoTimeZoneSwitchOver = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 23), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKAutoTimeZoneSwitchOver.setStatus('current')
if mibBuilder.loadTexts: mbnscDKAutoTimeZoneSwitchOver.setDescription('World time switch mode for automatic time zone switching over in one display alternating cycle A: all world time zones S: simple world time zone')
mbnscDKConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscDKConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscDKConfigCmd.setDescription('Mode configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscDKConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 30, 3, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscDKConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscDKConfigChangedTime.setDescription('Time of the last configuration change.')
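# Mode configuration change command and timestamp for the display-mode subtree as a whole
# (same 0 / 1 / 2 command semantics as the other *ConfigCmd scalars above).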
mbnscModeConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscModeConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscModeConfigCmd.setDescription('Mode configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscModeConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscModeConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscModeConfigChangedTime.setDescription('Time of the last configuration change.')
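# --- Additional interface modes (OID ...13842.6.2.4.40): settings for the Network MOBALine
#     Interface (NMI): DCF current loop, line driver, MOBALine and Active-DCF operating modes.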
mbnscAdditionalInterfaceModes = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40))
mbnscNMI = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1))
mbnscNMIDCFCurrentLoop = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNMIDCFCurrentLoop.setStatus('current')
if mibBuilder.loadTexts: mbnscNMIDCFCurrentLoop.setDescription('Operation mode of the DCF77 current loop. 0: OFF 1: ON')
mbnscNMILineDriver = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("off", 0), ("mobaline", 1), ("activdcf", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNMILineDriver.setStatus('current')
if mibBuilder.loadTexts: mbnscNMILineDriver.setDescription('Operation mode of the line driver. 0: OFF 1: MOBALine 2: Active DCF')
mbnscNMIMOBALineMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("command12h", 1), ("clockidcommand", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNMIMOBALineMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNMIMOBALineMode.setDescription('Operation mode of MOBALine. 0: normal operation 1: 12:00 mode, sideclocks shows 12:00 position 2: clock-id mode, sideclocks shows their ids if configured')
mbnscNMIMOBALineMinuteHandMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("minutestep", 0), ("halfminutestep", 1), ("continuous", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNMIMOBALineMinuteHandMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNMIMOBALineMinuteHandMode.setDescription('Minute hand mode operation of MOBALine. 0: minute step mode 1: half minute step mode 2: continuous mode')
mbnscNMIActiveDCFMode = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("mode1", 1), ("mode2", 2), ("mode3", 3), ("mode4", 4), ("mode5", 5), ("mode6", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNMIActiveDCFMode.setStatus('current')
if mibBuilder.loadTexts: mbnscNMIActiveDCFMode.setDescription('Operation mode of Active DCF. 1: Mode 1 2: Mode 2 3: Mode 3 4: Mode 4 5: Mode 5 6: Mode 6')
mbnscNMIConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscNMIConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscNMIConfigCmd.setDescription('General configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscNMIConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMIConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscNMIConfigChangedTime.setDescription('Time of the last configuration change.')
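# Side clock supervision: twelve read-only scalars reporting the state of MOBALine side
# clocks 1..12 (0 = not configured, 3 = clock OK, 5 = clock error).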
mbnscNMISideClockState = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20))
mbnscNMISideClock1 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock1.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock1.setDescription('State of side clock 1 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock2 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock2.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock2.setDescription('State of side clock 2 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock3 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock3.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock3.setDescription('State of side clock 3 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock4 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock4.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock4.setDescription('State of side clock 4 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock5 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock5.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock5.setDescription('State of side clock 5 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock6 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock6.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock6.setDescription('State of side clock 6 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock7 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock7.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock7.setDescription('State of side clock 7 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock8 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock8.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock8.setDescription('State of side clock 8 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock9 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock9.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock9.setDescription('State of side clock 9 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock10 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock10.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock10.setDescription('State of side clock 10 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock11 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock11.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock11.setDescription('State of side clock 11 0: Clock not configured 3: Clock OK 5: Clock error')
mbnscNMISideClock12 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 4, 40, 1, 20, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 3, 5))).clone(namedValues=NamedValues(("notconfigured", 0), ("clockok", 3), ("clockerror", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscNMISideClock12.setStatus('current')
if mibBuilder.loadTexts: mbnscNMISideClock12.setDescription('State of side clock 12 0: Clock not configured 3: Clock OK 5: Clock error')
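# --- General device information (OID ...13842.6.2.5): device type, name, firmware article
#     number and version, 64-bit status and alarm flag fields, and info blocks for up to
#     three cascaded slaves.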
mbnscGeneral = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5))
mbnscGeneralType = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 14))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralType.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralType.setDescription('Network slave clock type 1 Network Clock Interface NCI 2 Wireless Time Distribution WTD 868-T 3 NTP movement SEN 00 4 NTP movement NBU 190 5 Digital Clock ECO-DC 6 Digital Clock DC3 7 NTP movement SEN 40 8 NTP movement SAN 40 9 NTP digital clock DA 10 NTP calendar digital clock DK2 11 Network MOBALine Interface NMI 12 NTP clock TREND 13 NTP clock TREND double sided 14 NTP in- and outdoor digital clock DSC')
mbnscGeneralName = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscGeneralName.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralName.setDescription('Device name (identifier)')
mbnscGeneralFirmwareNumber = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralFirmwareNumber.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralFirmwareNumber.setDescription('Article number of the firmware.')
mbnscGeneralFirmwareVer = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralFirmwareVer.setReference('OFS_PROD_FIRMW_VER, 27, VARTYPE_STRING')
if mibBuilder.loadTexts: mbnscGeneralFirmwareVer.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralFirmwareVer.setDescription('Firmware version.')
mbnscGeneralStatus = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 5), MOBAFlags64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralStatus.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralStatus.setDescription('Network slave clock status (64bit). Internal state of the network slave clock. Used for maintenance only. Bit 00 : time ok Bit 01 : 12h position Bit 02 : Reserved Bit 03 : Reserved Bit 04 : Reserved Bit 05 : Reserved Bit 06 : Reserved Bit 07 : Reserved Bit 08 : Reserved Bit 09 : Reserved Bit 10 : Reserved Bit 11 : Reserved Bit 12 : Reserved Bit 13 : Reserved Bit 14 : Reserved Bit 15 : Reserved Bit 16 : Reserved Bit 17 : Reserved Bit 18 : Reserved Bit 19 : Reserved Bit 20 : Reserved Bit 21 : Reserved Bit 22 : Reserved Bit 23 : Reserved Bit 24 : Reserved Bit 25 : Reserved Bit 26 : Reserved Bit 27 : Reserved Bit 28 : Reserved Bit 29 : Reserved Bit 30 : Reserved Bit 31 : Reserved Bit32..63 not used')
mbnscGeneralAlarms = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 6), MOBAAlarm64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralAlarms.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralAlarms.setDescription('Alarm state of the network slave clock Bit00=Synchronization Bit01=Power supply Bit02=Slave (cascade) Bit03=Illumination Bit04=Hands position (sec) Bit05=Hands position (m/h) Bit06=Restart Bit07=CommError Bit08=Time zone Bit09=Reserved Bit10=Reserved Bit11=Reserved Bit12=Reserved Bit13=Reserved Bit14=Reserved Bit15=Reserved Bit16=Reserved Bit17=Reserved Bit18=Reserved Bit19=Reserved Bit20=Reserved Bit21=Reserved Bit22=Reserved Bit23=Reserved Bit24=Reserved Bit25=Reserved Bit26=Reserved Bit27=Reserved Bit28=Reserved Bit29=Reserved Bit30=Reserved Bit31=Reserved Bit32..Bit63=Device type specific error')
mbnscGeneralSlaveInfo1 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(26, 26)).setFixedLength(26)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralSlaveInfo1.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralSlaveInfo1.setDescription('Info from slave 1 Byte 0..5 MAC Byte 6..11 Slave local time Byte 12..17 SW-number Byte 18..21 SW-version Byte 22 state Byte 23 reserved Byte 24 error Byte 25 reserved')
mbnscGeneralSlaveInfo2 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(26, 26)).setFixedLength(26)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralSlaveInfo2.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralSlaveInfo2.setDescription('Info from slave 2 Byte 0..5 MAC Byte 6..11 Slave local time Byte 12..17 SW-number Byte 18..21 SW-version Byte 22 state Byte 23 reserved Byte 24 error Byte 25 reserved')
mbnscGeneralSlaveInfo3 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(26, 26)).setFixedLength(26)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralSlaveInfo3.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralSlaveInfo3.setDescription('Info from slave 3 Byte 0..5 MAC Byte 6..11 Slave local time Byte 12..17 SW-number Byte 18..21 SW-version Byte 22 state Byte 23 reserved Byte 24 error Byte 25 reserved')
mbnscGeneralConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscGeneralConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralConfigCmd.setDescription('General configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscGeneralConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 5, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscGeneralConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscGeneralConfigChangedTime.setDescription('Time of the last configuration change.')
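# --- Supervision (OID ...13842.6.2.6): SNMP manager addresses (IPv4/IPv6 strings) and the
#     alive-trap interval.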
mbnscSupervision = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 6))
mbnscSNMPManager1 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 6, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSNMPManager1.setStatus('current')
if mibBuilder.loadTexts: mbnscSNMPManager1.setDescription('String with IPv4 or IPv6 address (e.g. 192.168.3.4 or FD03:2345:2345:2345::231) of the SNMP manager 1')
mbnscSNMPManager2 = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 6, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSNMPManager2.setStatus('current')
if mibBuilder.loadTexts: mbnscSNMPManager2.setDescription('String with IPv4 or IPv6 address (e.g. 192.168.3.4 or FD03:2345:2345:2345::231) of the SNMP manager 2')
mbnscSNMPTrapAliveMsgInterval = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 6, 3), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 1440), ))).setUnits('minutes (min)').setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSNMPTrapAliveMsgInterval.setStatus('current')
if mibBuilder.loadTexts: mbnscSNMPTrapAliveMsgInterval.setDescription('Time interval to send the alive message to the trap listener stations. 0: Alive message disabled >0: Alive message enabled Range: 1 ... 1440 minutes')
mbnscSNMPConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 6, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscSNMPConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscSNMPConfigCmd.setDescription('SNMP / supervision services configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
mbnscSNMPConfigChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 6, 101), TimeTicks()).setUnits('Time ticks in 1/100th seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscSNMPConfigChangedTime.setStatus('current')
if mibBuilder.loadTexts: mbnscSNMPConfigChangedTime.setDescription('Time of the last configuration change.')
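# --- Commands (OID ...13842.6.2.7): scalars used to trigger or report device actions
#     (12h hand position, software reset, factory settings, firmware update, external contact).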
mbnscCommand = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 7))
mbnscCommand12Pos = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 7, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscCommand12Pos.setStatus('current')
if mibBuilder.loadTexts: mbnscCommand12Pos.setDescription('Mode of the analogue clock: 0=run, 1=12h position')
mbnscCommandSWReset = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 7, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscCommandSWReset.setStatus('current')
if mibBuilder.loadTexts: mbnscCommandSWReset.setDescription('Command SW reset if set to 1')
mbnscCommandFactorySetting = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 7, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscCommandFactorySetting.setStatus('current')
if mibBuilder.loadTexts: mbnscCommandFactorySetting.setDescription('Restore factory settings if set to 1')
mbnscCommandFirmwUpd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 7, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(10, 10)).setFixedLength(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscCommandFirmwUpd.setStatus('current')
if mibBuilder.loadTexts: mbnscCommandFirmwUpd.setDescription('Command to start FW update')
mbnscCommandExtContact = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 7, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mbnscCommandExtContact.setStatus('current')
if mibBuilder.loadTexts: mbnscCommandExtContact.setDescription('Set external contact')
mbnscCommandConfigCmd = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 7, 100), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mbnscCommandConfigCmd.setStatus('current')
if mibBuilder.loadTexts: mbnscCommandConfigCmd.setDescription('SNMP / supervision services configuration changed command variable. 0: Not defined 1: SNMP has changed the configuration (possible restart of device) 2: SNMP requests to restore the old configuration')
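# --- SNMP configuration (OID ...13842.6.2.9): current-alarm info scalars that are attached
#     as varbinds to the alarm notification (error number, error state, error time).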
mbnscSnmpConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 9))
mbnscSnmpCurrentAlarmInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 9, 1))
mbnscTrapAlMsgErrorNr = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 9, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: mbnscTrapAlMsgErrorNr.setStatus('current')
if mibBuilder.loadTexts: mbnscTrapAlMsgErrorNr.setDescription('Error number of the trap message.')
mbnscTrapAlMsgErrorState = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 9, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: mbnscTrapAlMsgErrorState.setStatus('current')
if mibBuilder.loadTexts: mbnscTrapAlMsgErrorState.setDescription('Error state of the trap message.')
mbnscTrapAlMsgErrorTime = MibScalar((1, 3, 6, 1, 4, 1, 13842, 6, 2, 9, 1, 3), Unsigned32()).setUnits('Seconds').setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: mbnscTrapAlMsgErrorTime.setStatus('current')
if mibBuilder.loadTexts: mbnscTrapAlMsgErrorTime.setDescription('Error time of the trap message. Seconds since 1.1.1970 00:00:00')
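# --- Notifications (OID ...13842.6.2.10): alarm trap (with the three alarm-info varbinds
#     above) and a periodic alive trap (with general status and alarm flags).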
mbnscTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 2, 10))
mbnscTrapsAlarm = NotificationType((1, 3, 6, 1, 4, 1, 13842, 6, 2, 10, 1)).setObjects(("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if mibBuilder.loadTexts: mbnscTrapsAlarm.setStatus('current')
if mibBuilder.loadTexts: mbnscTrapsAlarm.setDescription('Alarm trap with attached (bound variables) alarm information: mbnscTrapAlMsgErrorNr, mbnscTrapAlMsgErrorState, mbnscTrapAlMsgErrorTime')
mbnscTrapsAlive = NotificationType((1, 3, 6, 1, 4, 1, 13842, 6, 2, 10, 2)).setObjects(("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"))
if mibBuilder.loadTexts: mbnscTrapsAlive.setStatus('current')
if mibBuilder.loadTexts: mbnscTrapsAlive.setDescription('Device alive trap. Will be sent at the configured interval. Attached are the variables mbnscGeneralStatus and mbnscGeneralAlarms.')
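# --- Conformance (OID ...13842.6.100): module compliance statement plus notification and
#     object groups.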
mbnscMIBCompliance = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 100, 1))
mbnscMIBCompliances = ModuleCompliance((1, 3, 6, 1, 4, 1, 13842, 6, 100, 1, 1)).setObjects(("MOBANetClocksV2-MIB", "mbnscGrpNBU190"), ("MOBANetClocksV2-MIB", "mbnscGrpSEN40"), ("MOBANetClocksV2-MIB", "mbnscGrpDC"), ("MOBANetClocksV2-MIB", "mbnscGrpECODC"), ("MOBANetClocksV2-MIB", "mbnscGrpDA"), ("MOBANetClocksV2-MIB", "mbnscGrpDK"), ("MOBANetClocksV2-MIB", "mbnscGrpNMI"), ("MOBANetClocksV2-MIB", "mbnscGrpNotUsedParameters"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscMIBCompliances = mbnscMIBCompliances.setStatus('current')
if mibBuilder.loadTexts: mbnscMIBCompliances.setDescription('Please replace this text with your description.')
mbnscMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2))
mbnscAllNotifications = NotificationGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 1)).setObjects(("MOBANetClocksV2-MIB", "mbnscTrapsAlarm"), ("MOBANetClocksV2-MIB", "mbnscTrapsAlive"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscAllNotifications = mbnscAllNotifications.setStatus('current')
if mibBuilder.loadTexts: mbnscAllNotifications.setDescription('This automatically created notification group contains all notifications that do not belong to any other NOTIFICATION-GROUP')
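# Per-device object groups: each ObjectGroup lists the subset of MIB objects supported by one
# clock type (NBU 190, SEN 40, DC, ECO-DC, DA, DK, NMI), plus a catch-all group for parameters
# not used in any other group.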
mbnscGrpNBU190 = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 2)).setObjects(("MOBANetClocksV2-MIB", "mbnscNetGenMAC"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPNameserver"), ("MOBANetClocksV2-MIB", "mbnscNetGenTZClientPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenSnmpMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMulticastMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenHostname"), ("MOBANetClocksV2-MIB", "mbnscNetGenCommMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMCastGrpIP"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Addr"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Mask"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4DHCPMode"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrLocal"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrAuto"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrDHCP"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrFix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Prefix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Config"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP1"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP2"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP3"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP4"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPcurrent"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPpollIntervall"), ("MOBANetClocksV2-MIB", "mbnscTimeDeviceTime"), ("MOBANetClocksV2-MIB", "mbnscTimeLocOffset"), ("MOBANetClocksV2-MIB", "mbnscTimeLastReception"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneVersion"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneNumber"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscModeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscModeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscGeneralType"), ("MOBANetClocksV2-MIB", "mbnscGeneralName"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareNumber"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareVer"), ("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"), ("MOBANetClocksV2-MIB", "mbnscGeneralSlaveInfo1"), ("MOBANetClocksV2-MIB", "mbnscGeneralSlaveInfo2"), ("MOBANetClocksV2-MIB", "mbnscGeneralSlaveInfo3"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager1"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager2"), ("MOBANetClocksV2-MIB", "mbnscSNMPTrapAliveMsgInterval"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscCommand12Pos"), ("MOBANetClocksV2-MIB", "mbnscCommandSWReset"), ("MOBANetClocksV2-MIB", "mbnscCommandConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpNBU190 = mbnscGrpNBU190.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpNBU190.setDescription('All parameters used for NBU 190 movement')
mbnscGrpSEN40 = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 3)).setObjects(("MOBANetClocksV2-MIB", "mbnscNetGenMAC"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPNameserver"), ("MOBANetClocksV2-MIB", "mbnscNetGenTZClientPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenSnmpMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMulticastMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenHostname"), ("MOBANetClocksV2-MIB", "mbnscNetGenCommMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMCastGrpIP"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Addr"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Mask"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4DHCPMode"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrLocal"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrAuto"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrDHCP"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrFix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Prefix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Config"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP1"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP2"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP3"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP4"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPcurrent"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPpollIntervall"), ("MOBANetClocksV2-MIB", "mbnscTimeDeviceTime"), ("MOBANetClocksV2-MIB", "mbnscTimeLocOffset"), ("MOBANetClocksV2-MIB", "mbnscTimeLastReception"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneVersion"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneNumber"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscModeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscModeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscGeneralType"), ("MOBANetClocksV2-MIB", "mbnscGeneralName"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareNumber"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareVer"), ("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"), ("MOBANetClocksV2-MIB", "mbnscGeneralSlaveInfo1"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager1"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager2"), ("MOBANetClocksV2-MIB", "mbnscSNMPTrapAliveMsgInterval"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscCommand12Pos"), ("MOBANetClocksV2-MIB", "mbnscCommandSWReset"), ("MOBANetClocksV2-MIB", "mbnscCommandConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpSEN40 = mbnscGrpSEN40.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpSEN40.setDescription('All parameters used for SEN 40 movement')
mbnscGrpDC = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 4)).setObjects(("MOBANetClocksV2-MIB", "mbnscNetGenMAC"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPNameserver"), ("MOBANetClocksV2-MIB", "mbnscNetGenTZClientPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenSnmpMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMulticastMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenHostname"), ("MOBANetClocksV2-MIB", "mbnscNetGenCommMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMCastGrpIP"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Addr"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Mask"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4DHCPMode"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrLocal"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrAuto"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrDHCP"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrFix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Prefix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Config"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP1"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP2"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP3"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP4"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPcurrent"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPpollIntervall"), ("MOBANetClocksV2-MIB", "mbnscTimeDeviceTime"), ("MOBANetClocksV2-MIB", "mbnscTimeLocOffset"), ("MOBANetClocksV2-MIB", "mbnscTimeLastReception"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneVersion"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneNumber"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayBrightness"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayFormat"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayAlternate"), ("MOBANetClocksV2-MIB", "mbnscModeIRlock"), ("MOBANetClocksV2-MIB", "mbnscModeTimeDispZeros"), ("MOBANetClocksV2-MIB", "mbnscModeDateDispZeros"), ("MOBANetClocksV2-MIB", "mbnscModeTempUnit"), ("MOBANetClocksV2-MIB", "mbnscModeClockOpMode"), ("MOBANetClocksV2-MIB", "mbnscModeDispDerating"), ("MOBANetClocksV2-MIB", "mbnscModeLightCorr"), ("MOBANetClocksV2-MIB", "mbnscModeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscModeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSensorsTempActivation"), ("MOBANetClocksV2-MIB", "mbnscSensorsTemp1IPAddr"), ("MOBANetClocksV2-MIB", "mbnscSensorsTemp2IPAddr"), ("MOBANetClocksV2-MIB", "mbnscSensorsConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSensorsConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscGeneralType"), ("MOBANetClocksV2-MIB", "mbnscGeneralName"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareNumber"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareVer"), ("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager1"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager2"), ("MOBANetClocksV2-MIB", "mbnscSNMPTrapAliveMsgInterval"), 
("MOBANetClocksV2-MIB", "mbnscSNMPConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscCommandSWReset"), ("MOBANetClocksV2-MIB", "mbnscCommandConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpDC = mbnscGrpDC.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpDC.setDescription('All parameters used for DC digital clock')
mbnscGrpECODC = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 5)).setObjects(("MOBANetClocksV2-MIB", "mbnscNetGenMAC"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPNameserver"), ("MOBANetClocksV2-MIB", "mbnscNetGenTZClientPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenSnmpMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMulticastMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenHostname"), ("MOBANetClocksV2-MIB", "mbnscNetGenCommMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMCastGrpIP"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Addr"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Mask"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4DHCPMode"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrLocal"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrAuto"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrDHCP"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrFix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Prefix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Config"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP1"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP2"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP3"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP4"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPcurrent"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPpollIntervall"), ("MOBANetClocksV2-MIB", "mbnscTimeDeviceTime"), ("MOBANetClocksV2-MIB", "mbnscTimeLocOffset"), ("MOBANetClocksV2-MIB", "mbnscTimeLastReception"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneVersion"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneNumber"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayBrightness"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayFormat"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayAlternate"), ("MOBANetClocksV2-MIB", "mbnscModeDispDerating"), ("MOBANetClocksV2-MIB", "mbnscModeLightCorr"), ("MOBANetClocksV2-MIB", "mbnscModeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscModeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscGeneralType"), ("MOBANetClocksV2-MIB", "mbnscGeneralName"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareNumber"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareVer"), ("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager1"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager2"), ("MOBANetClocksV2-MIB", "mbnscSNMPTrapAliveMsgInterval"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscCommandSWReset"), ("MOBANetClocksV2-MIB", "mbnscCommandConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpECODC = mbnscGrpECODC.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpECODC.setDescription('All parameters used for ECO DC digital clock')
mbnscGrpDA = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 6)).setObjects(("MOBANetClocksV2-MIB", "mbnscNetGenMAC"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPNameserver"), ("MOBANetClocksV2-MIB", "mbnscNetGenTZClientPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenSnmpMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMulticastMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenHostname"), ("MOBANetClocksV2-MIB", "mbnscNetGenCommMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMCastGrpIP"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Addr"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Mask"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4DHCPMode"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrLocal"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrAuto"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrDHCP"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrFix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Prefix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Config"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP1"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP2"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP3"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP4"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPcurrent"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPpollIntervall"), ("MOBANetClocksV2-MIB", "mbnscTimeDeviceTime"), ("MOBANetClocksV2-MIB", "mbnscTimeLocOffset"), ("MOBANetClocksV2-MIB", "mbnscTimeLastReception"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneVersion"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneNumber"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayBrightness"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayFormat"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayAlternate"), ("MOBANetClocksV2-MIB", "mbnscModeIRlock"), ("MOBANetClocksV2-MIB", "mbnscModeTimeDispZeros"), ("MOBANetClocksV2-MIB", "mbnscModeDateDispZeros"), ("MOBANetClocksV2-MIB", "mbnscModeTempUnit"), ("MOBANetClocksV2-MIB", "mbnscModeClockOpMode"), ("MOBANetClocksV2-MIB", "mbnscModeDispDerating"), ("MOBANetClocksV2-MIB", "mbnscModeLightCorr"), ("MOBANetClocksV2-MIB", "mbnscModeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscModeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSensorsTempActivation"), ("MOBANetClocksV2-MIB", "mbnscSensorsTemp1IPAddr"), ("MOBANetClocksV2-MIB", "mbnscSensorsTemp2IPAddr"), ("MOBANetClocksV2-MIB", "mbnscSensorsConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSensorsConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscDASecondCircleDisplay"), ("MOBANetClocksV2-MIB", "mbnscDAConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscDAConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscGeneralType"), ("MOBANetClocksV2-MIB", "mbnscGeneralName"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareNumber"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareVer"), ("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigCmd"), ("MOBANetClocksV2-MIB", 
"mbnscGeneralConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager1"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager2"), ("MOBANetClocksV2-MIB", "mbnscSNMPTrapAliveMsgInterval"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscCommandSWReset"), ("MOBANetClocksV2-MIB", "mbnscCommandConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpDA = mbnscGrpDA.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpDA.setDescription('All parameters used for DA digital clock')
mbnscGrpDK = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 7)).setObjects(("MOBANetClocksV2-MIB", "mbnscNetGenMAC"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPNameserver"), ("MOBANetClocksV2-MIB", "mbnscNetGenTZClientPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenSnmpMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMulticastMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenHostname"), ("MOBANetClocksV2-MIB", "mbnscNetGenCommMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMCastGrpIP"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Addr"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Mask"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4DHCPMode"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrLocal"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrAuto"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrDHCP"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrFix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Prefix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Config"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP1"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP2"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP3"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP4"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPcurrent"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPpollIntervall"), ("MOBANetClocksV2-MIB", "mbnscTimeDeviceTime"), ("MOBANetClocksV2-MIB", "mbnscTimeLocOffset"), ("MOBANetClocksV2-MIB", "mbnscTimeLastReception"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneVersion"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneNumber"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayBrightness"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayFormat"), ("MOBANetClocksV2-MIB", "mbnscModeDisplayAlternate"), ("MOBANetClocksV2-MIB", "mbnscModeIRlock"), ("MOBANetClocksV2-MIB", "mbnscModeTimeDispZeros"), ("MOBANetClocksV2-MIB", "mbnscModeDateDispZeros"), ("MOBANetClocksV2-MIB", "mbnscModeTempUnit"), ("MOBANetClocksV2-MIB", "mbnscModeClockOpMode"), ("MOBANetClocksV2-MIB", "mbnscModeDispDerating"), ("MOBANetClocksV2-MIB", "mbnscModeLightCorr"), ("MOBANetClocksV2-MIB", "mbnscModeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscModeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSensorsTempActivation"), ("MOBANetClocksV2-MIB", "mbnscSensorsTemp1IPAddr"), ("MOBANetClocksV2-MIB", "mbnscSensorsTemp2IPAddr"), ("MOBANetClocksV2-MIB", "mbnscSensorsConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSensorsConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscDKFirstLanguage"), ("MOBANetClocksV2-MIB", "mbnscDKSecondLanguage"), ("MOBANetClocksV2-MIB", "mbnscDKThirdLanguage"), ("MOBANetClocksV2-MIB", "mbnscDKTempUnitSecondLang"), ("MOBANetClocksV2-MIB", "mbnscDKTempUnitThirdLang"), ("MOBANetClocksV2-MIB", "mbnscDKAutoLangSwitchOver"), ("MOBANetClocksV2-MIB", "mbnscDKNumOfCharsForWeekday"), ("MOBANetClocksV2-MIB", "mbnscDKNamesFormatDisplay"), ("MOBANetClocksV2-MIB", "mbnscDKTemp1DescriptEnable"), ("MOBANetClocksV2-MIB", "mbnscDKTemp1Description"), 
("MOBANetClocksV2-MIB", "mbnscDKTemp2DescriptEnable"), ("MOBANetClocksV2-MIB", "mbnscDKTemp2Description"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone1"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone1Description"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone2"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone2Description"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone3"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone3Description"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone4"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone4Description"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone5"), ("MOBANetClocksV2-MIB", "mbnscDKWorldTimeZone5Description"), ("MOBANetClocksV2-MIB", "mbnscDKAutoTimeZoneSwitchOver"), ("MOBANetClocksV2-MIB", "mbnscDKConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscDKConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscGeneralType"), ("MOBANetClocksV2-MIB", "mbnscGeneralName"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareNumber"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareVer"), ("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager1"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager2"), ("MOBANetClocksV2-MIB", "mbnscSNMPTrapAliveMsgInterval"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscCommandSWReset"), ("MOBANetClocksV2-MIB", "mbnscCommandConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpDK = mbnscGrpDK.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpDK.setDescription('All parameters used for DK digital clock')
mbnscGrpNMI = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 8)).setObjects(("MOBANetClocksV2-MIB", "mbnscNetGenMAC"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenIPNameserver"), ("MOBANetClocksV2-MIB", "mbnscNetGenTZClientPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigPort"), ("MOBANetClocksV2-MIB", "mbnscNetGenSnmpMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMulticastMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenHostname"), ("MOBANetClocksV2-MIB", "mbnscNetGenCommMode"), ("MOBANetClocksV2-MIB", "mbnscNetGenMCastGrpIP"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetGenConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Addr"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Mask"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4DHCPMode"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv4ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrLocal"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrAuto"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrDHCP"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6AddrFix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Prefix"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Gateway"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6Config"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNetIPv6ConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP1"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP2"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP3"), ("MOBANetClocksV2-MIB", "mbnscTimeNTP4"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPcurrent"), ("MOBANetClocksV2-MIB", "mbnscTimeNTPpollIntervall"), ("MOBANetClocksV2-MIB", "mbnscTimeDeviceTime"), ("MOBANetClocksV2-MIB", "mbnscTimeLocOffset"), ("MOBANetClocksV2-MIB", "mbnscTimeLastReception"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneVersion"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneNumber"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscModeConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscModeConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNMIDCFCurrentLoop"), ("MOBANetClocksV2-MIB", "mbnscNMILineDriver"), ("MOBANetClocksV2-MIB", "mbnscNMIMOBALineMode"), ("MOBANetClocksV2-MIB", "mbnscNMIMOBALineMinuteHandMode"), ("MOBANetClocksV2-MIB", "mbnscNMIActiveDCFMode"), ("MOBANetClocksV2-MIB", "mbnscNMIConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscNMIConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock1"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock2"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock3"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock4"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock5"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock6"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock7"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock8"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock9"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock10"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock11"), ("MOBANetClocksV2-MIB", "mbnscNMISideClock12"), ("MOBANetClocksV2-MIB", "mbnscGeneralType"), ("MOBANetClocksV2-MIB", "mbnscGeneralName"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareNumber"), ("MOBANetClocksV2-MIB", "mbnscGeneralFirmwareVer"), ("MOBANetClocksV2-MIB", "mbnscGeneralStatus"), ("MOBANetClocksV2-MIB", "mbnscGeneralAlarms"), ("MOBANetClocksV2-MIB", "mbnscGeneralConfigCmd"), ("MOBANetClocksV2-MIB", 
"mbnscGeneralConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager1"), ("MOBANetClocksV2-MIB", "mbnscSNMPManager2"), ("MOBANetClocksV2-MIB", "mbnscSNMPTrapAliveMsgInterval"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscSNMPConfigChangedTime"), ("MOBANetClocksV2-MIB", "mbnscCommand12Pos"), ("MOBANetClocksV2-MIB", "mbnscCommandSWReset"), ("MOBANetClocksV2-MIB", "mbnscCommandConfigCmd"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorNr"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorState"), ("MOBANetClocksV2-MIB", "mbnscTrapAlMsgErrorTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpNMI = mbnscGrpNMI.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpNMI.setDescription('All parameters used for Network-MOBALine-Interface NMI')
mbnscGrpNotUsedParameters = ObjectGroup((1, 3, 6, 1, 4, 1, 13842, 6, 100, 2, 100)).setObjects(("MOBANetClocksV2-MIB", "mbnscTimeZoneEntry1"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneEntry2"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneEntry3"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneEntry4"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneEntry5"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneEntry6"), ("MOBANetClocksV2-MIB", "mbnscTimeZoneEntry7"), ("MOBANetClocksV2-MIB", "mbnscModeSwitchInfo"), ("MOBANetClocksV2-MIB", "mbnscCommandFactorySetting"), ("MOBANetClocksV2-MIB", "mbnscCommandFirmwUpd"), ("MOBANetClocksV2-MIB", "mbnscCommandExtContact"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mbnscGrpNotUsedParameters = mbnscGrpNotUsedParameters.setStatus('current')
if mibBuilder.loadTexts: mbnscGrpNotUsedParameters.setDescription('All parameters not used in other groups')
mibBuilder.exportSymbols("MOBANetClocksV2-MIB", mbnscTimeZoneEntry5=mbnscTimeZoneEntry5, mbnscDKTempUnitSecondLang=mbnscDKTempUnitSecondLang, mbnscTimeNTPpollIntervall=mbnscTimeNTPpollIntervall, mbnscSensors=mbnscSensors, mbnscTimeZoneEntry3=mbnscTimeZoneEntry3, mbnscTimeZoneConfigChangedTime=mbnscTimeZoneConfigChangedTime, mbnscModeTempUnit=mbnscModeTempUnit, mbnscTimeConfigChangedTime=mbnscTimeConfigChangedTime, mbnscDAConfigChangedTime=mbnscDAConfigChangedTime, mbnscNetGen=mbnscNetGen, mbnscNetIPv6ConfigCmd=mbnscNetIPv6ConfigCmd, mbnscNetGenMAC=mbnscNetGenMAC, mbnscNetIPv4Gateway=mbnscNetIPv4Gateway, mbnscCommandConfigCmd=mbnscCommandConfigCmd, mbnscTimeNTPcurrent=mbnscTimeNTPcurrent, mbnscTimeZoneEntry1=mbnscTimeZoneEntry1, mbnscModeNTP=mbnscModeNTP, mbnscDKAutoTimeZoneSwitchOver=mbnscDKAutoTimeZoneSwitchOver, mbnscNetIPv4DHCPMode=mbnscNetIPv4DHCPMode, mbnscNetIPv6ConfigChangedTime=mbnscNetIPv6ConfigChangedTime, mbnscDA=mbnscDA, mbnscGrpSEN40=mbnscGrpSEN40, mbnscTimeNTP3=mbnscTimeNTP3, mbnscNetGenTZClientPort=mbnscNetGenTZClientPort, mbnscNetGenMulticastMode=mbnscNetGenMulticastMode, mbnscSNMPConfigChangedTime=mbnscSNMPConfigChangedTime, mbnscTimeDeviceTime=mbnscTimeDeviceTime, mbnscNMISideClock10=mbnscNMISideClock10, mbnscDKConfigChangedTime=mbnscDKConfigChangedTime, mbnscNMISideClock4=mbnscNMISideClock4, mbnscGeneralFirmwareVer=mbnscGeneralFirmwareVer, mbnscCommand=mbnscCommand, mbnscTimeNTP2=mbnscTimeNTP2, mbnscTimeLocOffset=mbnscTimeLocOffset, mbnscDKWorldTimeZone1Description=mbnscDKWorldTimeZone1Description, mbnscNMISideClock3=mbnscNMISideClock3, mbnscGeneralConfigChangedTime=mbnscGeneralConfigChangedTime, mbnscNetIPv6AddrDHCP=mbnscNetIPv6AddrDHCP, mbnscSnmpConfig=mbnscSnmpConfig, mbnscGrpDC=mbnscGrpDC, mbnscNMIDCFCurrentLoop=mbnscNMIDCFCurrentLoop, mbnscDASecondCircleDisplay=mbnscDASecondCircleDisplay, mbnscDK=mbnscDK, mbnscNMIActiveDCFMode=mbnscNMIActiveDCFMode, mbnscCommand12Pos=mbnscCommand12Pos, mbnscNMIMOBALineMode=mbnscNMIMOBALineMode, mbnscNetIPv6AddrLocal=mbnscNetIPv6AddrLocal, mbnscAdditionalDigitalClockModes=mbnscAdditionalDigitalClockModes, mbnscDKWorldTimeZone2=mbnscDKWorldTimeZone2, mbnscTimeZone=mbnscTimeZone, mbnscSensorsConfigCmd=mbnscSensorsConfigCmd, mbnscModeDisplayAlternate=mbnscModeDisplayAlternate, mbnscNetGenIPMode=mbnscNetGenIPMode, mbnscTime=mbnscTime, mbnscMIBCompliances=mbnscMIBCompliances, mbnscDKTemp1Description=mbnscDKTemp1Description, mbnscSupervision=mbnscSupervision, mbnscNetIPv6AddrFix=mbnscNetIPv6AddrFix, mbnscMIBCompliance=mbnscMIBCompliance, mbnscGeneral=mbnscGeneral, mbnscGrpECODC=mbnscGrpECODC, mbnscModeDisplayFormat=mbnscModeDisplayFormat, mbnscModeIRlock=mbnscModeIRlock, mbnscGeneralStatus=mbnscGeneralStatus, mbnscNMISideClockState=mbnscNMISideClockState, mbnscDKWorldTimeZone2Description=mbnscDKWorldTimeZone2Description, mbnscModeTimeDispZeros=mbnscModeTimeDispZeros, MOBAAlarm64=MOBAAlarm64, mbnscAllNotifications=mbnscAllNotifications, mbnscCommandFirmwUpd=mbnscCommandFirmwUpd, mbnscMode=mbnscMode, mbnscDKWorldTimeZone5Description=mbnscDKWorldTimeZone5Description, mbnscDKWorldTimeZone1=mbnscDKWorldTimeZone1, mbnscNMILineDriver=mbnscNMILineDriver, mbnscTimeNTP4=mbnscTimeNTP4, mbnscNetGenHostname=mbnscNetGenHostname, PYSNMP_MODULE_ID=mbnscMIB, MOBAFlags64=MOBAFlags64, mbnscSensorsTemp2IPAddr=mbnscSensorsTemp2IPAddr, mbnscNMISideClock5=mbnscNMISideClock5, mbnscAdditionalInterfaceModes=mbnscAdditionalInterfaceModes, mbnscNetGenConfigPort=mbnscNetGenConfigPort, mbnscNetGenMCastGrpIP=mbnscNetGenMCastGrpIP, mbnscNetIPv6=mbnscNetIPv6, 
mbnscGeneralFirmwareNumber=mbnscGeneralFirmwareNumber, mbnscNetGenCommMode=mbnscNetGenCommMode, mbnscNetIPv6Prefix=mbnscNetIPv6Prefix, mbnscNMIMOBALineMinuteHandMode=mbnscNMIMOBALineMinuteHandMode, mbnscNMISideClock2=mbnscNMISideClock2, mbnscSNMPConfigCmd=mbnscSNMPConfigCmd, mbnscTimeNTP1=mbnscTimeNTP1, mbnscDKConfigCmd=mbnscDKConfigCmd, mbnscNMIConfigChangedTime=mbnscNMIConfigChangedTime, mbnscNetGenSnmpMode=mbnscNetGenSnmpMode, MOBANetworkName=MOBANetworkName, mbnscNetIPv6AddrAuto=mbnscNetIPv6AddrAuto, mbnscTrapAlMsgErrorState=mbnscTrapAlMsgErrorState, mbnscTimeZoneNumber=mbnscTimeZoneNumber, mbnscTimeZoneVersion=mbnscTimeZoneVersion, mbnscGrpNotUsedParameters=mbnscGrpNotUsedParameters, mbnscDKWorldTimeZone5=mbnscDKWorldTimeZone5, mbnscTrapAlMsgErrorTime=mbnscTrapAlMsgErrorTime, mbnscModeDisplayBrightness=mbnscModeDisplayBrightness, mbnscGeneralName=mbnscGeneralName, mbnscNMISideClock6=mbnscNMISideClock6, mbnscTrapsAlarm=mbnscTrapsAlarm, mbnscCommandFactorySetting=mbnscCommandFactorySetting, mbnscNetIPv4ConfigChangedTime=mbnscNetIPv4ConfigChangedTime, mbnscDKFirstLanguage=mbnscDKFirstLanguage, mbnscDKWorldTimeZone3=mbnscDKWorldTimeZone3, mbnscTrapAlMsgErrorNr=mbnscTrapAlMsgErrorNr, mbnscSensorsConfigChangedTime=mbnscSensorsConfigChangedTime, mbnscSensorsTempActivation=mbnscSensorsTempActivation, mbnscCommandSWReset=mbnscCommandSWReset, mbnscDKNamesFormatDisplay=mbnscDKNamesFormatDisplay, mbnscNetIPv4Addr=mbnscNetIPv4Addr, mobaNetClocks=mobaNetClocks, mbnscDKTempUnitThirdLang=mbnscDKTempUnitThirdLang, mbnscGeneralSlaveInfo1=mbnscGeneralSlaveInfo1, mbnscNMISideClock7=mbnscNMISideClock7, mbnscSNMPManager2=mbnscSNMPManager2, mbnscNMI=mbnscNMI, mobatime=mobatime, mbnscModeDateDispZeros=mbnscModeDateDispZeros, mbnscNMISideClock11=mbnscNMISideClock11, mbnscMIB=mbnscMIB, mbnscNetIPv4=mbnscNetIPv4, mbnscCommandExtContact=mbnscCommandExtContact, mbnscDKWorldTimeZone4=mbnscDKWorldTimeZone4, mbnscTimeZoneEntry6=mbnscTimeZoneEntry6, mbnscNetIPv6Config=mbnscNetIPv6Config, mbnscGeneralType=mbnscGeneralType, mbnscTimeConfigCmd=mbnscTimeConfigCmd, mbnscDKTemp2DescriptEnable=mbnscDKTemp2DescriptEnable, mbnscDKWorldTimeZone4Description=mbnscDKWorldTimeZone4Description, mbnscDAConfigCmd=mbnscDAConfigCmd, mbnscTimeZoneEntry2=mbnscTimeZoneEntry2, mbnscNMISideClock8=mbnscNMISideClock8, mbnscTrapsAlive=mbnscTrapsAlive, mbnscGeneralAlarms=mbnscGeneralAlarms, mbnscNetIPv4ConfigCmd=mbnscNetIPv4ConfigCmd, mbnscDKThirdLanguage=mbnscDKThirdLanguage, mbnscDKSecondLanguage=mbnscDKSecondLanguage, mbnscGeneralConfigCmd=mbnscGeneralConfigCmd, mbnscDKTemp2Description=mbnscDKTemp2Description, mbnscGeneralSlaveInfo2=mbnscGeneralSlaveInfo2, mbnscNetGenIPNameserver=mbnscNetGenIPNameserver, mbnscGrpDA=mbnscGrpDA, mbnscModeClockOpMode=mbnscModeClockOpMode, mbnscGrpNMI=mbnscGrpNMI, mbnscMIBGroups=mbnscMIBGroups, mbnscModeSwitchInfo=mbnscModeSwitchInfo, mbnscSNMPTrapAliveMsgInterval=mbnscSNMPTrapAliveMsgInterval, mbnscNetGenConfigCmd=mbnscNetGenConfigCmd, mbnscSNMPManager1=mbnscSNMPManager1, mbnscGeneralSlaveInfo3=mbnscGeneralSlaveInfo3, mbnscDKTemp1DescriptEnable=mbnscDKTemp1DescriptEnable, mbnscDKNumOfCharsForWeekday=mbnscDKNumOfCharsForWeekday, mbnscTimeZoneConfigCmd=mbnscTimeZoneConfigCmd, mbnscModeNWParam=mbnscModeNWParam, mbnscModeConfigChangedTime=mbnscModeConfigChangedTime, mbnscNet=mbnscNet, mbnscNMISideClock1=mbnscNMISideClock1, mbnscSnmpCurrentAlarmInfo=mbnscSnmpCurrentAlarmInfo, mbnscGrpNBU190=mbnscGrpNBU190, mbnscTimeLastReception=mbnscTimeLastReception, mbnscSensorsTemp1IPAddr=mbnscSensorsTemp1IPAddr, 
mbnscModeConfigCmd=mbnscModeConfigCmd, mbnscTraps=mbnscTraps, mbnscTimeZoneEntry7=mbnscTimeZoneEntry7, mobaNetClocksV2=mobaNetClocksV2, mbnscNetIPv4Mask=mbnscNetIPv4Mask, mbnscNetIPv6Gateway=mbnscNetIPv6Gateway, mbnscModeLightCorr=mbnscModeLightCorr, mbnscNMISideClock12=mbnscNMISideClock12, mbnscDKWorldTimeZone3Description=mbnscDKWorldTimeZone3Description, mbnscNMIConfigCmd=mbnscNMIConfigCmd, mbnscTimeZoneEntry4=mbnscTimeZoneEntry4, mbnscNMISideClock9=mbnscNMISideClock9, mbnscNetGenConfigChangedTime=mbnscNetGenConfigChangedTime, mbnscGrpDK=mbnscGrpDK, mbnscModeDispDerating=mbnscModeDispDerating, mbnscDKAutoLangSwitchOver=mbnscDKAutoLangSwitchOver)
|
py
|
1a577b698f79e00cd5dea39ea7d97774d3e8d7c9
|
import json
import web
import six
import re
import os
import urlparse
from werkzeug.exceptions import BadRequest, MethodNotAllowed
from urllib import unquote
from utils import props
from init_subclass_meta import InitSubclassMeta
from graphql import Source, execute, parse, validate
from graphql.error import format_error as format_graphql_error
from graphql.error import GraphQLError
from graphql.execution import ExecutionResult
from graphql.type.schema import GraphQLSchema
from graphql.utils.get_operation_ast import get_operation_ast
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DIR_PATH = os.path.join(BASE_DIR, 'templates')
def get_accepted_content_types():
def qualify(x):
parts = x.split(';', 1)
if len(parts) == 2:
match = re.match(r'(^|;)q=(0(\.\d{,3})?|1(\.0{,3})?)(;|$)',
parts[1])
if match:
return parts[0], float(match.group(2))
return parts[0], 1
raw_content_types = web.ctx.env.get('HTTP_ACCEPT', '*/*').split(',')
qualified_content_types = map(qualify, raw_content_types)
return list(x[0] for x in sorted(qualified_content_types,
key=lambda x: x[1], reverse=True))
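# Hedged illustration (not part of the original handler): with an Accept header
# of 'application/json;q=0.9,text/html', get_accepted_content_types() returns
# ['text/html', 'application/json'], because unqualified types default to q=1
# and the list is sorted by q value in descending order.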
class HttpError(Exception):
def __init__(self, response, message=None, *args, **kwargs):
self.response = response
self.message = message = message or response.description
super(HttpError, self).__init__(message, *args, **kwargs)
class GraphQLView:
__metaclass__ = InitSubclassMeta
schema = None
executor = None
root_value = None
context = None
pretty = False
graphiql = False
middleware = None
batch = False
graphiql_version = '0.11.11'
graphiql_temp_title = "GraphQL"
def __init__(self, *args, **kwargs):
if hasattr(self, 'GraphQLMeta'):
for key, value in props(self.GraphQLMeta).iteritems():
setattr(self, key, value)
assert not all((self.graphiql, self.batch)), 'Use either graphiql or batch processing'
assert isinstance(self.schema, GraphQLSchema), 'A Schema is required to be provided to GraphQLView.'
def get_root_value(self):
return self.root_value
def get_context(self):
if self.context is not None:
return self.context
return web.ctx
def get_middleware(self):
return self.middleware
def get_executor(self):
return self.executor
def render_graphiql(self, **kwargs):
for key, value in kwargs.iteritems():
kwargs[key] = json.dumps(kwargs.get(key, None))
render = web.template.render(DIR_PATH)
return render.graph(self.graphiql_version, **kwargs)
def dispatch(self):
try:
if web.ctx.method.lower() not in ('get', 'post'):
raise HttpError(MethodNotAllowed(['GET', 'POST'], 'GraphQL only supports GET and POST requests.'))
data = self.parse_body()
show_graphiql = self.graphiql and self.can_display_graphiql(data)
            if self.batch:
responses = [self.get_response(entry) for entry in data]
result = '[{}]'.format(','.join([response[0] for response in responses]))
status_code = max(responses, key=lambda response: response[1])[1]
else:
result, status_code = self.get_response(data, show_graphiql)
if show_graphiql:
query, variables, operation_name, id = self.get_graphql_params(data)
return self.render_graphiql(
query=query,
variables=json.dumps(variables),
operation_name=operation_name,
result=result,
graphiql_temp_title=self.graphiql_temp_title
)
else:
web.header('Content-Type', 'application/json')
return result
except HttpError as e:
web.header('Content-Type', 'application/json')
return self.json_encode({'errors': [self.format_error(e)]})
def get_response(self, data, show_graphiql=False):
query, variables, operation_name, id = self.get_graphql_params(data)
execution_result = self.execute_graphql_request(
data,
query,
variables,
operation_name,
show_graphiql
)
status_code = 200
if execution_result:
response = {}
if execution_result.errors:
response['errors'] = [self.format_error(e) for e in execution_result.errors]
if execution_result.invalid:
status_code = 400
else:
status_code = 200
response['data'] = execution_result.data
if self.batch:
response = {
'id': id,
'payload': response,
'status': status_code,
}
result = self.json_encode(response, show_graphiql)
else:
result = None
return result, status_code
def execute(self, *args, **kwargs):
return execute(self.schema, *args, **kwargs)
def execute_graphql_request(self, data, query, variables, operation_name, show_graphiql=False):
if not query:
if show_graphiql:
return None
raise HttpError(BadRequest('Must provide query string.'))
try:
source = Source(query, name='GraphQL request')
ast = parse(source)
validation_errors = validate(self.schema, ast)
if validation_errors:
return ExecutionResult(
errors=validation_errors,
invalid=True,
)
except Exception as e:
return ExecutionResult(errors=[e], invalid=True)
if web.ctx.method.lower() == 'get':
operation_ast = get_operation_ast(ast, operation_name)
if operation_ast and operation_ast.operation != 'query':
if show_graphiql:
return None
raise HttpError(MethodNotAllowed(
['POST'], 'Can only perform a {} operation from a POST request.'.format(operation_ast.operation)
))
try:
return self.execute(
ast,
root_value=self.get_root_value(),
variable_values=variables or {},
operation_name=operation_name,
context_value=self.get_context(),
middleware=self.get_middleware(),
executor=self.get_executor()
)
except Exception as e:
return ExecutionResult(errors=[e], invalid=True)
def parse_body(self):
content_type = web.ctx.env.get('CONTENT_TYPE')
if content_type == 'application/graphql':
            return {'query': web.data()}
elif content_type == 'application/json':
try:
request_json = json.loads(web.data().decode('utf8'))
if self.batch:
assert isinstance(request_json, list)
else:
assert isinstance(request_json, dict)
return request_json
except:
raise HttpError(BadRequest('POST body sent invalid JSON.'))
elif content_type == 'application/x-www-form-urlencoded':
return dict(urlparse.parse_qsl(web.data()))
elif content_type == 'multipart/form-data':
return web.data()
return {}
def json_encode(self, d, show_graphiql=False):
pretty = self.pretty or show_graphiql or web.input().get('pretty')
if not pretty:
return json.dumps(d, separators=(',', ':'))
return json.dumps(d, sort_keys=True,
indent=2, separators=(',', ': '))
def get_graphql_params(self, data):
variables = query = id = operation_name = None
        query = self.check_data_undefined('query', data)
        variables = self.check_data_undefined('variables', data)
        id = self.check_data_undefined('id', data)
        operation_name = self.check_data_undefined('operationName', data)
if variables and isinstance(variables, six.text_type):
try:
variables = json.loads(variables)
except:
raise HttpError(BadRequest('Variables are invalid JSON.'))
return query, variables, operation_name, id
def GET(self):
return self.dispatch()
def POST(self):
return self.dispatch()
@staticmethod
    def check_data_undefined(param, data):
parameter = web.input().get(param, None) or data.get(param, None)
return parameter if parameter != "undefined" else None
@classmethod
def can_display_graphiql(cls, data):
raw = 'raw' in web.input() or 'raw' in web.data()
return not raw and cls.request_wants_html()
@classmethod
def request_wants_html(cls):
accepted = get_accepted_content_types()
html_index = accepted.count('text/html')
json_index = accepted.count('application/json')
return html_index > json_index
@staticmethod
def format_error(error):
if isinstance(error, GraphQLError):
return format_graphql_error(error)
return {'message': six.text_type(error)}
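# Hedged usage sketch (not part of the original module): one plausible way to
# mount GraphQLView in a web.py application, assuming graphql-core 2.x and the
# Python 2 toolchain the imports above imply. The schema, field name and
# '/graphql' route are illustrative assumptions only.
if __name__ == '__main__':
    from graphql import GraphQLObjectType, GraphQLField, GraphQLString

    hello_schema = GraphQLSchema(
        query=GraphQLObjectType(
            name='Query',
            fields={'hello': GraphQLField(GraphQLString,
                                          resolver=lambda root, info: 'world')},
        )
    )

    class HelloGraphQLView(GraphQLView):
        schema = hello_schema  # must be a GraphQLSchema instance (asserted in __init__)
        graphiql = True        # also requires the bundled templates/graph template

    urls = ('/graphql', 'HelloGraphQLView')
    app = web.application(urls, globals())
    app.run()  # serves GraphQL on http://localhost:8080/graphql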
|
py
|
1a577be649ca4cd884d2b8fcf55a968bbac65e98
|
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads data into BigQuery from an object in Google Cloud Storage.
For more information, see the README.md under /bigquery.
Example invocation:
$ python load_data_from_gcs.py example_dataset example_table \
gs://example-bucket/example-data.csv
The dataset and table should already exist.
"""
import argparse
import time
import uuid
from gcloud import bigquery
def load_data_from_gcs(dataset_name, table_name, source):
bigquery_client = bigquery.Client()
dataset = bigquery_client.dataset(dataset_name)
table = dataset.table(table_name)
job_name = str(uuid.uuid4())
job = bigquery_client.load_table_from_storage(
job_name, table, source)
job.begin()
wait_for_job(job)
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_name, table_name))
def wait_for_job(job):
while True:
job.reload()
if job.state == 'DONE':
if job.error_result:
raise RuntimeError(job.error_result)
return
time.sleep(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('dataset_name')
parser.add_argument('table_name')
parser.add_argument(
'source', help='The Google Cloud Storage object to load. Must be in '
'the format gs://bucket_name/object_name')
args = parser.parse_args()
load_data_from_gcs(
args.dataset_name,
args.table_name,
args.source)
|
py
|
1a577bffe54489acbe8c9ecd634e5ce6d7f49b13
|
from __future__ import absolute_import
import plotly.graph_objs as go
from collections import OrderedDict
from plotly.tests.utils import TestCaseNoTemplate
class FigureTest(TestCaseNoTemplate):
def test_to_ordered_dict(self):
fig = go.Figure(
layout={
"yaxis": {"range": [1, 2]},
"xaxis": {"range": [1, 2]},
"shapes": [
{"xsizemode": "pixel", "type": "circle"},
{"type": "line", "xsizemode": "pixel"},
],
},
data=[
{"type": "scatter", "marker": {"size": 12, "color": "green"}},
{"type": "bar", "y": [1, 2], "x": [1, 2]},
],
)
result = fig.to_ordered_dict()
expected = OrderedDict(
[
(
"data",
[
OrderedDict(
[
(
"marker",
OrderedDict([("color", "green"), ("size", 12)]),
),
("type", "scatter"),
]
),
OrderedDict([("type", "bar"), ("x", [1, 2]), ("y", [1, 2])]),
],
),
(
"layout",
OrderedDict(
[
(
"shapes",
[
OrderedDict(
[("type", "circle"), ("xsizemode", "pixel")]
),
OrderedDict(
[("type", "line"), ("xsizemode", "pixel")]
),
],
),
("xaxis", OrderedDict([("range", [1, 2])])),
("yaxis", OrderedDict([("range", [1, 2])])),
]
),
),
]
)
self.assertEqual(result, expected)
def test_to_ordered_with_frames(self):
frame = go.Frame(
layout={
"yaxis": {"range": [1, 2]},
"xaxis": {"range": [1, 2]},
"shapes": [
{"xsizemode": "pixel", "type": "circle"},
{"type": "line", "xsizemode": "pixel"},
],
},
data=[
{"type": "scatter", "marker": {"size": 12, "color": "green"}},
{"type": "bar", "y": [1, 2], "x": [1, 2]},
],
)
fig = go.Figure(frames=[{}, frame])
result = fig.to_ordered_dict()
expected_frame = OrderedDict(
[
(
"data",
[
OrderedDict(
[
(
"marker",
OrderedDict([("color", "green"), ("size", 12)]),
),
("type", "scatter"),
]
),
OrderedDict([("type", "bar"), ("x", [1, 2]), ("y", [1, 2])]),
],
),
(
"layout",
OrderedDict(
[
(
"shapes",
[
OrderedDict(
[("type", "circle"), ("xsizemode", "pixel")]
),
OrderedDict(
[("type", "line"), ("xsizemode", "pixel")]
),
],
),
("xaxis", OrderedDict([("range", [1, 2])])),
("yaxis", OrderedDict([("range", [1, 2])])),
]
),
),
]
)
expected = OrderedDict(
[
("data", []),
("layout", OrderedDict()),
("frames", [OrderedDict(), expected_frame]),
]
)
self.assertEqual(result, expected)
|
py
|
1a577d2aceccc7e24294cef207f931362cd19c7d
|
import pandas as pd
import numpy as np
import pickle
np.random.seed(1212)
import keras
from keras.models import Model
from keras.layers import *
from keras import optimizers
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from keras.models import model_from_json
from keras.utils.np_utils import to_categorical
from preprocessing import convert_img_to_csv
def train_model():
    df_train = pd.read_csv('model/train_final.csv', index_col=False)
    labels = df_train[['784']]
    df_train.drop(df_train.columns[[784]], axis=1, inplace=True)
    df_train.head()
    labels = np.array(labels)
    cat = to_categorical(labels, num_classes=24)
    print(cat[0])
    x = len(df_train.axes[0])
    l = []
    for i in range(x):
        l.append(np.array(df_train[i:i + 1]).reshape(28, 28, 1))
    np.random.seed(7)
    model = Sequential()
    model.add(Conv2D(30, (5, 5), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(24, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(np.array(l), cat, epochs=30, shuffle=True)
    model_json = model.to_json()
    with open("model/model_final.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("model/model_final.h5")
if __name__ == '__main__':
train_model()
|
py
|
1a577f7c11d4487a662485853cdf9f1d63114e9d
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "nonstringenumsclient"
VERSION = "0.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.6.18", "azure-core<2.0.0,>=1.8.0"]
setup(
name=NAME,
version=VERSION,
description="NonStringEnumsClient",
author_email="",
url="",
keywords=["Swagger", "NonStringEnumsClient"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Testing non-string enums.
"""
)
|
py
|
1a577fa634e00c47d1cef798c282a8047b087ce0
|
import os, glob
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanException
from conans.model.version import Version
class IlmBaseConan(ConanFile):
name = "ilmbase"
description = "IlmBase is a component of OpenEXR. OpenEXR is a high dynamic-range (HDR) image file format developed by Industrial Light & Magic for use in computer imaging applications."
version = "2.3.0"
license = "BSD"
url = "https://github.com/Mikayex/conan-ilmbase.git"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "namespace_versioning": [True, False], "fPIC": [True, False]}
default_options = "shared=False", "namespace_versioning=True", "fPIC=True"
generators = "cmake"
exports = "FindIlmBase.cmake"
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("fPIC")
def configure(self):
if "fPIC" in self.options.fields and self.options.shared:
self.options.fPIC = True
if self.settings.compiler == 'gcc' and Version(str(self.settings.compiler.version)) < "5":
raise ConanException("gcc >= 5 is required (support for C++14)")
if self.settings.compiler == 'apple-clang' and self.settings.compiler.libcxx == 'libstdc++':
raise ConanException("Compile with stdlib=libc++ using settings.compiler.libcxx")
def source(self):
url = "https://github.com/openexr/openexr/releases/download/v{version}/ilmbase-{version}.tar.gz"
tools.get(url.format(version=self.version))
tools.replace_in_file(os.path.join('ilmbase-{}'.format(self.version), 'CMakeLists.txt'), 'PROJECT ( ilmbase )',
"""PROJECT ( ilmbase )
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
""")
def build(self):
yes_no = {True: "enable", False: "disable"}
args = ["--{}-shared".format(yes_no.get(bool(self.options.shared))),
"--{}-static".format(yes_no.get(not bool(self.options.shared))),
"--{}-namespaceversioning".format(yes_no.get(bool(self.options.namespace_versioning))),
]
autotools = AutoToolsBuildEnvironment(self)
autotools.configure(configure_dir='ilmbase-{}'.format(self.version), args=args)
autotools.make()
tools.replace_prefix_in_pc_file("IlmBase.pc", "${package_root_path_ilmbase}")
def package(self):
autotools = AutoToolsBuildEnvironment(self)
autotools.install()
self.copy("FindIlmBase.cmake", src=".", dst=".")
self.copy("license*", dst="licenses", src="ilmbase-%s" % self.version, ignore_case=True, keep_path=False)
for f in glob.glob(os.path.join(self.package_folder, 'lib', '*.la')):
os.remove(f)
def package_info(self):
self.cpp_info.includedirs = [os.path.join('include', 'OpenEXR'), ]
self.cpp_info.libs = ['Half', 'Iex', 'IexMath', 'IlmThread', 'Imath']
if self.options.shared and self.settings.os == "Windows":
self.cpp_info.defines.append("OPENEXR_DLL")
if not self.settings.os == "Windows":
self.cpp_info.cppflags = ["-pthread"]
|
py
|
1a577fb31f026856d299a346bc52451d3acfce4e
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Common base for crypto handlers
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import json
import base64
import time
import resources.lib.common as common
from resources.lib.services.msl.msl_utils import MSL_DATA_FILENAME
from resources.lib.utils.esn import get_esn
from resources.lib.utils.logging import LOG
class MSLBaseCrypto:
"""
Common base class for MSL crypto operations.
Handles MasterToken and sequence number
"""
def __init__(self):
self._msl_data = None
self.mastertoken = None
self.serial_number = None
self.sequence_number = None
self.renewal_window = None
self.expiration = None
self.bound_esn = None # Specify the ESN bound to mastertoken
def load_msl_data(self, msl_data=None):
self._msl_data = msl_data if msl_data else {}
if msl_data:
self.set_mastertoken(msl_data['tokens']['mastertoken'])
self.bound_esn = msl_data.get('bound_esn', get_esn())
def compare_mastertoken(self, mastertoken):
"""Check if the new MasterToken is different from current due to renew"""
if not self._mastertoken_is_newer_that(mastertoken):
LOG.debug('MSL mastertoken is changed due to renew')
self.set_mastertoken(mastertoken)
self._save_msl_data()
    def _mastertoken_is_newer_than(self, mastertoken):
        """Check if the current MasterToken is newer than the specified mastertoken"""
# Based on cadmium player sourcecode and ref. to [isNewerThan] in:
# https://github.com/Netflix/msl/blob/master/core/src/main/java/com/netflix/msl/tokens/MasterToken.java
new_tokendata = json.loads(
base64.standard_b64decode(mastertoken['tokendata'].encode('utf-8')).decode('utf-8'))
if new_tokendata['sequencenumber'] == self.sequence_number:
return new_tokendata['expiration'] > self.expiration
if new_tokendata['sequencenumber'] > self.sequence_number:
cut_off = new_tokendata['sequencenumber'] - pow(2, 53) + 127
return self.sequence_number >= cut_off
cut_off = self.sequence_number - pow(2, 53) + 127
return new_tokendata['sequencenumber'] < cut_off
def parse_key_response(self, headerdata, esn, save_to_disk):
"""Parse a key response and update crypto keys"""
self.set_mastertoken(headerdata['keyresponsedata']['mastertoken'])
self._init_keys(headerdata['keyresponsedata'])
self.bound_esn = esn
if save_to_disk:
self._save_msl_data()
def set_mastertoken(self, mastertoken):
"""Set the MasterToken and check it for validity"""
tokendata = json.loads(
base64.standard_b64decode(mastertoken['tokendata'].encode('utf-8')).decode('utf-8'))
self.mastertoken = mastertoken
self.serial_number = tokendata['serialnumber']
self.sequence_number = tokendata.get('sequencenumber', 0)
self.renewal_window = tokendata['renewalwindow']
self.expiration = tokendata['expiration']
def _save_msl_data(self):
"""Save crypto keys and MasterToken to disk"""
self._msl_data['tokens'] = {'mastertoken': self.mastertoken}
self._msl_data.update(self._export_keys())
self._msl_data['bound_esn'] = self.bound_esn
common.save_file_def(MSL_DATA_FILENAME, json.dumps(self._msl_data).encode('utf-8'))
LOG.debug('Successfully saved MSL data to disk')
def _init_keys(self, key_response_data):
"""Initialize crypto keys from key_response_data"""
raise NotImplementedError
def _export_keys(self):
"""Export crypto keys to a dict"""
raise NotImplementedError
def get_user_id_token(self, profile_guid):
"""Get a valid the user id token associated to a profile guid"""
if 'user_id_tokens' in self._msl_data:
user_id_token = self._msl_data['user_id_tokens'].get(profile_guid)
if user_id_token and not self.is_user_id_token_expired(user_id_token):
return user_id_token
return None
def save_user_id_token(self, profile_guid, user_token_id):
"""Save or update a user id token associated to a profile guid"""
if 'user_id_tokens' not in self._msl_data:
save_msl_data = True
self._msl_data['user_id_tokens'] = {
profile_guid: user_token_id
}
else:
save_msl_data = not self._msl_data['user_id_tokens'].get(profile_guid) == user_token_id
self._msl_data['user_id_tokens'][profile_guid] = user_token_id
if save_msl_data:
self._save_msl_data()
def clear_user_id_tokens(self):
"""Clear all user id tokens"""
self._msl_data.pop('user_id_tokens', None)
self._save_msl_data()
def is_user_id_token_expired(self, user_id_token):
"""Check if user id token is expired"""
token_data = json.loads(base64.standard_b64decode(user_id_token['tokendata']))
# Subtract 5min as a safety measure
return (token_data['expiration'] - 300) < time.time()
def is_current_mastertoken_expired(self):
"""Check if the current MasterToken is expired"""
return self.expiration <= time.time()
def get_current_mastertoken_validity(self):
"""Gets a dict values to know if current MasterToken is renewable and/or expired"""
time_now = time.time()
renewable = self.renewal_window < time_now
expired = self.expiration <= time_now
return {'is_renewable': renewable, 'is_expired': expired}
|
py
|
1a57802e69eae1a947573ca4049a0618d5ad0a71
|
#!/usr/bin/env python
import os
import sys
from DIRAC import S_OK, S_ERROR, gLogger, exit
from DIRAC.Core.Base import Script
Script.setUsageMessage('''Register SE files from a list of files to DFC. The files in the list must be locally readable
{0} [option|cfgfile] DFCRoot LocalRoot Filelist SE
Example: {0} /juno/lustre/junofs/PmtCharacterization/scan_data/soft/root_macros /junofs/PmtCharacterization/scan_data/soft/root_macros filelist.txt IHEP-STORM'''.format(Script.scriptName))
Script.registerSwitch( 'e', 'existCheck', 'Check if file exists')
Script.registerSwitch( 'q:', 'querySkip=', 'Skip files in the meta query')
Script.registerSwitch( 'b:', 'bufferSize=', 'Register buffer size, default to 100')
Script.parseCommandLine(ignoreErrors = False)
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
fcc = FileCatalogClient('DataManagement/FileCatalog')
args = Script.getPositionalArgs()
if len(args) != 4:
Script.showHelp()
exit(1)
dfcRoot = args[0]
if (not args[1]) or args[1].endswith(os.sep):
localRoot = args[1]
else:
localRoot = args[1] + os.sep
fileList = args[2]
toSE = args[3]
lfnQuery = []
existCheck = False
bufferSize = 100
switches = Script.getUnprocessedSwitches()
for switch in switches:
if switch[0] == 'q' or switch[0] == 'querySkip':
result = fcc.findFilesByMetadata({'juno_transfer': switch[1]}, '/')
if result['OK']:
lfnQuery += result['Value']
if switch[0] == 'e' or switch[0] == 'existCheck':
existCheck = True
if switch[0] == 'b' or switch[0] == 'bufferSize':
bufferSize = int(switch[1])
lfnQuery = set(lfnQuery)
counter = 0
dm = DataManager()
fileTupleBuffer = []
with open(fileList) as file_obj:
for fullFn in file_obj:
counter += 1
print(fullFn)
fullFn=fullFn.strip('\n')
if not fullFn.startswith(localRoot):
            gLogger.error('%s does not start with %s' % (fullFn, localRoot))
continue
lastPart = fullFn[len(localRoot):]
#lastPart = os.path.basename(fullFn)
lfn = os.path.join(dfcRoot, lastPart)
print(lfn)
if lfn in lfnQuery:
if counter%1000 == 0:
gLogger.notice('Skip file in query counter: %s' % counter)
continue
if existCheck:
result = fcc.isFile(lfn)
if result['OK'] and lfn in result['Value']['Successful'] and result['Value']['Successful'][lfn]:
if counter%1000 == 0:
gLogger.notice('Skip file existed counter: %s' % counter)
continue
size = os.path.getsize(fullFn)
adler32 = fileAdler(fullFn)
guid = makeGuid()
fileTuple = ( lfn, fullFn, size, toSE, guid, adler32 )
#gLogger.notice('the parameter to registered %s %s %s %s %s %s' % (lfn,fullFn,size,toSE,guid,adler32))
fileTupleBuffer.append(fileTuple)
gLogger.debug('Register to lfn: %s' % lfn)
gLogger.debug('fileTuple: %s' % (fileTuple,))
if len(fileTupleBuffer) >= bufferSize:
result = dm.registerFile( fileTupleBuffer )
if not result['OK']:
gLogger.error('Can not register %s' % fullFn)
exit(1)
del fileTupleBuffer[:]
gLogger.notice('%s files registered' % counter)
if fileTupleBuffer:
result = dm.registerFile( fileTupleBuffer )
if not result['OK']:
gLogger.error('Can not register %s' % fullFn)
exit(1)
del fileTupleBuffer[:]
gLogger.notice('Total %s files registered' % counter)
|
py
|
1a57825fd484d6d1aa0cd4f4880de05275f9a9f2
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import uuid
import pytest
from nvflare.apis.controller_spec import TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from .controller_test import TestController, create_client, create_task, get_ready, launch_task
class TestBasic(TestController):
@pytest.mark.parametrize("task_name,client_name", [["__test_task", "__test_client"]])
def test_process_submission_invalid_task(self, task_name, client_name):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
with pytest.raises(RuntimeError, match=f"Unknown task: {task_name} from client {client_name}."):
controller.process_submission(
client=client, task_name=task_name, task_id=str(uuid.uuid4()), fl_ctx=FLContext(), result=Shareable()
)
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
@pytest.mark.parametrize("num_client_requests", [1, 2, 3, 4])
def test_process_task_request_client_request_multiple_times(self, method, num_client_requests):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
input_data = Shareable()
input_data["hello"] = "world"
task = create_task("__test_task", data=input_data)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
for i in range(num_client_requests):
task_name_out, _, data = controller.process_task_request(client, fl_ctx)
assert task_name_out == "__test_task"
assert data == input_data
assert task.last_client_task_map["__test_client"].task_send_count == num_client_requests
controller.cancel_task(task)
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_process_submission(self, method):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task("__test_task")
kwargs = {"targets": [client]}
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": kwargs,
},
)
get_ready(launch_thread)
task_name_out, client_task_id, data = controller.process_task_request(client, fl_ctx)
# in here we make up client results:
result = Shareable()
result["result"] = "result"
controller.process_submission(
client=client, task_name="__test_task", task_id=client_task_id, fl_ctx=fl_ctx, result=result
)
assert task.last_client_task_map["__test_client"].result == result
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
@pytest.mark.parametrize("timeout", [1, 2])
def test_task_timeout(self, method, timeout):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task(name="__test_task", data=Shareable(), timeout=timeout)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
assert controller.get_num_standing_tasks() == 1
time.sleep(timeout + 1)
assert controller.get_num_standing_tasks() == 0
assert task.completion_status == TaskCompletionStatus.TIMEOUT
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_cancel_task(self, method):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task(name="__test_task")
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
assert controller.get_num_standing_tasks() == 1
controller.cancel_task(task=task)
controller._check_tasks()
assert controller.get_num_standing_tasks() == 0
assert task.completion_status == TaskCompletionStatus.CANCELLED
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_cancel_all_tasks(self, method):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task("__test_task")
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
task1 = create_task("__test_task1")
launch_thread1 = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task1,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread1)
assert controller.get_num_standing_tasks() == 2
controller.cancel_all_tasks()
controller._check_tasks()
assert controller.get_num_standing_tasks() == 0
assert task.completion_status == TaskCompletionStatus.CANCELLED
assert task1.completion_status == TaskCompletionStatus.CANCELLED
launch_thread.join()
self.stop_controller(controller, fl_ctx)
|
py
|
1a578280890f34186e9e04448d7cce88a0281070
|
from brownie import (
network,
accounts,
config,
interface,
Contract,
)
from brownie.network.state import Chain
from brownie import web3
from web3 import Web3
def get_account(index=None, id=None):
if index is not None:
return accounts[index]
if id:
return accounts.load(id)
return accounts.add(config["wallets"]["from_key"])
def get_web3():
return Web3(web3.provider)
def check_solution(setup_contract):
if setup_contract.isSolved():
print("Challenge solved!")
else:
print("Challenge not solved...")
|
py
|
1a57828c3cef041660d42f0fbb78e4be57f42f00
|
import _plotly_utils.basevalidators
class DxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name='dx', parent_name='bar', **kwargs):
super(DxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', True),
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'info'),
**kwargs
)
|
py
|
1a57839450f69d1be30394eb1cb9126d6d9bb2ef
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
from paddle.vision.ops import DeformConv2D
from ...modules.init import kaiming_normal_, constant_, constant_init
from .builder import GENERATORS
@paddle.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for m in module_list:
if isinstance(m, nn.Conv2D):
kaiming_normal_(m.weight, **kwargs)
scale_weight = scale * m.weight
m.weight.set_value(scale_weight)
if m.bias is not None:
constant_(m.bias, bias_fill)
elif isinstance(m, nn.Linear):
kaiming_normal_(m.weight, **kwargs)
scale_weight = scale * m.weight
m.weight.set_value(scale_weight)
if m.bias is not None:
constant_(m.bias, bias_fill)
class ResidualBlockNoBN(nn.Layer):
"""Residual block without BN.
It has a style of:
---Conv-ReLU-Conv-+-
|________________|
Args:
nf (int): Channel number of intermediate features.
Default: 64.
"""
def __init__(self, nf=64):
super(ResidualBlockNoBN, self).__init__()
self.nf = nf
self.conv1 = nn.Conv2D(self.nf, self.nf, 3, 1, 1)
self.conv2 = nn.Conv2D(self.nf, self.nf, 3, 1, 1)
self.relu = nn.ReLU()
default_init_weights([self.conv1, self.conv2], 0.1)
def forward(self, x):
identity = x
out = self.conv2(self.relu(self.conv1(x)))
return identity + out
def MakeMultiBlocks(func, num_layers, nf=64):
"""Make layers by stacking the same blocks.
Args:
func (nn.Layer): nn.Layer class for basic block.
num_layers (int): number of blocks.
Returns:
nn.Sequential: Stacked blocks in nn.Sequential.
"""
Blocks = nn.Sequential()
for i in range(num_layers):
Blocks.add_sublayer('block%d' % i, func(nf))
return Blocks
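# Hedged usage example (illustrative only):
#   blocks = MakeMultiBlocks(ResidualBlockNoBN, 5, nf=64)
# returns an nn.Sequential of five 64-channel residual blocks; EDVRNet below
# builds its feature-extraction and reconstruction stages this way.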
class PredeblurResNetPyramid(nn.Layer):
"""Pre-dublur module.
Args:
in_nf (int): Channel number of input image. Default: 3.
nf (int): Channel number of intermediate features. Default: 64.
HR_in (bool): Whether the input has high resolution. Default: False.
"""
def __init__(self, in_nf=3, nf=64, HR_in=False):
super(PredeblurResNetPyramid, self).__init__()
self.in_nf = in_nf
self.nf = nf
self.HR_in = True if HR_in else False
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
if self.HR_in:
self.conv_first_1 = nn.Conv2D(in_channels=self.in_nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.conv_first_2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.conv_first_3 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
else:
self.conv_first = nn.Conv2D(in_channels=self.in_nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.RB_L1_1 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_2 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_3 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_4 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_5 = ResidualBlockNoBN(nf=self.nf)
self.RB_L2_1 = ResidualBlockNoBN(nf=self.nf)
self.RB_L2_2 = ResidualBlockNoBN(nf=self.nf)
self.RB_L3_1 = ResidualBlockNoBN(nf=self.nf)
self.deblur_L2_conv = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.deblur_L3_conv = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.upsample = nn.Upsample(scale_factor=2,
mode="bilinear",
align_corners=False,
align_mode=0)
def forward(self, x):
if self.HR_in:
L1_fea = self.Leaky_relu(self.conv_first_1(x))
L1_fea = self.Leaky_relu(self.conv_first_2(L1_fea))
L1_fea = self.Leaky_relu(self.conv_first_3(L1_fea))
else:
L1_fea = self.Leaky_relu(self.conv_first(x))
L2_fea = self.deblur_L2_conv(L1_fea)
L2_fea = self.Leaky_relu(L2_fea)
L3_fea = self.deblur_L3_conv(L2_fea)
L3_fea = self.Leaky_relu(L3_fea)
L3_fea = self.RB_L3_1(L3_fea)
L3_fea = self.upsample(L3_fea)
L2_fea = self.RB_L2_1(L2_fea) + L3_fea
L2_fea = self.RB_L2_2(L2_fea)
L2_fea = self.upsample(L2_fea)
L1_fea = self.RB_L1_1(L1_fea)
L1_fea = self.RB_L1_2(L1_fea) + L2_fea
out = self.RB_L1_3(L1_fea)
out = self.RB_L1_4(out)
out = self.RB_L1_5(out)
return out
class TSAFusion(nn.Layer):
"""Temporal Spatial Attention (TSA) fusion module.
Temporal: Calculate the correlation between center frame and
neighboring frames;
Spatial: It has 3 pyramid levels, the attention is similar to SFT.
(SFT: Recovering realistic texture in image super-resolution by deep
spatial feature transform.)
Args:
nf (int): Channel number of middle features. Default: 64.
nframes (int): Number of frames. Default: 5.
center (int): The index of center frame. Default: 2.
"""
def __init__(self, nf=64, nframes=5, center=2):
super(TSAFusion, self).__init__()
self.nf = nf
self.nframes = nframes
self.center = center
self.sigmoid = nn.Sigmoid()
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
self.tAtt_2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.tAtt_1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.fea_fusion = nn.Conv2D(in_channels=self.nf * self.nframes,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_1 = nn.Conv2D(in_channels=self.nf * self.nframes,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.max_pool = nn.MaxPool2D(3, stride=2, padding=1)
self.avg_pool = nn.AvgPool2D(3, stride=2, padding=1, exclusive=False)
self.sAtt_2 = nn.Conv2D(in_channels=2 * self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_3 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.sAtt_4 = nn.Conv2D(
in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0,
)
self.sAtt_5 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.sAtt_add_1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_add_2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_L1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_L2 = nn.Conv2D(
in_channels=2 * self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1,
)
self.sAtt_L3 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.upsample = nn.Upsample(scale_factor=2,
mode="bilinear",
align_corners=False,
align_mode=0)
def forward(self, aligned_fea):
"""
Args:
            aligned_fea (Tensor): Aligned features with shape (b, n, c, h, w).
Returns:
Tensor: Features after TSA with the shape (b, c, h, w).
"""
B, N, C, H, W = aligned_fea.shape
x_center = aligned_fea[:, self.center, :, :, :]
emb_rf = self.tAtt_2(x_center)
emb = aligned_fea.reshape([-1, C, H, W])
emb = self.tAtt_1(emb)
emb = emb.reshape([-1, N, self.nf, H, W])
cor_l = []
for i in range(N):
emb_nbr = emb[:, i, :, :, :] #[B,C,W,H]
cor_tmp = paddle.sum(emb_nbr * emb_rf, axis=1)
cor_tmp = paddle.unsqueeze(cor_tmp, axis=1)
cor_l.append(cor_tmp)
cor_prob = paddle.concat(cor_l, axis=1) #[B,N,H,W]
cor_prob = self.sigmoid(cor_prob)
cor_prob = paddle.unsqueeze(cor_prob, axis=2) #[B,N,1,H,W]
cor_prob = paddle.expand(cor_prob, [B, N, self.nf, H, W]) #[B,N,C,H,W]
cor_prob = cor_prob.reshape([B, -1, H, W])
aligned_fea = aligned_fea.reshape([B, -1, H, W])
aligned_fea = aligned_fea * cor_prob
fea = self.fea_fusion(aligned_fea)
fea = self.Leaky_relu(fea)
#spatial fusion
att = self.sAtt_1(aligned_fea)
att = self.Leaky_relu(att)
att_max = self.max_pool(att)
att_avg = self.avg_pool(att)
att_pool = paddle.concat([att_max, att_avg], axis=1)
att = self.sAtt_2(att_pool)
att = self.Leaky_relu(att)
#pyramid
att_L = self.sAtt_L1(att)
att_L = self.Leaky_relu(att_L)
att_max = self.max_pool(att_L)
att_avg = self.avg_pool(att_L)
att_pool = paddle.concat([att_max, att_avg], axis=1)
att_L = self.sAtt_L2(att_pool)
att_L = self.Leaky_relu(att_L)
att_L = self.sAtt_L3(att_L)
att_L = self.Leaky_relu(att_L)
att_L = self.upsample(att_L)
att = self.sAtt_3(att)
att = self.Leaky_relu(att)
att = att + att_L
att = self.sAtt_4(att)
att = self.Leaky_relu(att)
att = self.upsample(att)
att = self.sAtt_5(att)
att_add = self.sAtt_add_1(att)
att_add = self.Leaky_relu(att_add)
att_add = self.sAtt_add_2(att_add)
att = self.sigmoid(att)
fea = fea * att * 2 + att_add
return fea
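# Hedged shape example (not part of the original module): TSAFusion fuses a
# stack of aligned frames into a single feature map. With the defaults used in
# EDVRNet, something like
#   fusion = TSAFusion(nf=64, nframes=5, center=2)
#   aligned = paddle.randn([2, 5, 64, 32, 32])   # (b, n, c, h, w)
#   fused = fusion(aligned)                      # -> shape [2, 64, 32, 32]
# should hold; spatial sizes are assumed divisible by 4 so the two pooling /
# upsampling stages in the spatial-attention branch line up.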
class DCNPack(nn.Layer):
"""Modulated deformable conv for deformable alignment.
Ref:
Delving Deep into Deformable Alignment in Video Super-Resolution.
"""
def __init__(self,
num_filters=64,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
deformable_groups=8,
extra_offset_mask=True):
super(DCNPack, self).__init__()
self.extra_offset_mask = extra_offset_mask
self.deformable_groups = deformable_groups
self.num_filters = num_filters
if isinstance(kernel_size, int):
self.kernel_size = [kernel_size, kernel_size]
self.conv_offset_mask = nn.Conv2D(in_channels=self.num_filters,
out_channels=self.deformable_groups *
3 * self.kernel_size[0] *
self.kernel_size[1],
kernel_size=self.kernel_size,
stride=stride,
padding=padding)
self.total_channels = self.deformable_groups * 3 * self.kernel_size[
0] * self.kernel_size[1]
self.split_channels = self.total_channels // 3
self.dcn = DeformConv2D(in_channels=self.num_filters,
out_channels=self.num_filters,
kernel_size=self.kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
deformable_groups=self.deformable_groups)
self.sigmoid = nn.Sigmoid()
# init conv offset
constant_init(self.conv_offset_mask, 0., 0.)
def forward(self, fea_and_offset):
out = None
x = None
if self.extra_offset_mask:
out = self.conv_offset_mask(fea_and_offset[1])
x = fea_and_offset[0]
o1 = out[:, 0:self.split_channels, :, :]
o2 = out[:, self.split_channels:2 * self.split_channels, :, :]
mask = out[:, 2 * self.split_channels:, :, :]
offset = paddle.concat([o1, o2], axis=1)
mask = self.sigmoid(mask)
y = self.dcn(x, offset, mask)
return y
class PCDAlign(nn.Layer):
"""Alignment module using Pyramid, Cascading and Deformable convolution
(PCD). It is used in EDVR.
Ref:
EDVR: Video Restoration with Enhanced Deformable Convolutional Networks
Args:
nf (int): Channel number of middle features. Default: 64.
groups (int): Deformable groups. Defaults: 8.
"""
def __init__(self, nf=64, groups=8):
super(PCDAlign, self).__init__()
self.nf = nf
self.groups = groups
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
self.upsample = nn.Upsample(scale_factor=2,
mode="bilinear",
align_corners=False,
align_mode=0)
# Pyramid has three levels:
# L3: level 3, 1/4 spatial size
# L2: level 2, 1/2 spatial size
# L1: level 1, original spatial size
# L3
self.PCD_Align_L3_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L3_offset_conv2 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L3_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
#L2
self.PCD_Align_L2_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L2_offset_conv2 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L2_offset_conv3 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L2_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
self.PCD_Align_L2_fea_conv = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
#L1
self.PCD_Align_L1_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L1_offset_conv2 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L1_offset_conv3 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L1_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
self.PCD_Align_L1_fea_conv = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
#cascade
self.PCD_Align_cas_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_cas_offset_conv2 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_cascade_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
def forward(self, nbr_fea_l, ref_fea_l):
"""Align neighboring frame features to the reference frame features.
Args:
nbr_fea_l (list[Tensor]): Neighboring feature list. It
contains three pyramid levels (L1, L2, L3),
each with shape (b, c, h, w).
ref_fea_l (list[Tensor]): Reference feature list. It
contains three pyramid levels (L1, L2, L3),
each with shape (b, c, h, w).
Returns:
Tensor: Aligned features.
"""
#L3
L3_offset = paddle.concat([nbr_fea_l[2], ref_fea_l[2]], axis=1)
L3_offset = self.PCD_Align_L3_offset_conv1(L3_offset)
L3_offset = self.Leaky_relu(L3_offset)
L3_offset = self.PCD_Align_L3_offset_conv2(L3_offset)
L3_offset = self.Leaky_relu(L3_offset)
L3_fea = self.PCD_Align_L3_dcn([nbr_fea_l[2], L3_offset])
L3_fea = self.Leaky_relu(L3_fea)
#L2
L2_offset = paddle.concat([nbr_fea_l[1], ref_fea_l[1]], axis=1)
L2_offset = self.PCD_Align_L2_offset_conv1(L2_offset)
L2_offset = self.Leaky_relu(L2_offset)
L3_offset = self.upsample(L3_offset)
L2_offset = paddle.concat([L2_offset, L3_offset * 2], axis=1)
L2_offset = self.PCD_Align_L2_offset_conv2(L2_offset)
L2_offset = self.Leaky_relu(L2_offset)
L2_offset = self.PCD_Align_L2_offset_conv3(L2_offset)
L2_offset = self.Leaky_relu(L2_offset)
L2_fea = self.PCD_Align_L2_dcn([nbr_fea_l[1], L2_offset])
L3_fea = self.upsample(L3_fea)
L2_fea = paddle.concat([L2_fea, L3_fea], axis=1)
L2_fea = self.PCD_Align_L2_fea_conv(L2_fea)
L2_fea = self.Leaky_relu(L2_fea)
#L1
L1_offset = paddle.concat([nbr_fea_l[0], ref_fea_l[0]], axis=1)
L1_offset = self.PCD_Align_L1_offset_conv1(L1_offset)
L1_offset = self.Leaky_relu(L1_offset)
L2_offset = self.upsample(L2_offset)
L1_offset = paddle.concat([L1_offset, L2_offset * 2], axis=1)
L1_offset = self.PCD_Align_L1_offset_conv2(L1_offset)
L1_offset = self.Leaky_relu(L1_offset)
L1_offset = self.PCD_Align_L1_offset_conv3(L1_offset)
L1_offset = self.Leaky_relu(L1_offset)
L1_fea = self.PCD_Align_L1_dcn([nbr_fea_l[0], L1_offset])
L2_fea = self.upsample(L2_fea)
L1_fea = paddle.concat([L1_fea, L2_fea], axis=1)
L1_fea = self.PCD_Align_L1_fea_conv(L1_fea)
#cascade
offset = paddle.concat([L1_fea, ref_fea_l[0]], axis=1)
offset = self.PCD_Align_cas_offset_conv1(offset)
offset = self.Leaky_relu(offset)
offset = self.PCD_Align_cas_offset_conv2(offset)
offset = self.Leaky_relu(offset)
L1_fea = self.PCD_Align_cascade_dcn([L1_fea, offset])
L1_fea = self.Leaky_relu(L1_fea)
return L1_fea
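

# A minimal usage sketch (not part of the original module), assuming a working
# PaddlePaddle install and the DCNPack op used above. The helper name
# `_pcd_align_example` is hypothetical; it only illustrates the expected
# pyramid shapes: L1 (b, nf, h, w), L2 (b, nf, h/2, w/2), L3 (b, nf, h/4, w/4).
def _pcd_align_example(nf=64, groups=8, b=1, h=32, w=32):
    pcd = PCDAlign(nf=nf, groups=groups)
    nbr_fea_l = [
        paddle.rand([b, nf, h, w]),
        paddle.rand([b, nf, h // 2, w // 2]),
        paddle.rand([b, nf, h // 4, w // 4]),
    ]
    ref_fea_l = [
        paddle.rand([b, nf, h, w]),
        paddle.rand([b, nf, h // 2, w // 2]),
        paddle.rand([b, nf, h // 4, w // 4]),
    ]
    aligned = pcd(nbr_fea_l, ref_fea_l)  # expected shape: [b, nf, h, w]
    return aligned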
@GENERATORS.register()
class EDVRNet(nn.Layer):
"""EDVR network structure for video super-resolution.
    Currently only the x4 upsampling factor is supported.
Paper:
EDVR: Video Restoration with Enhanced Deformable Convolutional Networks
Args:
in_nf (int): Channel number of input image. Default: 3.
out_nf (int): Channel number of output image. Default: 3.
scale_factor (int): Scale factor from input image to output image. Default: 4.
nf (int): Channel number of intermediate features. Default: 64.
nframes (int): Number of input frames. Default: 5.
        groups (int): Deformable groups. Default: 8.
front_RBs (int): Number of blocks for feature extraction. Default: 5.
back_RBs (int): Number of blocks for reconstruction. Default: 10.
center (int): The index of center frame. Frame counting from 0. Default: None.
        predeblur (bool): Whether to use the predeblur module. Default: False.
        HR_in (bool): Whether the input is already at high resolution. Default: False.
        w_TSA (bool): Whether to use the TSA fusion module. Default: True.
"""
def __init__(self,
in_nf=3,
out_nf=3,
scale_factor=4,
nf=64,
nframes=5,
groups=8,
front_RBs=5,
back_RBs=10,
center=None,
predeblur=False,
HR_in=False,
w_TSA=True):
super(EDVRNet, self).__init__()
self.in_nf = in_nf
self.out_nf = out_nf
self.scale_factor = scale_factor
self.nf = nf
self.nframes = nframes
self.groups = groups
self.front_RBs = front_RBs
self.back_RBs = back_RBs
self.center = nframes // 2 if center is None else center
        self.predeblur = bool(predeblur)
        self.HR_in = bool(HR_in)
        self.w_TSA = bool(w_TSA)
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
if self.predeblur:
self.pre_deblur = PredeblurResNetPyramid(in_nf=self.in_nf,
nf=self.nf,
HR_in=self.HR_in)
self.cov_1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1)
else:
self.conv_first = nn.Conv2D(in_channels=self.in_nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
#feature extraction module
self.feature_extractor = MakeMultiBlocks(ResidualBlockNoBN,
self.front_RBs, self.nf)
self.fea_L2_conv1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.fea_L2_conv2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.fea_L3_conv1 = nn.Conv2D(
in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1,
)
self.fea_L3_conv2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
#PCD alignment module
self.PCDModule = PCDAlign(nf=self.nf, groups=self.groups)
#TSA Fusion module
if self.w_TSA:
self.TSAModule = TSAFusion(nf=self.nf,
nframes=self.nframes,
center=self.center)
else:
self.TSAModule = nn.Conv2D(in_channels=self.nframes * self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1)
#reconstruction module
self.reconstructor = MakeMultiBlocks(ResidualBlockNoBN, self.back_RBs,
self.nf)
self.upconv1 = nn.Conv2D(in_channels=self.nf,
out_channels=4 * self.nf,
kernel_size=3,
stride=1,
padding=1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.upconv2 = nn.Conv2D(in_channels=self.nf,
out_channels=4 * 64,
kernel_size=3,
stride=1,
padding=1)
self.HRconv = nn.Conv2D(in_channels=64,
out_channels=64,
kernel_size=3,
stride=1,
padding=1)
self.conv_last = nn.Conv2D(in_channels=64,
out_channels=self.out_nf,
kernel_size=3,
stride=1,
padding=1)
if self.scale_factor == 4:
self.upsample = nn.Upsample(scale_factor=self.scale_factor,
mode="bilinear",
align_corners=False,
align_mode=0)
def forward(self, x):
"""
Args:
x (Tensor): Input features with shape (b, n, c, h, w).
Returns:
Tensor: Features after EDVR with the shape (b, c, scale_factor*h, scale_factor*w).
"""
B, N, C, H, W = x.shape
x_center = x[:, self.center, :, :, :]
        L1_fea = x.reshape([-1, C, H, W])  # [B*N, C, H, W]
if self.predeblur:
L1_fea = self.pre_deblur(L1_fea)
L1_fea = self.cov_1(L1_fea)
if self.HR_in:
H, W = H // 4, W // 4
else:
L1_fea = self.conv_first(L1_fea)
L1_fea = self.Leaky_relu(L1_fea)
# feature extraction and create Pyramid
L1_fea = self.feature_extractor(L1_fea)
# L2
L2_fea = self.fea_L2_conv1(L1_fea)
L2_fea = self.Leaky_relu(L2_fea)
L2_fea = self.fea_L2_conv2(L2_fea)
L2_fea = self.Leaky_relu(L2_fea)
# L3
L3_fea = self.fea_L3_conv1(L2_fea)
L3_fea = self.Leaky_relu(L3_fea)
L3_fea = self.fea_L3_conv2(L3_fea)
L3_fea = self.Leaky_relu(L3_fea)
L1_fea = L1_fea.reshape([-1, N, self.nf, H, W])
L2_fea = L2_fea.reshape([-1, N, self.nf, H // 2, W // 2])
L3_fea = L3_fea.reshape([-1, N, self.nf, H // 4, W // 4])
# pcd align
ref_fea_l = [
L1_fea[:, self.center, :, :, :], L2_fea[:, self.center, :, :, :],
L3_fea[:, self.center, :, :, :]
]
aligned_fea = []
for i in range(N):
nbr_fea_l = [
L1_fea[:, i, :, :, :], L2_fea[:, i, :, :, :], L3_fea[:,
i, :, :, :]
]
aligned_fea.append(self.PCDModule(nbr_fea_l, ref_fea_l))
# TSA Fusion
aligned_fea = paddle.stack(aligned_fea, axis=1) # [B, N, C, H, W]
fea = None
if not self.w_TSA:
aligned_fea = aligned_fea.reshape([B, -1, H, W])
        fea = self.TSAModule(aligned_fea)  # [B, C, H, W]
#Reconstruct
out = self.reconstructor(fea)
out = self.upconv1(out)
out = self.pixel_shuffle(out)
out = self.Leaky_relu(out)
out = self.upconv2(out)
out = self.pixel_shuffle(out)
out = self.Leaky_relu(out)
out = self.HRconv(out)
out = self.Leaky_relu(out)
out = self.conv_last(out)
if self.HR_in:
base = x_center
else:
base = self.upsample(x_center)
out += base
return out
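

# A minimal smoke-test sketch (not part of the original module), assuming
# PaddlePaddle and the deformable-conv/TSA ops defined above are available.
# Input height/width must be divisible by 4 so the three-level pyramid lines up.
if __name__ == '__main__':
    model = EDVRNet(in_nf=3, out_nf=3, scale_factor=4, nf=64, nframes=5,
                    groups=8, front_RBs=5, back_RBs=10, w_TSA=True)
    dummy = paddle.rand([1, 5, 3, 32, 32])  # (b, n, c, h, w)
    sr = model(dummy)
    print(sr.shape)  # expected: [1, 3, 128, 128]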
|
py
|
1a57852dd9ee261a7e919e3fedca5e620e1561d6
|
# Generated by Django 3.0.2 on 2020-06-29 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20200608_2137'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='commission',
field=models.PositiveIntegerField(default=1),
),
]
|
py
|
1a578651bf0ce11bb82acbb826454b34a1c13b22
|
import pytest
import json
import tempfile
import pyethereum.trie as trie
import logging
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()
def check_testdata(data_keys, expected_keys):
assert set(data_keys) == set(expected_keys), \
"test data changed, please adjust tests"
def load_tests():
try:
fixture = json.load(open('fixtures/trietestnextprev.json', 'r'))
except IOError:
raise IOError("Could not read trietests.json from fixtures",
"Make sure you did 'git submodule init'")
return fixture
def run_test(name):
logger.debug('testing %s', name)
t = trie.Trie(tempfile.mktemp())
data = load_tests()[name]
for k in data['in']:
logger.debug('updating with (%s, %s)', k, k)
t.update(k, k)
for point, prev, nxt in data['tests']:
assert nxt == (t.next(point) or '')
assert prev == (t.prev(point) or '')
def test_basic():
run_test('basic')
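

# Optional convenience entry point (a sketch, not in the original file): lets
# the module be run directly with `python <this file>` instead of via pytest.
if __name__ == '__main__':
    pytest.main([__file__])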
|
py
|
1a5786b5291ad9af6167987cde4630c36b5340bc
|
from numpy import zeros, shape
## Normal
def c2(J,M):
if abs(M)>J:
return 0
else:
        return 1.0/3 + 2.0/3*((J*(J+1)-3*M**2))/((2*J+3)*(2*J-1))
def cp2(J,M):
if abs(M)>J:
return 0
else:
return ((2.0*J+1)*(2*J+5)*(J+1-M))**.5*((2+J-M)*(J+1+M)*(J+2+M))**.5/((2*J+1)*(2*J+3)*(2*J+5))
def cm2(J,M):
if abs(M)>J:
return 0
elif M==J:
return 0
else:
return ((2.0*J-3)*(2*J+1)*(J-1-M))**.5*((J-M)*(J-1+M)*(J+M))**.5/((2*J-3)*(2*J-1)*(2*J+1))
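

# A small demonstration sketch (not part of the original file): tabulate the
# three coefficients for a few (J, M) pairs and confirm that |M| > J gives 0.
if __name__ == '__main__':
    for J in (2, 3):
        for M in range(-J - 1, J + 2):
            print(J, M, c2(J, M), cp2(J, M), cm2(J, M))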
|
py
|
1a578828675ba0f7c24993d975176e9835ea96a0
|
from keras import backend as K
def matthews_correlation(y_true, y_pred):
'''Calculates the Matthews correlation coefficient measure for quality
of binary classification problems.
'''
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tp = K.sum(y_pos * y_pred_pos)
tn = K.sum(y_neg * y_pred_neg)
fp = K.sum(y_neg * y_pred_pos)
fn = K.sum(y_pos * y_pred_neg)
numerator = (tp * tn - fp * fn)
denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator / (denominator + K.epsilon())
def precision(y_true, y_pred):
'''Calculates the precision, a metric for multi-label classification of
how many selected items are relevant.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
'''Calculates the recall, a metric for multi-label classification of
how many relevant items are selected.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def fbeta_score(y_true, y_pred, beta=1):
'''Calculates the F score, the weighted harmonic mean of precision and recall.
This is useful for multi-label classification, where input samples can be
classified as sets of labels. By only using accuracy (precision) a model
would achieve a perfect score by simply assigning every class to every
input. In order to avoid this, a metric should penalize incorrect class
assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
computes this, as a weighted mean of the proportion of correct class
assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to an F-measure. With beta < 1, assigning
correct classes becomes more important, and with beta > 1 the metric is
instead weighted towards penalizing incorrect class assignments.
'''
if beta < 0:
raise ValueError('The lowest choosable beta is zero (only precision).')
# If there are no true positives, fix the F score at 0 like sklearn.
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
def fmeasure_acc(y_true, y_pred):
'''Calculates the f-measure, the harmonic mean of precision and recall.
'''
return fbeta_score(y_true, y_pred, beta=1)
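

# A minimal usage sketch (not part of the original file), assuming a standalone
# Keras installation whose backend matches the `K` functions used above. The
# toy model and data shapes are illustrative only.
if __name__ == '__main__':
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    model = Sequential([Dense(1, activation='sigmoid', input_dim=10)])
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=[matthews_correlation, precision, recall, fmeasure_acc])
    x = np.random.rand(32, 10)
    y = (np.random.rand(32, 1) > 0.5).astype('float32')
    model.fit(x, y, epochs=1, verbose=0)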
|
py
|
1a57893a6ca6d508d029237d6e881c35429a01d9
|
from __future__ import division, print_function, absolute_import
import functools
import numpy as np
import math
import sys
import types
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
# Make See Also linking for our local copy work properly
def _copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
trapz = _copy_func(trapz)
if sys.flags.optimize <= 1:
trapz.__doc__ = trapz.__doc__.replace('sum, cumsum', 'numpy.cumsum')
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
(0.1110884353741496, None)
>>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
(0.11111111111111102, None)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
(0.9999999771971152, None)
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
(1.000000000039565, None)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
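    Examples
    --------
    A minimal sketch; the exact array repr depends on the active NumPy
    print options.

    >>> import numpy as np
    >>> vfunc = vectorize1(lambda x, a: a * x**2, args=(3,), vec_func=False)
    >>> vfunc(np.array([1.0, 2.0]))
    array([ 3., 12.])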
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.quadrature(f, 0.0, 1.0)
(0.11111111111111106, 4.163336342344337e-17)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.quadrature(np.cos, 0.0, np.pi/2)
(0.9999999999999536, 3.9611425250996035e-11)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, insert this value at the beginning of the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simps(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simps(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simps(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(10, 14.25, 0.25)
>>> y = np.arange(3, 12)
>>> integrate.romb(y)
56.0
>>> y = np.sin(np.power(x, 2.5))
>>> integrate.romb(y)
-0.742561336672229
>>> integrate.romb(y, show=True)
Richardson Extrapolation Table for Romberg Integration
====================================================================
-0.81576
4.63862 6.45674
-1.10581 -3.02062 -3.65245
-2.57379 -3.06311 -3.06595 -3.05664
-1.34093 -0.92997 -0.78776 -0.75160 -0.74256
====================================================================
-0.742561336672229
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
r"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+ B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
where :math:`\xi \in [x_0,x_N]`
    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Examples
--------
Compute the integral of sin(x) in [0, :math:`\pi`]:
>>> from scipy.integrate import newton_cotes
>>> def f(x):
... return np.sin(x)
>>> a = 0
>>> b = np.pi
>>> exact = 2
>>> for N in [2, 4, 6, 8, 10]:
... x = np.linspace(a, b, N + 1)
... an, B = newton_cotes(N, 1)
... dx = (b - a) / N
... quad = dx * np.sum(an * f(x))
... error = abs(quad - exact)
... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
...
2 2.094395102 9.43951e-02
4 1.998570732 1.42927e-03
6 2.000017814 1.78136e-05
8 1.999999835 1.64725e-07
10 2.000000001 1.14677e-09
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
|
py
|
1a57895784c9fcc416e4f363e9fa41d50ca1edc1
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("moonbridgevj7kcklujarx.onion:37511", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "moonbridgevj7kcklujarx.onion")
assert_equal(cmd.port, 37511)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
|
py
|
1a578ba24743c44e45159d9d66ad409030348d70
|
'''
NERYS
a universal product monitor
Current Module: Other Sites
Usage:
NERYS will monitor specified sites for keywords and sends a Discord alert
when a page has a specified keyword. This can be used to monitor any site
on a product release date to automatically detect when a product has been
uploaded. Useful when monitoring hundreds of sites for shops in different
timezones.
Complete:
- find all products on Shopify site by keyword
- send discord notifications
- monitor for new products
- optimization for Shopify to return product checkout links by size
- find all products on other sites by keyword
- attempt to get product page links for universal sites
Left To Do:
- monitor for Shopify restocks
- monitor for restocks on other sites
-- find sold out by keyword
-- find sizes by keyword
-- find countdown timer by keyword
- detect cloudflare
- get product picture for other sites
- optimization for footsites
Credits:
Niveen Jegatheeswaran - Main Dev - https://github.com/snivyn/
kyb3r - Discord Embed - https://github.com/kyb3r/
'''
import requests
from bs4 import BeautifulSoup as soup
from log import log as log
import time
from datetime import datetime
import random
import sqlite3
from discord_hooks import Webhook
from threading import Thread
class Product():
def __init__(self, title, link, stock, keyword):
'''
(str, str, bool, str) -> None
Creates an instance of the Product class.
'''
# Setup product attributes
self.title = title
self.stock = stock
self.link = link
self.keyword = keyword
def read_from_txt(path):
'''
(None) -> list of str
Loads up all sites from the sitelist.txt file in the root directory.
Returns the sites as a list
'''
# Initialize variables
raw_lines = []
lines = []
# Load data from the txt file
try:
f = open(path, "r")
raw_lines = f.readlines()
f.close()
# Raise an error if the file couldn't be found
    except IOError:
        log('e', "Couldn't locate <" + path + ">.")
        # FileNotFound/NoDataLoaded are not defined anywhere, so raise builtins
        raise FileNotFoundError("Couldn't locate <" + path + ">.")
    if(len(raw_lines) == 0):
        raise ValueError("No data was loaded from <" + path + ">.")
# Parse the data
for line in raw_lines:
lines.append(line.strip("\n"))
# Return the data
return lines
def add_to_db(product):
'''
(Product) -> bool
Given a product <product>, the product is added to a database <products.db>
and whether or not a Discord alert should be sent out is returned. Discord
alerts are sent out based on whether or not a new product matching
keywords is found.
'''
# Initialize variables
title = product.title
stock = str(product.stock)
link = product.link
keyword = product.keyword
alert = False
# Create database
conn = sqlite3.connect('products.db')
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS products(title TEXT, link TEXT UNIQUE, stock TEXT, keywords TEXT)""")
# Add product to database if it's unique
try:
c.execute("""INSERT INTO products (title, link, stock, keywords) VALUES (?, ?, ?, ?)""", (title, link, stock, keyword))
log('s', "Found new product with keyword " + keyword + ". Link = " + link)
alert = True
except:
# Product already exists
pass
#log('i', "Product at URL <" + link + "> already exists in the database.")
# Close connection to the database
conn.commit()
c.close()
conn.close()
# Return whether or not it's a new product
return alert
def send_embed(product):
'''
(Product) -> None
Sends a discord alert based on info provided.
'''
url = 'https://discord.com/api/webhooks/728820147346997278/ocPnHwKHaeCLeq1N1UJ7nAmO1qvat3sxr2G5xv72TubAGZWmhajDzknK9CfR6ZpvxA2i'
embed = Webhook(url, color=123123)
embed.set_author(name='NERYS', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg')
embed.set_desc("Found product based on keyword " + product.keyword)
embed.add_field(name="Link", value=product.link)
embed.set_footer(text='NERYS by @snivynGOD', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg', ts=True)
embed.post()
def monitor(link, keywords):
'''
(str, list of str) -> None
Given a URL <link> and keywords <keywords>, the URL is scanned and alerts
are sent via Discord when a new product containing a keyword is detected.
'''
log('i', "Checking site <" + link + ">...")
# Parse the site from the link
pos_https = link.find("https://")
pos_http = link.find("http://")
if(pos_https == 0):
site = link[8:]
end = site.find("/")
if(end != -1):
site = site[:end]
site = "https://" + site
else:
site = link[7:]
end = site.find("/")
if(end != -1):
site = site[:end]
site = "http://" + site
# Get all the links on the "New Arrivals" page
try:
r = requests.get(link, timeout=5, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed. Retrying...")
time.sleep(5)
try:
r = requests.get(link, timeout=8, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed.")
return
page = soup(r.text, "html.parser")
raw_links = page.findAll("a")
hrefs = []
for raw_link in raw_links:
try:
hrefs.append(raw_link["href"])
except:
pass
# Check for links matching keywords
for href in hrefs:
found = False
for keyword in keywords:
if(keyword.upper() in href.upper()):
found = True
if("http" in href):
product_page = href
else:
product_page = site + href
product = Product("N/A", product_page, True, keyword)
alert = add_to_db(product)
if(alert):
send_embed(product)
if(__name__ == "__main__"):
# Ignore insecure messages
requests.packages.urllib3.disable_warnings()
    # Keywords (separated by -)
keywords = [
"jordan",
"dunk",
"pharrell",
"free-throw-line",
"kendrick",
"tinker",
"game-royal",
"yeezy",
"human-race",
"big-bang",
"dont-trip",
"kung-fu-kenny",
"playstation",
"valentine",
"ovo-air-jordan",
"ovo-jordan",
"air-jordan-1",
"wotherspoon"
]
# Load sites from file
sites = read_from_txt("other-sites.txt")
# Start monitoring sites
while(True):
threads = []
for site in sites:
t = Thread(target=monitor, args=(site, keywords))
threads.append(t)
t.start()
time.sleep(2) # 2 second delay before going to the next site
|
py
|
1a578c33022e3fe7633240058b4ae6313aede159
|
import tldextract
from analyzer import Pattern
from analyzer import PatternRecognizer
# pylint: disable=line-too-long
REGEX = r'\b(((([a-zA-Z0-9])|([a-zA-Z0-9][a-zA-Z0-9\-]{0,86}[a-zA-Z0-9]))\.(([a-zA-Z0-9])|([a-zA-Z0-9][a-zA-Z0-9\-]{0,73}[a-zA-Z0-9]))\.(([a-zA-Z0-9]{2,12}\.[a-zA-Z0-9]{2,12})|([a-zA-Z0-9]{2,25})))|((([a-zA-Z0-9])|([a-zA-Z0-9][a-zA-Z0-9\-]{0,162}[a-zA-Z0-9]))\.(([a-zA-Z0-9]{2,12}\.[a-zA-Z0-9]{2,12})|([a-zA-Z0-9]{2,25}))))\b'  # noqa: E501
CONTEXT = ["domain", "ip"]
class DomainRecognizer(PatternRecognizer):
"""
Recognizes domain names using regex
"""
def __init__(self):
patterns = [Pattern('Domain ()', REGEX, 0.5)]
super().__init__(supported_entity="DOMAIN_NAME", patterns=patterns,
context=CONTEXT)
def validate_result(self, pattern_text):
result = tldextract.extract(pattern_text)
return result.fqdn != ''
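

# A quick smoke-test sketch (not part of the original file): exercises the raw
# REGEX with the standard-library `re` module. Instantiating DomainRecognizer
# itself is left out here because it depends on the surrounding analyzer engine.
if __name__ == '__main__':
    import re
    for candidate in ('example.com', 'sub.example.co.uk', 'not a domain'):
        print(candidate, bool(re.search(REGEX, candidate)))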
|
py
|
1a578d99d5512757e0f16e3a3761cc038adfecf3
|
import os
import sys
sys.path.append(".")
from Utils.HTMLTestRunner import *
from Testcases.test_login import Login
from Testcases.test_02 import Test02
# get the directory path to output report file
dir = os.getcwd()
# get all tests from Login class
login1 = unittest.TestLoader().loadTestsFromTestCase(Login)
test02 = unittest.TestLoader().loadTestsFromTestCase(Test02)
# create a test suite
test_suite = unittest.TestSuite([login1, test02])
# open the report file
outfile = open(dir + '\\Reports\\SeleniumPythonTestSummary.html', 'w', encoding='utf-8')
print(dir + '\\Reports\\SeleniumPythonTestSummary.html')
# configure HTMLTestRunner options
runner = HTMLTestRunner(stream=outfile, title='Test Report', description='Acceptance Tests')
# run the suite using HTMLTestRunner
runner.run(test_suite)
outfile.close()
|
py
|
1a578fcd25a825db4ce942f25a206fb42640a572
|
import argparse, os
#######################################
def basic_training_parameters(parser):
parser.add_argument('--dataset', default='cub200', type=str, help='Dataset to use.')
parser.add_argument('--train_val_split', default=1, type=float, help='Percentage with which the training dataset is split into training/validation.')
### General Training Parameters
parser.add_argument('--lr', default=0.00001, type=float, help='Learning Rate for network parameters.')
parser.add_argument('--fc_lr', default=-1, type=float, help='Learning Rate for network parameters.')
parser.add_argument('--n_epochs', default=150, type=int, help='Number of training epochs.')
parser.add_argument('--kernels', default=8, type=int, help='Number of workers for pytorch dataloader.')
parser.add_argument('--bs', default=112 , type=int, help='Mini-Batchsize to use.')
parser.add_argument('--seed', default=1, type=int, help='Random seed for reproducibility.')
parser.add_argument('--scheduler', default='step', type=str, help='Type of learning rate scheduling. Currently: step & exp.')
parser.add_argument('--gamma', default=0.3, type=float, help='Learning rate reduction after tau epochs.')
parser.add_argument('--decay', default=0.0004, type=float, help='Weight decay for optimizer.')
parser.add_argument('--tau', default=[10000], nargs='+',type=int,help='Stepsize before reducing learning rate.')
    parser.add_argument('--use_sgd', action='store_true', help='If set, use SGD instead of Adam as the optimizer.')
##### Loss-specific Settings
parser.add_argument('--loss', default='margin', type=str, help='Choose between TripletLoss, ProxyNCA, ...')
parser.add_argument('--batch_mining', default='distance', type=str, help='For triplet-based losses: Modes of Sampling: random, semihard, distance, adaptive interval.')
parser.add_argument('--extension', default='none', type=str, help='Extension Method to standard metric learning losses')
#####
parser.add_argument('--embed_dim', default=128, type=int, help='Embedding dimensionality of the network. Note: dim=128 or 64 is used in most papers.')
parser.add_argument('--arch', default='resnet50_frozen_normalize', type=str, help='Underlying network architecture. Frozen denotes that \
        existing pretrained batchnorm layers are frozen, and normalize denotes normalization of the output embedding.')
parser.add_argument('--ensemble_num', default=4, type=int)
parser.add_argument('--compos_num', default=4, type=int)
parser.add_argument('--not_pretrained', action='store_true')
#####
parser.add_argument('--evaluation_metrics', nargs='+', default=['e_recall@1', 'e_recall@2', 'e_recall@4', 'nmi', 'f1', 'mAP_c'], type=str, help='Metrics to evaluate performance by.')
parser.add_argument('--evaltypes', nargs='+', default=['discriminative'], type=str, help='The network may produce multiple embeddings (ModuleDict). If the key is listed here, the entry will be evaluated on the evaluation metrics.\
Note: One may use Combined_embed1_embed2_..._embedn-w1-w1-...-wn to compute evaluation metrics on weighted (normalized) combinations.')
parser.add_argument('--storage_metrics', nargs='+', default=['e_recall@1'], type=str, help='Improvement in these metrics on the testset trigger checkpointing.')
parser.add_argument('--realistic_augmentation', action='store_true')
parser.add_argument('--realistic_main_augmentation', action='store_true')
##### Setup Parameters
    parser.add_argument('--gpu', default=[1], nargs='+', type=int, help='GPU device id(s) to train on.')
parser.add_argument('--savename', default='group_plus_seed', type=str, help='Appendix to save folder name if any special information is to be included.')
parser.add_argument('--source_path', default=os.getcwd()+'/Datasets', type=str, help='Path to training data.')
parser.add_argument('--save_path', default=os.getcwd()+'/Training_Results', type=str, help='Where to save everything.')
return parser
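#######################################
# A composition sketch (not part of the original file): how the parameter
# groups defined in this module are typically merged into a single argparse
# namespace. The helper name `build_example_parser` is hypothetical.
def build_example_parser():
    parser = argparse.ArgumentParser()
    parser = basic_training_parameters(parser)
    parser = diva_parameters(parser)
    parser = wandb_parameters(parser)
    parser = loss_specific_parameters(parser)
    return parser
    # e.g. opt = build_example_parser().parse_args([])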
#######################################
def diva_parameters(parser):
##### Multifeature Parameters
    parser.add_argument('--diva_ssl', default='fast_moco', type=str, help='Self-supervised objective used for the self-similarity feature space (e.g. fast_moco).')
    parser.add_argument('--diva_sharing', default='random', type=str, help='Sampling mode used for the class-shared feature space.')
    parser.add_argument('--diva_intra', default='random', type=str, help='Sampling mode used for the intra-class feature space.')
    parser.add_argument('--diva_features', default=['discriminative'], nargs='+', type=str, help='Feature spaces to train, e.g. discriminative, selfsimilarity, shared, intra.')
parser.add_argument('--diva_decorrelations', default=[], nargs='+', type=str)
parser.add_argument('--diva_rho_decorrelation', default=[1500], nargs='+', type=float, help='Weights for adversarial Separation of embeddings.')
### Adversarial Separation Loss
parser.add_argument('--diva_decorrnet_dim', default=512, type=int, help='')
parser.add_argument('--diva_decorrnet_lr', default=0.00001, type=float, help='')
### Invariant Spread Loss
parser.add_argument('--diva_instdiscr_temperature', default=0.1, type=float, help='')
### Deep Clustering
parser.add_argument('--diva_dc_update_f', default=2, type=int, help='')
parser.add_argument('--diva_dc_ncluster', default=300, type=int, help='')
### (Fast) Momentum Contrast Loss
parser.add_argument('--diva_moco_momentum', default=0.9, type=float, help='')
parser.add_argument('--diva_moco_temperature', default=0.1, type=float, help='')
parser.add_argument('--diva_moco_n_key_batches', default=50, type=int, help='')
parser.add_argument('--diva_moco_lower_cutoff', default=0.5, type=float, help='')
parser.add_argument('--diva_moco_upper_cutoff', default=1.4, type=float, help='')
parser.add_argument('--diva_moco_temp_lr', default=0.0005, type=float, help='')
parser.add_argument('--diva_moco_trainable_temp', action='store_true', help='')
### Weights for each feature space training objective
parser.add_argument('--diva_alpha_ssl', default=0.3, type=float, help='')
parser.add_argument('--diva_alpha_shared', default=0.3, type=float, help='')
parser.add_argument('--diva_alpha_intra', default=0.3, type=float, help='')
return parser
#######################################
def wandb_parameters(parser):
### Wandb Log Arguments
parser.add_argument('--log_online', action='store_true')
    parser.add_argument('--wandb_key', default='<your_api_key_here>', type=str, help='API key for the wandb account used for online logging.')
    parser.add_argument('--project', default='DiVA_Sample_Runs', type=str, help='Name of the wandb project to log runs to.')
    parser.add_argument('--group', default='Sample_Run', type=str, help='Name of the wandb group this run belongs to.')
return parser
#######################################
def loss_specific_parameters(parser):
### Contrastive Loss
parser.add_argument('--loss_contrastive_pos_margin', default=0, type=float, help='positive and negative margins for contrastive pairs.')
parser.add_argument('--loss_contrastive_neg_margin', default=1, type=float, help='positive and negative margins for contrastive pairs.')
# parser.add_argument('--loss_contrastive_neg_margin', default=0.2, type=float, help='positive and negative margins for contrastive pairs.')
### Triplet-based Losses
parser.add_argument('--loss_triplet_margin', default=0.2, type=float, help='Margin for Triplet Loss')
### MarginLoss
parser.add_argument('--loss_margin_margin', default=0.2, type=float, help='Learning Rate for class margin parameters in MarginLoss')
parser.add_argument('--loss_margin_beta_lr', default=0.0005, type=float, help='Learning Rate for class margin parameters in MarginLoss')
parser.add_argument('--loss_margin_beta', default=1.2, type=float, help='Initial Class Margin Parameter in Margin Loss')
parser.add_argument('--loss_margin_nu', default=0, type=float, help='Regularisation value on betas in Margin Loss.')
parser.add_argument('--loss_margin_beta_constant', action='store_true')
### ProxyNCA
parser.add_argument('--loss_proxynca_lr', default=0.0005, type=float, help='Learning Rate for Proxies in ProxyNCALoss.')
#NOTE: The number of proxies is determined by the number of data classes.
### NPair L2 Penalty
parser.add_argument('--loss_npair_l2', default=0.005, type=float, help='L2 weight in NPair. Note: Set to 0.02 in paper, but multiplied with 0.25 in the implementation as well.')
### Angular Loss
parser.add_argument('--loss_angular_alpha', default=36, type=float, help='Angular margin in degrees.')
parser.add_argument('--loss_angular_npair_ang_weight', default=2, type=float, help='Relative weighting between the angular and npair contributions.')
parser.add_argument('--loss_angular_npair_l2', default=0.005, type=float, help='L2 weight on embeddings for the npair part of the angular loss.')
### Multisimilary Loss
parser.add_argument('--loss_multisimilarity_pos_weight', default=2, type=float, help='Weighting on positive similarities.')
parser.add_argument('--loss_multisimilarity_neg_weight', default=40, type=float, help='Weighting on negative similarities.')
parser.add_argument('--loss_multisimilarity_margin', default=0.1, type=float, help='Distance margin for both positive and negative similarities.')
parser.add_argument('--loss_multisimilarity_thresh', default=0.5, type=float, help='Exponential thresholding.')
### Lifted Structure Loss
parser.add_argument('--loss_lifted_neg_margin', default=1, type=float, help='')
parser.add_argument('--loss_lifted_l2', default=0.005, type=float, help='')
### Binomial Deviance Loss
parser.add_argument('--loss_binomial_pos_weight', default=2, type=float, help='Weighting on positive similarities.')
parser.add_argument('--loss_binomial_neg_weight', default=40, type=float, help='Weighting on negative similarities.')
parser.add_argument('--loss_binomial_margin', default=0.1, type=float, help='Distance margin for both positive and negative similarities.')
parser.add_argument('--loss_binomial_thresh', default=0.5, type=float, help='Exponential thresholding.')
### Quadruplet Loss
parser.add_argument('--loss_quadruplet_alpha1', default=1, type=float, help='')
parser.add_argument('--loss_quadruplet_alpha2', default=0.5, type=float, help='')
### Soft-Triplet Loss
parser.add_argument('--loss_softtriplet_n_centroids', default=10, type=int, help='')
parser.add_argument('--loss_softtriplet_margin_delta', default=0.01, type=float, help='')
parser.add_argument('--loss_softtriplet_gamma', default=0.1, type=float, help='')
parser.add_argument('--loss_softtriplet_lambda', default=20, type=float, help='')
parser.add_argument('--loss_softtriplet_reg_weight', default=0.2, type=float, help='')
parser.add_argument('--loss_softtriplet_lr', default=0.0005, type=float, help='')
### Normalized Softmax Loss
parser.add_argument('--loss_softmax_lr', default=0.00001, type=float, help='')
parser.add_argument('--loss_softmax_temperature', default=0.05, type=float, help='')
### Histogram Loss
parser.add_argument('--loss_histogram_nbins', default=51, type=int, help='')
### SNR Triplet (with learnable margin) Loss
parser.add_argument('--loss_snr_margin', default=0.2, type=float, help='')
parser.add_argument('--loss_snr_reg_lambda', default=0.005, type=float, help='')
parser.add_argument('--loss_snr_beta', default=0, type=float, help='Example values: 0.2')
parser.add_argument('--loss_snr_beta_lr', default=0.0005,type=float, help='Example values: 0.2')
### ArcFace Loss
parser.add_argument('--loss_arcface_lr', default=0.0005, type=float, help='')
parser.add_argument('--loss_arcface_angular_margin', default=0.5, type=float, help='')
parser.add_argument('--loss_arcface_feature_scale', default=64, type=float, help='')
### Quadruplet Loss
parser.add_argument('--loss_quadruplet_margin_alpha_1', default=0.2, type=float, help='')
parser.add_argument('--loss_quadruplet_margin_alpha_2', default=0.2, type=float, help='')
return parser
#######################################
def batchmining_specific_parameters(parser):
### Distance-based_Sampling
parser.add_argument('--miner_distance_lower_cutoff', default=0.5, type=float)
parser.add_argument('--miner_distance_upper_cutoff', default=1.4, type=float)
return parser
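#######################################
def example_distance_weighted_sampling(embeddings, lower_cutoff=0.5, upper_cutoff=1.4):
    # Illustrative sketch only (not part of the original script): distance-weighted
    # negative sampling in the spirit of the cutoffs above, assuming L2-normalized
    # embeddings with dimensionality d >= 3. Pairwise distances are clamped to
    # [lower_cutoff, upper_cutoff] and weighted by the inverse of the analytic distance
    # density on the hypersphere, q(dist) ~ dist^(d-2) * (1 - dist^2/4)^((d-3)/2).
    import numpy as np
    n, d = embeddings.shape
    dists = np.linalg.norm(embeddings[:, None, :] - embeddings[None, :, :], axis=-1)
    dists = np.clip(dists, lower_cutoff, upper_cutoff)
    log_q = (d - 2) * np.log(dists) + ((d - 3) / 2.) * np.log(np.clip(1. - dists ** 2 / 4., 1e-8, None))
    weights = np.exp(-log_q)              # inverse density -> roughly uniform coverage of distances
    np.fill_diagonal(weights, 0.)         # never pick the anchor as its own negative
    weights /= weights.sum(axis=1, keepdims=True)
    return np.array([np.random.choice(n, p=w) for w in weights])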
#######################################
def batch_creation_parameters(parser):
parser.add_argument('--data_sampler', default='class_random', type=str, help='How the batch is created.')
parser.add_argument('--samples_per_class', default=2, type=int, help='Number of samples in one class drawn before choosing the next class. Set to >1 for losses other than ProxyNCA.')
parser.add_argument('--data_batchmatch_bigbs', default=512, type=int, help='Size of the large batch pool from which matched mini-batches are drawn.')
parser.add_argument('--data_batchmatch_ncomps', default=10, type=int, help='Number of candidate batch comparisons performed during batch matching.')
parser.add_argument('--data_storage_no_update', action='store_true', help='If set, do not update the stored embedding collection used by storage-based samplers.')
parser.add_argument('--data_d2_coreset_lambda', default=1, type=float, help='Lambda weighting for the d2-coreset batch sampler.')
parser.add_argument('--data_gc_coreset_lim', default=1e-9, type=float, help='Limit value used by the greedy-coreset batch sampler.')
parser.add_argument('--data_sampler_lowproj_dim', default=-1, type=int, help='If >0, project embeddings to this dimensionality before sampling; -1 disables the projection.')
parser.add_argument('--data_sim_measure', default='euclidean', type=str, help='Similarity/distance measure used when creating batches.')
parser.add_argument('--data_gc_softened', action='store_true', help='If set, use a softened greedy-coreset selection criterion.')
parser.add_argument('--data_idx_full_prec', action='store_true', help='If set, keep sampling indices/embeddings in full precision.')
parser.add_argument('--data_mb_mom', default=-1, type=float, help='Momentum for memory-bank updates; -1 disables the momentum update.')
parser.add_argument('--data_mb_lr', default=1, type=float, help='Update factor (learning rate) for the memory bank.')
return parser
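#######################################
def example_build_parser():
    # Illustrative sketch only (not part of the original script): the parameter groups
    # defined above are meant to be chained onto a single argparse parser; only the
    # functions visible in this file are shown here.
    import argparse
    parser = argparse.ArgumentParser()
    parser = diva_parameters(parser)
    parser = wandb_parameters(parser)
    parser = loss_specific_parameters(parser)
    parser = batchmining_specific_parameters(parser)
    parser = batch_creation_parameters(parser)
    return parser.parse_args([])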
|
py
|
1a57903883ed8f6f74cb65d1b6b27b53eb46829e
|
# positioner_window.py, window to control a positioning instrument
# Reinier Heeres, <[email protected]>, 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk
import gobject
import logging
import qtclient as qt
from gettext import gettext as _L
from lib.gui.qtwindow import QTWindow
from lib.gui.qttable import QTTable
from lib.gui.dropdowns import InstrumentDropdown
from lib.gui.misc import pack_hbox, pack_vbox
from lib.misc import sign
class PositionControls(gtk.Frame):
__gsignals__ = {
'direction-clicked': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'direction-released': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'max-speed-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'min-speed-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'accel-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'decel-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'stop-request': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE, [])
}
def __init__(self, ins):
gtk.Frame.__init__(self)
self._config = qt.config
self.set_label(_L('Controls'))
self._table = gtk.Table(4, 9)
self._button_up = gtk.Button('/\\')
self._button_up.connect('pressed',
lambda x: self._direction_clicked(True, 0, 1, 0))
self._button_up.connect('released',
lambda x: self._direction_clicked(False, 0, 1, 0))
self._table.attach(self._button_up, 1, 2, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
self._button_down = gtk.Button('\\/')
self._button_down.connect('pressed',
lambda x: self._direction_clicked(True, 0, -1, 0))
self._button_down.connect('released',
lambda x: self._direction_clicked(False, 0, -1, 0))
self._table.attach(self._button_down, 1, 2, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._button_left = gtk.Button('<')
self._button_left.connect('pressed',
lambda x: self._direction_clicked(True, -1, 0, 0))
self._button_left.connect('released',
lambda x: self._direction_clicked(False, -1, 0, 0))
self._table.attach(self._button_left, 0, 1, 1, 2,
gtk.EXPAND | gtk.FILL, 0)
self._button_right = gtk.Button('>')
self._button_right.connect('pressed',
lambda x: self._direction_clicked(True, 1, 0, 0))
self._button_right.connect('released',
lambda x: self._direction_clicked(False, 1, 0, 0))
self._table.attach(self._button_right, 2, 3, 1, 2,
gtk.EXPAND | gtk.FILL, 0)
self._button_upleft = gtk.Button('\\')
self._button_upleft.connect('pressed',
lambda x: self._direction_clicked(True, -1, 1, 0))
self._button_upleft.connect('released',
lambda x: self._direction_clicked(False, -1, 1, 0))
self._table.attach(self._button_upleft, 0, 1, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
self._button_upright = gtk.Button('/')
self._button_upright.connect('pressed',
lambda x: self._direction_clicked(True, 1, 1, 0))
self._button_upright.connect('released',
lambda x: self._direction_clicked(False, 1, 1, 0))
self._table.attach(self._button_upright, 2, 3, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
self._button_downleft = gtk.Button('/')
self._button_downleft.connect('pressed',
lambda x: self._direction_clicked(True, -1, -1, 0))
self._button_downleft.connect('released',
lambda x: self._direction_clicked(False, -1, -1, 0))
self._table.attach(self._button_downleft, 0, 1, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._button_downright = gtk.Button('\\')
self._button_downright.connect('pressed',
lambda x: self._direction_clicked(True, 1, -1, 0))
self._button_downright.connect('released',
lambda x: self._direction_clicked(False, 1, -1, 0))
self._table.attach(self._button_downright, 2, 3, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._button_z_up = gtk.Button('/\\')
self._button_z_up.connect('pressed',
lambda x: self._direction_clicked(True, 0, 0, 1))
self._button_z_up.connect('released',
lambda x: self._direction_clicked(False, 0, 0, 1))
self._table.attach(self._button_z_up, 4, 5, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
self._button_z_down = gtk.Button('\\/')
self._button_z_down.connect('pressed',
lambda x: self._direction_clicked(True, 0, 0, -1))
self._button_z_down.connect('released',
lambda x: self._direction_clicked(False, 0, 0, -1))
self._table.attach(self._button_z_down, 4, 5, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._max_speed = gtk.VScale()
self._max_speed.set_size_request(100, 90)
self._max_speed.set_range(1, 500)
self._max_speed.set_inverted(True)
self._max_speed.connect('value-changed', self._max_speed_changed_cb)
self._max_speed.set_digits(1)
self._table.attach(gtk.Label(_L('Max speed')), 5, 6, 0, 1, 0, 0)
self._table.attach(self._max_speed, 5, 6, 1, 3, 0, 0)
self._min_speed = gtk.VScale()
self._min_speed.set_size_request(100, 90)
self._min_speed.set_range(1, 500)
self._min_speed.set_inverted(True)
self._min_speed.connect('value-changed', self._min_speed_changed_cb)
self._min_speed.set_digits(1)
self._table.attach(gtk.Label(_L('Min speed')), 6, 7, 0, 1, 0, 0)
self._table.attach(self._min_speed, 6, 7, 1, 3, 0, 0)
self._accel = gtk.VScale()
self._accel.set_size_request(100, 90)
self._accel.set_range(1.1, 4.0)
self._accel.set_inverted(True)
self._accel.connect('value-changed', self._accel_changed_cb)
self._accel.set_digits(2)
self._table.attach(gtk.Label(_L('Acceleration')), 7, 8, 0, 1, 0, 0)
self._table.attach(self._accel, 7, 8, 1, 3, 0, 0)
self._decel = gtk.VScale()
self._decel.set_size_request(100, 90)
self._decel.set_range(1.1, 4.0)
self._decel.set_inverted(True)
self._decel.connect('value-changed', self._decel_changed_cb)
self._decel.set_digits(2)
self._table.attach(gtk.Label(_L('Deceleration')), 8, 9, 0, 1, 0, 0)
self._table.attach(self._decel, 8, 9, 1, 3, 0, 0)
self._stop_but = gtk.Button('Stop')
self._stop_but.connect('clicked', self._stop_clicked_cb)
self._table.attach(self._stop_but, 0, 3, 3, 4, gtk.FILL, 0)
self.connect('key-press-event', self._key_pressed_cb)
self.connect('key-release-event', self._key_released_cb)
self.add(self._table)
self._inhibit_save = False
self.set_instrument(ins)
def _load_settings(self):
if self._instrument is None:
return
insname = self._instrument.get_name()
cfg = self._config
self._inhibit_save = True
self._max_speed.set_value(cfg.get('positioner_%s_max_speed' % insname, 250))
self._min_speed.set_value(cfg.get('positioner_%s_min_speed' % insname, 50))
self._accel.set_value(cfg.get('positioner_%s_accel' % insname, 1.5))
self._decel.set_value(cfg.get('positioner_%s_decel' % insname, 2.0))
self._inhibit_save = False
def _save_settings(self):
if self._instrument is None or self._inhibit_save:
return
insname = self._instrument.get_name()
cfg = self._config
cfg.set('positioner_%s_max_speed' % insname, self._max_speed.get_value())
cfg.set('positioner_%s_min_speed' % insname, self._min_speed.get_value())
cfg.set('positioner_%s_accel' % insname, self._accel.get_value())
cfg.set('positioner_%s_decel' % insname, self._decel.get_value())
def set_instrument(self, ins):
self._instrument = ins
if self._instrument is not None:
self._channels = ins.get_channels()
else:
self._channels = 0
bval = False
if self._channels > 0:
bval = True
self._button_left.set_sensitive(bval)
self._button_right.set_sensitive(bval)
self._button_upleft.set_sensitive(bval)
self._button_upright.set_sensitive(bval)
self._button_downleft.set_sensitive(bval)
self._button_downright.set_sensitive(bval)
self._stop_but.set_sensitive(bval)
bval = False
if self._channels > 1:
bval = True
self._button_up.set_sensitive(bval)
self._button_down.set_sensitive(bval)
bval = False
if self._channels > 2:
bval = True
self._button_z_up.set_sensitive(bval)
self._button_z_down.set_sensitive(bval)
self._load_settings()
def _direction_clicked(self, clicked, x, y, z):
coord = []
if self._channels > 0:
coord.append(x)
if self._channels > 1:
coord.append(y)
if self._channels > 2:
coord.append(z)
if clicked:
self.emit('direction-clicked', coord)
else:
self.emit('direction-released', coord)
def _key_pressed_cb(self, sender, key):
pass
def _key_released_cb(self, sender, key):
pass
def _max_speed_changed_cb(self, sender):
self._save_settings()
self.emit('max-speed-changed', sender.get_value())
def _min_speed_changed_cb(self, sender):
self._save_settings()
self.emit('min-speed-changed', sender.get_value())
def get_max_speed(self):
return self._max_speed.get_value()
def get_min_speed(self):
return self._min_speed.get_value()
def get_accel(self):
return self._accel.get_value()
def get_decel(self):
return self._decel.get_value()
def _accel_changed_cb(self, sender):
self._save_settings()
self.emit('accel-changed', sender.get_value())
def _decel_changed_cb(self, sender):
self._save_settings()
self.emit('decel-changed', sender.get_value())
def _stop_clicked_cb(self, sender):
self.emit('stop-request')
class PositionBookmarks(gtk.Frame):
__gsignals__ = {
'go-request': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
}
def __init__(self, ins):
gtk.Frame.__init__(self)
self.set_label(_L('Bookmarks'))
self._add_button = gtk.Button(_L('Add'))
self._add_button.connect('clicked', self._add_clicked_cb)
self._goxy_button = gtk.Button(_L('Goto XY'))
self._goxy_button.connect('clicked', self._go_clicked_cb, 2)
self._goxyz_button = gtk.Button(_L('Goto XYZ'))
self._goxyz_button.connect('clicked', self._go_clicked_cb, 3)
self._remove_button = gtk.Button(_L('Remove'))
self._remove_button.connect('clicked', self._remove_clicked_cb)
self._bookmark_data = {}
self._tree_model = gtk.ListStore(str, str)
self._tree_view = QTTable([
('Label', {}),
('Position', {})
], self._tree_model)
self._config = qt.config
self._load_bookmarks()
self._label_entry = gtk.Entry()
self.set_instrument(ins)
vbox = pack_vbox([
pack_hbox([
gtk.Label(_L('Label')),
self._label_entry], True, False),
pack_hbox([
self._add_button,
self._goxy_button,
self._goxyz_button,
self._remove_button], True, True),
self._tree_view
], False, False)
vbox.set_border_width(4)
self.add(vbox)
def set_instrument(self, ins):
self._ins = ins
bval = False
if ins is not None:
bval = True
self._add_button.set_sensitive(bval)
bval = False
if ins is not None and ins.get_channels() > 1:
bval = True
self._goxy_button.set_sensitive(bval)
bval = False
if ins is not None and ins.get_channels() > 2:
bval = True
self._goxyz_button.set_sensitive(bval)
def _add_clicked_cb(self, widget):
pos = self._ins.get_position()
posstr = self._ins.format_parameter_value('position', pos)
label = self._label_entry.get_text()
index = "%s%s" % (label, posstr)
if index in self._bookmark_data:
return
self._tree_model.append((label, posstr))
self._bookmark_data[index] = pos
self._save_bookmarks()
def _remove_clicked_cb(self, widget):
(model, rows) = self._tree_view.get_selection().get_selected_rows()
for row in rows:
it = model.get_iter(row)
rowdata = model.get(it, 0, 1)
index = "%s%s" % (rowdata[0], rowdata[1])
if index in self._bookmark_data:
del self._bookmark_data[index]
model.remove(it)
self._save_bookmarks()
def _go_clicked_cb(self, widget, nchannels):
(model, rows) = self._tree_view.get_selection().get_selected_rows()
if len(rows) != 1:
logging.warning('Select 1 row only!')
return
row = rows[0]
it = model.get_iter(row)
label = model.get_value(it, 0)
posstr = model.get_value(it, 1)
index = "%s%s" % (label, posstr)
pos = self._bookmark_data[index]
pos = pos[:nchannels]
self.emit('go-request', pos)
def _load_bookmarks(self):
for row in self._config.get('positioner_bookmarks', []):
it = self._tree_model.append(row[:2])
index = "%s%s" % (row[0], row[1])
self._bookmark_data[index] = row[2]
def _save_bookmarks(self):
data = []
for row in self._tree_model:
index = "%s%s" % (row[0], row[1])
data.append((row[0], row[1], self._bookmark_data[index]))
self._config.set('positioner_bookmarks', data)
class PositionerWindow(QTWindow):
def __init__(self):
QTWindow.__init__(self, 'positioner', 'Positioner')
self.connect("delete-event", self._delete_event_cb)
self._moving = False
self._controls = PositionControls(None)
self._controls.connect('direction-clicked', self._direction_clicked_cb)
self._controls.connect('direction-released', self._direction_released_cb)
self._controls.connect('max-speed-changed', self._max_speed_changed_cb)
self._controls.connect('min-speed-changed', self._min_speed_changed_cb)
self._controls.connect('accel-changed', self._accel_changed_cb)
self._controls.connect('decel-changed', self._decel_changed_cb)
self._controls.connect('stop-request', self._stop_request_cb)
self._max_speed = self._controls.get_max_speed()
self._min_speed = self._controls.get_min_speed()
self._accel_factor = self._controls.get_accel()
self._decel_factor = self._controls.get_decel()
self._bookmarks = PositionBookmarks(None)
self._bookmarks.connect('go-request', self._go_request)
self._ins_combo = InstrumentDropdown(types=['positioner'])
self._ins_combo.connect('changed', self._instrument_changed_cb)
self._instrument = None
poslabel = gtk.Label()
poslabel.set_markup('<big>%s</big>' % _L('Position'))
self._position_label = gtk.Label()
self._update_position()
vbox = pack_vbox([
self._ins_combo,
pack_hbox([
poslabel,
self._position_label], True, True),
self._controls,
self._bookmarks], False, False)
# Speed control variables
self._direction_down = (0, 0, 0)
self._step_done = False
self._speed = [0, 0, 0]
self._timer_hid = None
self._counter = 0
self.add(vbox)
vbox.show_all()
def _delete_event_cb(self, widget, event, data=None):
self.hide()
return True
def _instrument_changed_cb(self, widget):
ins = self._ins_combo.get_instrument()
self._instrument = ins
self._controls.set_instrument(ins)
self._bookmarks.set_instrument(ins)
self._update_position()
def _go_request(self, sender, position):
self._instrument.move_abs(position)
def _direction_clicked_cb(self, sender, direction):
self._direction_down = direction
self._step_done = False
if self._timer_hid is None:
self._timer_hid = gobject.timeout_add(100, self._position_timer)
def _direction_released_cb(self, sender, direction):
if not self._step_done and self._speed == [0, 0, 0]:
if self._timer_hid is not None:
gobject.source_remove(self._timer_hid)
self._timer_hid = None
self._do_single_step()
self._direction_down = (0, 0, 0)
def _do_single_step(self):
for i in range(len(self._direction_down)):
if self._direction_down[i] != 0:
self._instrument.step(i, sign(self._direction_down[i]))
def _update_speed(self):
for i in range(len(self._direction_down)):
if self._direction_down[i] != 0:
if self._speed[i] == 0:
self._speed[i] = self._direction_down[i] * self._min_speed
else:
self._speed[i] = self._speed[i] * self._accel_factor
if abs(self._speed[i]) >= self._max_speed:
self._speed[i] = sign(self._speed[i]) * self._max_speed
else:
self._speed[i] = self._speed[i] / self._decel_factor
if abs(self._speed[i]) < self._min_speed:
self._speed[i] = 0
if self._speed != [0, 0, 0]:
self._step_done = True
self._instrument.set_speed(self._speed)
if not self._moving:
self._instrument.start()
self._moving = True
return True
else:
self._instrument.stop()
self._moving = False
return False
def _update_position(self):
if self._instrument is not None and self._instrument.has_parameter('position'):
pos = self._instrument.get_position()
posstr = self._instrument.format_parameter_value('position', pos)
else:
posstr = 'None'
self._position_label.set_markup('<big>%s</big>' % posstr)
def _position_timer(self):
self._counter += 1
ret = self._update_speed()
if not ret:
self._timer_hid = None
if (self._counter % 5) == 0 or not ret:
self._update_position()
return ret
def _max_speed_changed_cb(self, sender, val):
self._max_speed = val
def _min_speed_changed_cb(self, sender, val):
self._min_speed = val
def _accel_changed_cb(self, sender, val):
self._accel_factor = val
def _decel_changed_cb(self, sender, val):
self._decel_factor = val
def _stop_request_cb(self, sender):
self._instrument.stop()
Window = PositionerWindow
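# Illustrative sketch only (not part of the original module): the exponential speed ramp
# implemented in PositionerWindow._update_speed, reduced to a single axis so it can be
# exercised without gtk. `accel`/`decel` correspond to the acceleration and deceleration
# sliders defined in PositionControls.
def example_speed_ramp(direction, speed, min_speed=50.0, max_speed=250.0, accel=1.5, decel=2.0):
    if direction != 0:
        speed = direction * min_speed if speed == 0 else speed * accel
        if abs(speed) >= max_speed:
            speed = max_speed if speed > 0 else -max_speed
    else:
        speed = speed / decel
        if abs(speed) < min_speed:
            speed = 0
    return speed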
|
py
|
1a57907d4e56026afdeff8bdde08582c23501cfa
|
"""
Module containing class which computes fits of data using linear models through
analytical calculations. It has functions to output the signal estimate (with
errors), parameter covariance, and more. It can accept the noise level either
as standard deviations of channels (if uncorrelated) or as a covariance matrix
in the form of a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`.
**File**: $PYLINEX/pylinex/fitter/Fitter.py
**Author**: Keith Tauscher
**Date**: 25 May 2021
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as pl
from distpy import GaussianDistribution, ChiSquaredDistribution
from ..util import Savable, create_hdf5_dataset, psi_squared
from .TrainingSetIterator import TrainingSetIterator
from .BaseFitter import BaseFitter
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
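# Illustrative sketch only (not part of pylinex): the analytic Gaussian linear fit that the
# Fitter class below implements, written with plain numpy for a diagonal noise covariance.
# Here G is the (nchannels, nparams) basis matrix, translation is the mu vector, and the
# optional prior is given by (prior_mean, prior_covariance); the return values follow the
# posterior formulas quoted in the property docstrings of Fitter.
def _example_analytic_fit(G, data, error, translation=None, prior_mean=None, prior_covariance=None):
    translation = np.zeros(G.shape[0]) if translation is None else translation
    weighted_G = G / error[:, np.newaxis]          # C^(-1/2) G for diagonal C
    weighted_y = (data - translation) / error      # C^(-1/2) (y - mu)
    inverse_covariance = np.dot(weighted_G.T, weighted_G)
    mean_term = np.dot(weighted_G.T, weighted_y)
    if prior_covariance is not None:
        prior_inverse = la.inv(prior_covariance)
        inverse_covariance = inverse_covariance + prior_inverse
        mean_term = mean_term + np.dot(prior_inverse, prior_mean)
    covariance = la.inv(inverse_covariance)        # S = (G^T C^-1 G + Lambda^-1)^-1
    mean = np.dot(covariance, mean_term)           # gamma = S [G^T C^-1 (y - mu) + Lambda^-1 nu]
    return mean, covariance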
class Fitter(BaseFitter, Savable):
"""
Class which computes fits of data using linear models through analytical
calculations. It has functions to output the signal estimate (with errors),
parameter covariance, and more. It can accept the noise level either as
standard deviations of channels (if uncorrelated) or as a covariance matrix
in the form of a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`.
"""
def __init__(self, basis_sum, data, error=None, **priors):
"""
Initializes a new `Fitter` object using the given inputs. The
likelihood used by the fit is of the form \\(\\mathcal{L}\
(\\boldsymbol{x}) \\propto \\exp{\\left\\{-\\frac{1}{2}\
[\\boldsymbol{y}-(\\boldsymbol{G}\\boldsymbol{x} +\
\\boldsymbol{\\mu})]^T\\boldsymbol{C}^{-1}[\\boldsymbol{y}-\
(\\boldsymbol{G}\\boldsymbol{x}+\\boldsymbol{\\mu})]\\right\\}}\\) and
the prior used is \\(\\pi(\\boldsymbol{x}) \\propto\
\\exp{\\left\\{-\\frac{1}{2}(\\boldsymbol{x}-\\boldsymbol{\\nu})^T\
\\boldsymbol{\\Lambda}^{-1}(\\boldsymbol{x}-\\boldsymbol{\\nu})\
\\right\\}}\\). The posterior distribution explored is
\\(p(\\boldsymbol{x})=\
\\mathcal{L}(\\boldsymbol{x})\\times\\pi(\\boldsymbol{x})\\).
Parameters
----------
basis_sum : `pylinex.basis.BasisSum.BasisSum` or\
`pylinex.basis.Basis.Basis`
the basis used to model the data, represented in equations by
\\(\\boldsymbol{G}\\) alongside the translation component
\\(\\boldsymbol{\\mu}\\). Two types of inputs are accepted:
- If `basis_sum` is a `pylinex.basis.BasisSum.BasisSum`, then it is
assumed to have constituent bases for each modeled component
alongside `pylinex.expander.Expander.Expander` objects determining
how those components enter into the data
- If `basis_sum` is a `pylinex.basis.Basis.Basis`, then it is
assumed that this single basis represents the only component that
needs to be modeled. The
`pylinex.fitter.BaseFitter.BaseFitter.basis_sum` property will be
set to a `pylinex.basis.BasisSum.BasisSum` object with this
`pylinex.basis.Basis.Basis` as its only component, labeled with the
string name `"sole"`
data : numpy.ndarray
the data to fit, represented in equations by \\(\\boldsymbol{y}\\)
- if `data` is 1D, then its length should be the same as the
(expanded) vectors in `basis_sum`, i.e. the number of rows of
\\(\\boldsymbol{G}\\), `nchannels`
- if `data` is 2D, then it should have shape `(ncurves, nchannels)`
and it will be interpreted as a list of data vectors to fit
independently
error : numpy.ndarray or\
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`
the noise level of the data that determines the covariance matrix,
represented in equations by \\(\\boldsymbol{C}\\):
- if `error` is a 1D `numpy.ndarray`, it should have the same
length as the (expanded) vectors in `basis_sum`, i.e. the number of
rows of \\(\\boldsymbol{G}\\), `nchannels` and should only contain
positive numbers. In this case, \\(\\boldsymbol{C}\\) is a diagonal
matrix whose elements are the squares of the values in `error`
- if `error` is a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`,
then it is assumed to represent a block diagonal
\\(\\boldsymbol{C}\\) directly
priors : dict
keyword arguments where the keys are exactly the names of the
`basis_sum` with `'_prior'` appended to them and the values are
`distpy.distribution.GaussianDistribution.GaussianDistribution`
objects. Priors are optional and can be included or excluded for
any given component. If `basis_sum` was given as a
`pylinex.basis.Basis.Basis`, then `priors` should either be empty
or a dictionary of the form
`{'sole_prior': gaussian_distribution}`. The means and inverse
covariances of all priors are combined into a full parameter prior
mean and full parameter prior inverse covariance, represented in
equations by \\(\\boldsymbol{\\nu}\\) and
\\(\\boldsymbol{\\Lambda}^{-1}\\), respectively. Having no prior is
equivalent to having an infinitely wide prior, i.e. a prior with an
inverse covariance matrix of \\(\\boldsymbol{0}\\)
"""
self.basis_sum = basis_sum
self.priors = priors
self.data = data
self.error = error
@property
def prior_significance(self):
"""
The prior significance, represented mathematically as
\\(\\boldsymbol{\\nu}^T\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}\\).
"""
if not hasattr(self, '_prior_significance'):
self._prior_significance = np.dot(self.prior_mean,\
np.dot(self.prior_inverse_covariance, self.prior_mean))
return self._prior_significance
@property
def log_prior_covariance_determinant(self):
"""
The logarithm (base e) of the determinant of the prior
parameter covariance matrix, \\(|\\boldsymbol{\\Lambda}|\\). Note that
if a given prior is not given, it is simply not used here (to avoid
getting 0 or \\(\\infty\\) as the determinant).
"""
if not hasattr(self, '_log_prior_covariance_determinant'):
self._log_prior_covariance_determinant = 0
for key in self.priors:
this_prior_covariance = self.priors[key].covariance.A
self._log_prior_covariance_determinant +=\
la.slogdet(this_prior_covariance)[1]
return self._log_prior_covariance_determinant
@property
def data_significance(self):
"""
The data significance, represented mathematically as
\\((\\boldsymbol{y}-\\boldsymbol{\\mu})^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y} - \\boldsymbol{\\mu})\\). It is either a single number
(if `Fitter.multiple_data_curves` is False) or a 1D `numpy.ndarray` (if
`Fitter.multiple_data_curves` is True).
"""
if not hasattr(self, '_data_significance'):
if self.multiple_data_curves:
self._data_significance =\
np.sum(self.weighted_translated_data ** 2, axis=1)
else:
self._data_significance =\
np.dot(self.weighted_translated_data,\
self.weighted_translated_data)
return self._data_significance
@property
def num_parameters(self):
"""
The number of parameters of the fit. This is the same as the
`num_basis_vectors` property of `Fitter.basis_sum`.
"""
return self.basis_sum.num_basis_vectors
@property
def posterior_covariance_times_prior_inverse_covariance(self):
"""
The posterior covariance multiplied on the right by the prior inverse
covariance, represented mathematically as
\\(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1}\\). This is a matrix
measure of the effect of the data on the distribution of parameters
(i.e. it approaches the zero matrix if the data constrains parameters
much more powerfully than the prior and approaches the identity matrix
if the prior constrains parameters much more powerfully than the data).
"""
if not hasattr(self,\
'_posterior_covariance_times_prior_inverse_covariance'):
self._posterior_covariance_times_prior_inverse_covariance =\
np.dot(self.parameter_covariance,\
self.prior_inverse_covariance)
return self._posterior_covariance_times_prior_inverse_covariance
@property
def model_complexity_mean_to_peak_logL(self):
"""
A measure of the model complexity that is computed by taking the
difference between the mean and peak values of the log likelihood. If
this `Fitter` has no priors, then this property will always simply
return the number of parameters, \\(p\\). It is represented
mathematically as
\\(p-\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\\).
"""
if not hasattr(self, '_model_complexity_mean_to_peak_logL'):
self._model_complexity_mean_to_peak_logL = self.num_parameters
if self.has_priors:
self._model_complexity_mean_to_peak_logL -= np.trace(\
self.posterior_covariance_times_prior_inverse_covariance)
return self._model_complexity_mean_to_peak_logL
@property
def model_complexity_logL_variance(self):
"""
A measure of the model complexity which is computed by finding the
variance of the log likelihood function. It is represented
mathematically as \\(p+2\\ \\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\
\\boldsymbol{G}\\boldsymbol{S}\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
\\boldsymbol{\\delta} + \\text{tr}(\\boldsymbol{S}\
\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\
-2\\ \\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\\).
"""
if not hasattr(self, '_model_complexity_logL_variance'):
self._model_complexity_logL_variance = self.num_parameters
bias_term = np.dot(self.weighted_basis, self.weighted_bias.T).T
if self.multiple_data_curves:
covariance_times_bias_term =\
np.dot(bias_term, self.parameter_covariance)
bias_term =\
np.sum(bias_term * covariance_times_bias_term, axis=1)
del covariance_times_bias_term
else:
bias_term = np.dot(bias_term,\
np.dot(self.parameter_covariance, bias_term))
self._model_complexity_logL_variance += (2 * bias_term)
if self.has_priors:
self._model_complexity_logL_variance += np.trace(np.dot(\
self.posterior_covariance_times_prior_inverse_covariance,\
self.posterior_covariance_times_prior_inverse_covariance))
self._model_complexity_logL_variance -= (2 * np.trace(\
self.posterior_covariance_times_prior_inverse_covariance))
return self._model_complexity_logL_variance
@property
def basis_dot_products(self):
"""
The dot products between the `pylinex.basis.Basis.Basis` objects
underlying the `Fitter.basis_sum` this object stores. See the
`pylinex.basis.Basis.Basis.dot` method for details on this calculation.
"""
if not hasattr(self, '_basis_dot_products'):
if self.non_diagonal_noise_covariance:
raise NotImplementedError("Basis dot products are not yet " +\
"implemented for non diagonal noise covariance matrices.")
else:
self._basis_dot_products =\
self.basis_sum.basis_dot_products(error=self.error)
return self._basis_dot_products
@property
def basis_dot_product_sum(self):
"""
The sum of all off diagonal elements of the upper triangle of
`Fitter.basis_dot_products`.
"""
if not hasattr(self, '_basis_dot_product_sum'):
self._basis_dot_product_sum = np.sum(self.basis_dot_products)
self._basis_dot_product_sum = self._basis_dot_product_sum -\
np.trace(self.basis_dot_products)
self._basis_dot_product_sum = self._basis_dot_product_sum / 2.
return self._basis_dot_product_sum
@property
def parameter_inverse_covariance(self):
"""
The inverse of the posterior distribution's covariance matrix. This is
represented mathematically as \\(\\boldsymbol{S}^{-1}=\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
\\boldsymbol{\\Lambda}^{-1}\\).
"""
if not hasattr(self, '_parameter_inverse_covariance'):
self._parameter_inverse_covariance = self.basis_overlap_matrix
if self.has_priors:
self._parameter_inverse_covariance =\
self._parameter_inverse_covariance +\
self.prior_inverse_covariance
return self._parameter_inverse_covariance
@property
def likelihood_parameter_covariance(self):
"""
The parameter covariance implied only by the likelihood, represented
mathematically as
\\((\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\\).
"""
if not hasattr(self, '_likelihood_parameter_covariance'):
if self.has_priors:
self._likelihood_parameter_covariance =\
la.inv(self.basis_overlap_matrix)
else:
self._likelihood_parameter_covariance =\
self.parameter_covariance
return self._likelihood_parameter_covariance
@property
def likelihood_parameter_mean(self):
"""
Property storing the parameter mean implied by the likelihood (i.e.
disregarding priors). It is represented mathematically as
\\((\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y}-\\boldsymbol{\\mu})\\).
"""
if not hasattr(self, '_likelihood_parameter_mean'):
if self.has_priors:
self._likelihood_parameter_mean =\
np.dot(self.likelihood_parameter_covariance,\
np.dot(self.weighted_basis,\
self.weighted_translated_data.T)).T
else:
self._likelihood_parameter_mean = self.parameter_mean
return self._likelihood_parameter_mean
@property
def likelihood_channel_mean(self):
"""
Property storing the channel mean associated with the likelihood
parameter mean (i.e. the result if there are no priors). It is
represented mathematically as \\(\\boldsymbol{G}\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y}-\\boldsymbol{\\mu}) + \\boldsymbol{\\mu}\\).
"""
if not hasattr(self, '_likelihood_channel_mean'):
if self.has_priors:
self._likelihood_channel_mean = self.basis_sum.translation +\
np.dot(self.basis_sum.basis.T,\
self.likelihood_parameter_mean.T).T
else:
self._likelihood_channel_mean = self.channel_mean
return self._likelihood_channel_mean
@property
def likelihood_channel_bias(self):
"""
Property storing the channel-space bias associated with the likelihood
parameter mean (i.e. the result if there are no priors). It is
represented mathematically as \\(\\boldsymbol{\\delta}_{\\text{NP}}=\
\\left[\\boldsymbol{I}-\\boldsymbol{G}\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\right]\
(\\boldsymbol{y}-\\boldsymbol{\\mu})\\).
"""
if not hasattr(self, '_likelihood_channel_bias'):
if self.has_priors:
self._likelihood_channel_bias =\
self.data - self.likelihood_channel_mean
else:
self._likelihood_channel_bias = self.channel_bias
return self._likelihood_channel_bias
@property
def likelihood_weighted_bias(self):
"""
The likelihood channel bias weighted by the error, represented
mathematically as
\\(\\boldsymbol{C}^{-1/2}\\boldsymbol{\\delta}_{\\text{NP}}\\).
"""
if not hasattr(self, '_likelihood_weighted_bias'):
if self.has_priors:
self._likelihood_weighted_bias =\
self.weight(self.likelihood_channel_bias, -1)
else:
self._likelihood_weighted_bias = self.weighted_bias
return self._likelihood_weighted_bias
@property
def likelihood_bias_statistic(self):
"""
The bias statistic evaluated at the maximum of the likelihood, represented
mathematically as \\(\\boldsymbol{\\delta}_{\\text{NP}}^T \\boldsymbol{C}^{-1}\
\\boldsymbol{\\delta}_{\\text{NP}}\\). It is equal to -2 times the peak
value of the loglikelihood.
"""
if not hasattr(self, '_likelihood_bias_statistic'):
if self.has_priors:
if self.multiple_data_curves:
self._likelihood_bias_statistic =\
np.sum(self.likelihood_weighted_bias ** 2, axis=1)
else:
self._likelihood_bias_statistic = np.dot(\
self.likelihood_weighted_bias,\
self.likelihood_weighted_bias)
else:
self._likelihood_bias_statistic = self.bias_statistic
return self._likelihood_bias_statistic
@property
def degrees_of_freedom(self):
"""
The difference between the number of channels and the number of
parameters.
"""
if not hasattr(self, '_degrees_of_freedom'):
self._degrees_of_freedom = self.num_channels - self.num_parameters
return self._degrees_of_freedom
@property
def normalized_likelihood_bias_statistic(self):
"""
The normalized version of the likelihood bias statistic. This is a
statistic that should be close to 1 which measures how well the total
data is fit and is represented mathematically as
\\(\\frac{1}{\\text{dof}}\\boldsymbol{\\delta}_{\\text{NP}}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}_{\\text{NP}}\\), where
\\(\\text{dof}\\) is the number of degrees of freedom.
"""
if not hasattr(self, '_normalized_likelihood_bias_statistic'):
self._normalized_likelihood_bias_statistic =\
self.likelihood_bias_statistic / self.degrees_of_freedom
return self._normalized_likelihood_bias_statistic
@property
def chi_squared(self):
"""
The (non-reduced) chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as
\\(\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
return self.bias_statistic
@property
def reduced_chi_squared(self):
"""
The reduced chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as \\(\\frac{1}{\\text{dof}}\
\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
return self.normalized_bias_statistic
@property
def reduced_chi_squared_expected_mean(self):
"""
The expected mean of `Fitter.reduced_chi_squared`, represented
mathematically as \\(\\frac{1}{\\text{dof}}[\\text{dof} +\
\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})]\\).
"""
if not hasattr(self, '_reduced_chi_squared_expected_mean'):
if self.has_priors:
mean = np.sum(np.diag(\
self.posterior_covariance_times_prior_inverse_covariance))
else:
mean = 0
self._reduced_chi_squared_expected_mean =\
(mean + self.degrees_of_freedom) / self.degrees_of_freedom
return self._reduced_chi_squared_expected_mean
@property
def reduced_chi_squared_expected_variance(self):
"""
The expected variance of `Fitter.reduced_chi_squared`, represented
mathematically as \\(\\frac{2}{\\text{dof}^2}[\\text{dof} +\
\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1}\
\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})]\\).
"""
if not hasattr(self, '_reduced_chi_squared_expected_variance'):
if self.has_priors:
variance =\
self.posterior_covariance_times_prior_inverse_covariance
variance = np.sum(variance * variance.T)
else:
variance = 0
self._reduced_chi_squared_expected_variance =\
(2 * (variance + self.degrees_of_freedom)) /\
(self.degrees_of_freedom ** 2)
return self._reduced_chi_squared_expected_variance
@property
def reduced_chi_squared_expected_distribution(self):
"""
A `distpy.distribution.GaussianDistribution.GaussianDistribution` with
mean given by `Fitter.reduced_chi_squared_expected_mean` and variance
given by `Fitter.reduced_chi_squared_expected_variance`.
"""
if not hasattr(self, '_reduced_chi_squared_expected_distribution'):
if self.has_priors:
self._reduced_chi_squared_expected_distribution =\
GaussianDistribution(\
self.reduced_chi_squared_expected_mean,\
self.reduced_chi_squared_expected_variance)
else:
self._reduced_chi_squared_expected_distribution =\
ChiSquaredDistribution(self.degrees_of_freedom,\
reduced=True)
return self._reduced_chi_squared_expected_distribution
@property
def psi_squared(self):
"""
Property storing the reduced psi-squared values of the fit(s) in this
Fitter.
"""
if not hasattr(self, '_psi_squared'):
if self.multiple_data_curves:
self._psi_squared =\
np.array([psi_squared(bias, error=None)\
for bias in self.weighted_bias])
else:
self._psi_squared = psi_squared(self.weighted_bias, error=None)
return self._psi_squared
@property
def maximum_loglikelihood(self):
"""
The maximum value of the Gaussian loglikelihood (when the normalizing
constant outside the exponential is left off).
"""
if not hasattr(self, '_maximum_loglikelihood'):
self._maximum_loglikelihood =\
(-(self.likelihood_bias_statistic / 2.))
return self._maximum_loglikelihood
@property
def parameter_covariance(self):
"""
The covariance matrix of the posterior parameter distribution,
represented mathematically as \\(\\boldsymbol{S}=(\\boldsymbol{G}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
\\boldsymbol{\\Lambda}^{-1})^{-1}\\).
"""
if not hasattr(self, '_parameter_covariance'):
self._parameter_covariance =\
la.inv(self.parameter_inverse_covariance)
return self._parameter_covariance
@property
def log_parameter_covariance_determinant(self):
"""
The logarithm (base e) of the determinant of the posterior parameter
covariance matrix, represented mathematically as
\\(\\ln{\\Vert\\boldsymbol{S}\\Vert}\\).
"""
if not hasattr(self, '_log_parameter_covariance_determinant'):
self._log_parameter_covariance_determinant =\
la.slogdet(self.parameter_covariance)[1]
return self._log_parameter_covariance_determinant
@property
def log_parameter_covariance_determinant_ratio(self):
"""
The logarithm (base e) of the ratio of the determinant of the posterior
parameter covariance matrix to the determinant of the prior parameter
covariance matrix. This can be thought of as the log of the ratio of
the hypervolume of the 1 sigma posterior ellipse to the hypervolume of
the 1 sigma prior ellipse. It is represented mathematically as
\\(\\ln{\\left(\\frac{\\Vert\\boldsymbol{S}\\Vert}{\
\\Vert\\boldsymbol{\\Lambda}\\Vert}\\right)}\\).
"""
if not hasattr(self, '_log_parameter_covariance_determinant_ratio'):
self._log_parameter_covariance_determinant_ratio =\
self.log_parameter_covariance_determinant -\
self.log_prior_covariance_determinant
return self._log_parameter_covariance_determinant_ratio
@property
def channel_error(self):
"""
The error on the estimate of the full data in channel space,
given by the square root of the diagonal of
\\(\\boldsymbol{G}\\boldsymbol{S}\\boldsymbol{G}^T\\).
"""
if not hasattr(self, '_channel_error'):
SAT = np.dot(self.parameter_covariance, self.basis_sum.basis)
self._channel_error =\
np.sqrt(np.einsum('ab,ab->b', self.basis_sum.basis, SAT))
return self._channel_error
@property
def channel_RMS(self):
"""
The RMS error on the estimate of the full data in channel space.
"""
if not hasattr(self, '_channel_RMS'):
self._channel_RMS =\
np.sqrt(np.mean(np.power(self.channel_error, 2)))
return self._channel_RMS
@property
def parameter_mean(self):
"""
The posterior mean parameter vector(s). It is represented
mathematically as
\\(\\boldsymbol{\\gamma} =\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
\\boldsymbol{\\Lambda}^{-1})^{-1}[\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y}-\\boldsymbol{\\mu}) +\
\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}]\\) and is stored in a
`numpy.ndarray` whose shape is either `(nparams,)` or
`(ncurves, nparams)`.
"""
if not hasattr(self, '_parameter_mean'):
self._parameter_mean =\
np.dot(self.weighted_basis, self.weighted_translated_data.T).T
if self.has_priors:
if self.multiple_data_curves:
self._parameter_mean = self._parameter_mean +\
self.prior_inverse_covariance_times_mean[np.newaxis,:]
else:
self._parameter_mean = self._parameter_mean +\
self.prior_inverse_covariance_times_mean
self._parameter_mean =\
np.dot(self.parameter_covariance, self._parameter_mean.T).T
return self._parameter_mean
@property
def parameter_distribution(self):
"""
Property storing a
`distpy.distribution.GaussianDistribution.GaussianDistribution`
representing a distribution with the mean and covariance stored in
`Fitter.parameter_mean` and `Fitter.parameter_covariance`,
respectively.
"""
if not hasattr(self, '_parameter_distribution'):
if self.multiple_data_curves:
raise ValueError("parameter_distribution only makes sense " +\
"if the Fitter has only one data curve.")
else:
self._parameter_distribution = GaussianDistribution(\
self.parameter_mean, self.parameter_covariance)
return self._parameter_distribution
@property
def posterior_significance(self):
"""
The posterior significance, represented mathematically as
\\(\\boldsymbol{\\gamma}^T \\boldsymbol{S}^{-1} \\boldsymbol{\\gamma}\\),
where \\(\\boldsymbol{\\gamma}\\) is `Fitter.parameter_mean`.
"""
if not hasattr(self, '_posterior_significance'):
if self.multiple_data_curves:
inverse_covariance_times_mean = np.dot(self.parameter_mean,\
self.parameter_inverse_covariance)
self._posterior_significance = np.sum(\
self.parameter_mean * inverse_covariance_times_mean,\
axis=1)
del inverse_covariance_times_mean
else:
self._posterior_significance =\
np.dot(self.parameter_mean,\
np.dot(self.parameter_inverse_covariance,\
self.parameter_mean))
return self._posterior_significance
@property
def channel_mean(self):
"""
The posterior estimate of the modeled data in channel space.
"""
if not hasattr(self, '_channel_mean'):
self._channel_mean = self.basis_sum.translation +\
np.dot(self.basis_sum.basis.T, self.parameter_mean.T).T
return self._channel_mean
@property
def channel_bias(self):
"""
The bias of the estimate of the data (i.e. the posterior estimate of
the data minus the data), represented mathematically as
\\(\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_channel_bias'):
self._channel_bias = self.data - self.channel_mean
return self._channel_bias
@property
def channel_bias_RMS(self):
"""
The RMS of `Fitter.channel_bias`.
"""
if not hasattr(self, '_channel_bias_RMS'):
if self.multiple_data_curves:
self._channel_bias_RMS = np.sqrt(\
np.sum(self.channel_bias ** 2, axis=1) / self.num_channels)
else:
self._channel_bias_RMS =\
np.sqrt(np.dot(self.channel_bias, self.channel_bias) /\
self.num_channels)
return self._channel_bias_RMS
@property
def weighted_bias(self):
"""
The posterior channel bias weighted down by the errors, represented
mathematically as \\(\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_weighted_bias'):
self._weighted_bias = self.weight(self.channel_bias, -1)
return self._weighted_bias
@property
def bias_statistic(self):
"""
A statistic known as the "bias statistic", represented mathematically
as
\\(\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
It is a measure of the bias of the full model being fit. It should have
a \\(\\chi^2(N)\\) distribution where \\(N\\) is the number of degrees
of freedom.
"""
if not hasattr(self, '_bias_statistic'):
if self.multiple_data_curves:
self._bias_statistic = np.sum(self.weighted_bias ** 2, axis=1)
else:
self._bias_statistic =\
np.dot(self.weighted_bias, self.weighted_bias)
return self._bias_statistic
@property
def loglikelihood_at_posterior_maximum(self):
"""
The value of the Gaussian loglikelihood (without the normalizing factor
outside the exponential) at the maximum of the posterior distribution.
"""
if not hasattr(self, '_loglikelihood_at_posterior_maximum'):
self._loglikelihood_at_posterior_maximum =\
(-(self.bias_statistic / 2.))
return self._loglikelihood_at_posterior_maximum
@property
def normalized_bias_statistic(self):
"""
The reduced chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as \\(\\frac{1}{\\text{dof}}\
\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_normalized_bias_statistic'):
self._normalized_bias_statistic =\
self.bias_statistic / self.degrees_of_freedom
return self._normalized_bias_statistic
@property
def likelihood_significance_difference(self):
"""
The likelihood covariance part of the significance difference, equal to
\\(\\boldsymbol{\\gamma}^T\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
\\boldsymbol{G}\\boldsymbol{\\gamma}-(\\boldsymbol{y}-\\boldsymbol{\\mu})^T\
\\boldsymbol{C}^{-1}(\\boldsymbol{y}-\\boldsymbol{\\mu})\\) where
\\(\\boldsymbol{\\gamma}\\) is `Fitter.parameter_mean`.
"""
if not hasattr(self, '_likelihood_significance_difference'):
mean_sum = self.weight(self.channel_mean + self.data -\
(2 * self.basis_sum.translation), -1)
mean_difference = self.weight(self.channel_mean - self.data, -1)
if self.multiple_data_curves:
self._likelihood_significance_difference =\
np.sum(mean_sum * mean_difference, axis=1)
else:
self._likelihood_significance_difference =\
np.dot(mean_sum, mean_difference)
return self._likelihood_significance_difference
@property
def prior_significance_difference(self):
"""
Property storing the prior covariance part of the significance
difference. This is equal to \\(\\boldsymbol{\\gamma}^T\
\\boldsymbol{\\Lambda}^{-1} \\boldsymbol{\\gamma} -\
\\boldsymbol{\\nu}^T \\boldsymbol{\\Lambda}^{-1} \\boldsymbol{\\nu}\\).
"""
if not hasattr(self, '_prior_significance_difference'):
if self.multiple_data_curves:
self._prior_significance_difference =\
np.zeros(self.data.shape[:-1])
else:
self._prior_significance_difference = 0
for name in self.names:
key = '{!s}_prior'.format(name)
if key in self.priors:
prior = self.priors[key]
prior_mean = prior.internal_mean.A[0]
prior_inverse_covariance = prior.inverse_covariance.A
posterior_mean = self.subbasis_parameter_mean(name=name)
mean_sum = posterior_mean + prior_mean
mean_difference = posterior_mean - prior_mean
if self.multiple_data_curves:
this_term =\
np.dot(mean_difference, prior_inverse_covariance)
this_term = np.sum(this_term * mean_sum, axis=1)
else:
this_term = np.dot(mean_sum,\
np.dot(prior_inverse_covariance, mean_difference))
self._prior_significance_difference =\
self._prior_significance_difference + this_term
return self._prior_significance_difference
@property
def significance_difference(self):
"""
The difference between the posterior significance and the sum of the
data significance and prior significance. It is a term in the log
evidence and is given by
\\(\\boldsymbol{\\gamma}^T\\boldsymbol{S}^{-1}\\boldsymbol{\\gamma} -\
(\\boldsymbol{y}-\\boldsymbol{\\mu})^T\\boldsymbol{C}^{-1}(\\boldsymbol{y}-\\boldsymbol{\\mu}) -\
\\boldsymbol{\\nu}^T\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}\\).
"""
if not hasattr(self, '_significance_difference'):
self._significance_difference =\
self.likelihood_significance_difference +\
self.prior_significance_difference
return self._significance_difference
@property
def log_evidence(self):
"""
The natural logarithm of the evidence (a.k.a. marginal likelihood) of
this fit. The evidence is the integral over parameter space of the
product of the likelihood and the prior and is often very large.
"""
if not hasattr(self, '_log_evidence'):
log_evidence = (self.log_parameter_covariance_determinant_ratio +\
self.significance_difference) / 2.
if self.has_all_priors:
# only constants added below, ignore if numerical problems
log_evidence = log_evidence -\
((self.num_channels * np.log(2 * np.pi)) / 2.)
if self.non_diagonal_noise_covariance:
log_evidence = log_evidence +\
(self.error.sign_and_log_abs_determinant()[1]) / 2
else:
log_evidence = log_evidence + np.sum(np.log(self.error))
self._log_evidence = log_evidence
return self._log_evidence
@property
def log_evidence_per_data_channel(self):
"""
`Fitter.log_evidence` divided by the number of channels.
"""
if not hasattr(self, '_log_evidence_per_data_channel'):
self._log_evidence_per_data_channel =\
self.log_evidence / self.num_channels
return self._log_evidence_per_data_channel
@property
def evidence(self):
"""
The evidence (a.k.a. marginal likelihood) of this fit. Beware: the
evidence is often extremely large in magnitude, with log evidences
sometimes approaching +-10^7. In these cases, the exponentiation overflows
to `inf` (or underflows to 0) in floating point.
"""
if not hasattr(self, '_evidence'):
self._evidence = np.exp(self.log_evidence)
return self._evidence
@property
def evidence_per_data_channel(self):
"""
The factor by which each data channel multiplies the Bayesian evidence
on average (more precisely, the geometric mean of these numbers).
"""
if not hasattr(self, '_evidence_per_data_channel'):
self._evidence_per_data_channel =\
np.exp(self.log_evidence_per_data_channel)
return self._evidence_per_data_channel
@property
def bayesian_information_criterion(self):
"""
The Bayesian Information Criterion (BIC) which is essentially the same
as the bias statistic except it includes information about the
complexity of the model. It is \\(\\boldsymbol{\\delta}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta} + p\\ln{N}\\), where \\(p\\)
is the number of parameters and \\(N\\) is the number of data channels.
"""
if not hasattr(self, '_bayesian_information_criterion'):
self._bayesian_information_criterion =\
self.likelihood_bias_statistic +\
(self.num_parameters * np.log(self.num_channels))
return self._bayesian_information_criterion
@property
def BIC(self):
"""
Alias for `Fitter.bayesian_information_criterion`.
"""
return self.bayesian_information_criterion
@property
def akaike_information_criterion(self):
"""
An information criterion given by \\(\\boldsymbol{\\delta}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta} + 2p\\), where \\(p\\) is the
number of parameters.
"""
if not hasattr(self, '_akaike_information_criterion'):
self._akaike_information_criterion =\
self.likelihood_bias_statistic + (2 * self.num_parameters)
return self._akaike_information_criterion
@property
def AIC(self):
"""
Alias for `Fitter.akaike_information_criterion`.
"""
return self.akaike_information_criterion
######################## TODO documentation below this line hasn't been updated!
@property
def deviance_information_criterion(self):
"""
An information criterion given by 2 ln(L_max) - 4 <ln(L)> where L is
the likelihood, <> denotes averaging over the posterior, and L_max is
the maximum likelihood.
"""
if not hasattr(self, '_deviance_information_criterion'):
self._deviance_information_criterion =\
self.likelihood_bias_statistic +\
(2 * self.model_complexity_mean_to_peak_logL)
return self._deviance_information_criterion
@property
def DIC(self):
"""
Alias for deviance_information_criterion property.
"""
return self.deviance_information_criterion
@property
def deviance_information_criterion_logL_variance(self):
"""
Version of the Deviance Information Criterion (DIC) which estimates the
model complexity through computation of the variance of the log
likelihood (with respect to the posterior).
"""
if not hasattr(self, '_deviance_information_criterion_logL_variance'):
self._deviance_information_criterion_logL_variance =\
self.likelihood_bias_statistic +\
self.model_complexity_logL_variance
return self._deviance_information_criterion_logL_variance
@property
def DIC2(self):
"""
Alias for the deviance_information_criterion_logL_variance property.
"""
return self.deviance_information_criterion_logL_variance
@property
def posterior_prior_mean_difference(self):
"""
Property storing the difference between the posterior parameter mean
and the prior parameter mean.
"""
if not hasattr(self, '_posterior_prior_mean_difference'):
if self.multiple_data_curves:
self._posterior_prior_mean_difference =\
self.parameter_mean - self.prior_mean[np.newaxis,:]
else:
self._posterior_prior_mean_difference =\
self.parameter_mean - self.prior_mean
return self._posterior_prior_mean_difference
@property
def bayesian_predictive_information_criterion(self):
"""
The Bayesian Predictive Information Criterion (BPIC), a statistic which
gives relative goodness-of-fit values.
"""
if not hasattr(self, '_bayesian_predictive_information_criterion'):
self._bayesian_predictive_information_criterion =\
self.num_parameters + self.bias_statistic
if self.has_priors: # TODO
self._bayesian_predictive_information_criterion -= np.trace(\
self.posterior_covariance_times_prior_inverse_covariance)
term_v1 = np.dot(\
self.posterior_covariance_times_prior_inverse_covariance,\
self.posterior_prior_mean_difference.T).T
term_v2 = np.dot(self.prior_inverse_covariance,\
self.posterior_prior_mean_difference.T).T +\
(2 * np.dot(self.weighted_basis, self.weighted_bias.T).T)
if self.multiple_data_curves:
self._bayesian_predictive_information_criterion +=\
(np.sum(term_v1 * term_v2, axis=1) / self.num_channels)
else:
self._bayesian_predictive_information_criterion +=\
(np.dot(term_v1, term_v2) / self.num_channels)
if self.non_diagonal_noise_covariance:
doubly_weighted_basis =\
self.weight(self.weight(self.basis_sum.basis, -1), -1)
self._bayesian_predictive_information_criterion +=\
(2 * np.einsum('ij,ik,jk,k', self.parameter_covariance,\
doubly_weighted_basis, doubly_weighted_basis,\
self.channel_bias ** 2))
else:
weighted_error = self.channel_error / self.error
if self.multiple_data_curves:
weighted_error = weighted_error[np.newaxis,:]
to_sum = ((weighted_error * self.weighted_bias) ** 2)
self._bayesian_predictive_information_criterion +=\
(2 * np.sum(to_sum, axis=-1))
del to_sum
return self._bayesian_predictive_information_criterion
@property
def BPIC(self):
"""
Alias for `Fitter.bayesian_predictive_information_criterion`.
"""
return self.bayesian_predictive_information_criterion
def subbasis_log_separation_evidence(self, name=None):
"""
Calculates the subbasis log separation evidence per degree of freedom.
This is the same as the evidence with the log covariance determinant
ratio replaced by the log covariance determinant ratio for the given
subbasis (normalized by the degrees of freedom).
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self, '_subbasis_log_separation_evidences'):
self._subbasis_log_separation_evidences = {}
if name not in self._subbasis_log_separation_evidences:
self._subbasis_log_separation_evidences[name] =\
(self.log_evidence -\
(self.log_parameter_covariance_determinant_ratio / 2.) +\
(self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name) / 2.)) / self.degrees_of_freedom
return self._subbasis_log_separation_evidences[name]
def subbasis_separation_evidence_per_degree_of_freedom(self, name=None):
"""
Finds the subbasis separation evidence per degree of freedom.
name: string identifying subbasis under concern
returns: single non-negative float number
"""
if not hasattr(self,\
'_subbasis_separation_evidences_per_degree_of_freedom'):
self._subbasis_separation_evidences_per_degree_of_freedom = {}
if name not in\
self._subbasis_separation_evidences_per_degree_of_freedom:
self._subbasis_separation_evidences_per_degree_of_freedom[name] =\
np.exp(self.subbasis_log_separation_evidence(name=name))
return self._subbasis_separation_evidences_per_degree_of_freedom[name]
@property
def log_separation_evidence(self):
"""
Property storing the logarithm (base e) of the separation evidence, a
version of the evidence where the log of the ratio of the determinants
of the posterior to prior covariance matrices is replaced by the sum
over all subbases of such logs of ratios.
"""
if not hasattr(self, '_log_separation_evidence'):
self._log_separation_evidence = self.log_evidence -\
(self.log_parameter_covariance_determinant_ratio / 2.) +\
(self.subbasis_log_parameter_covariance_determinant_ratios_sum\
/ 2.)
return self._log_separation_evidence
@property
def log_separation_evidence_per_data_channel(self):
"""
Property storing the log_separation_evidence divided by the number of
data channels. For more information, see the log_separation_evidence
property.
"""
if not hasattr(self, '_log_separation_evidence_per_data_channel'):
self._log_separation_evidence_per_data_channel =\
self.log_separation_evidence / self.num_channels
return self._log_separation_evidence_per_data_channel
@property
def separation_evidence(self):
"""
Property storing the separation evidence, a version of the evidence
where the log of the ratio of the determinants of the posterior to
prior covariance matrices is replaced by the sum over all subbases of
such logs of ratios.
"""
if not hasattr(self, '_separation_evidence'):
self._separation_evidence = np.exp(self.log_separation_evidence)
return self._separation_evidence
@property
def separation_evidence_per_data_channel(self):
"""
Property storing the average (geometric mean) factor by which each data
channel affects the separation evidence.
"""
if not hasattr(self, '_separation_evidence_per_data_channel'):
self._separation_evidence_per_data_channel =\
np.exp(self.log_separation_evidence_per_data_channel)
return self._separation_evidence_per_data_channel
@property
def subbasis_log_parameter_covariance_determinant_ratios_sum(self):
"""
Property storing the sum of the logarithms (base e) of the ratios of
the posterior parameter covariance matrices to the prior parameter
covariance matrices.
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinant_ratios_sum'):
self._subbasis_log_parameter_covariance_determinant_ratios_sum =\
sum([self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name) for name in self.names])
return self._subbasis_log_parameter_covariance_determinant_ratios_sum
def subbasis_prior_significance(self, name=None):
"""
Finds and returns the quantity: mu^T Lambda^{-1} mu, where mu is the
prior subbasis parameter mean and Lambda is the prior subbasis
parameter covariance.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self, '_subbasis_prior_significances'):
self._subbasis_prior_significances = {}
if name not in self._subbasis_prior_significances:
prior = self.priors[name + '_prior']
mean = prior.internal_mean.A[0]
inverse_covariance = prior.inverse_covariance.A
self._subbasis_prior_significances[name] =\
np.dot(mean, np.dot(inverse_covariance, mean))
return self._subbasis_prior_significances[name]
def subbasis_parameter_inverse_covariance(self, name=None):
"""
Finds the inverse of the marginalized covariance matrix corresponding
to the given subbasis.
name: string identifying subbasis under concern
"""
if not hasattr(self, '_subbasis_parameter_inverse_covariances'):
self._subbasis_parameter_inverse_covariances = {}
if name not in self._subbasis_parameter_inverse_covariances:
self._subbasis_parameter_inverse_covariances[name] =\
la.inv(self.subbasis_parameter_covariance(name=name))
return self._subbasis_parameter_inverse_covariances[name]
def subbases_overlap_matrix(self, row_name=None, column_name=None):
"""
Creates a view into the overlap matrix between the given subbases.
row_name: the (string) name of the subbasis whose parameter number will
be represented by the row of the returned matrix.
column_name: the (string) name of the subbasis whose parameter number
will be represented by the column of the returned matrix
returns: n x m matrix where n is the number of basis vectors in the row
subbasis and m is the number of basis vectors in the column
subbasis in the form of a 2D numpy.ndarray
"""
row_slice = self.basis_sum.slices_by_name[row_name]
column_slice = self.basis_sum.slices_by_name[column_name]
return self.basis_overlap_matrix[:,column_slice][row_slice]
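# Illustrative sketch of the double-slice pattern used above (standalone
# example with hypothetical values, not part of the original class):
# slicing columns first and rows second extracts the (row, column) block
# of a 2D array.
#
#     import numpy as np
#     matrix = np.arange(36).reshape((6, 6))
#     row_slice, column_slice = slice(0, 2), slice(2, 5)
#     block = matrix[:,column_slice][row_slice] # shape (2, 3)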
def subbasis_parameter_covariance(self, name=None):
"""
Finds and returns the portion of the parameter covariance matrix
associated with the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns 2D numpy.ndarray of shape (k, k) where k is the number of basis
vectors in the subbasis
"""
if not hasattr(self, '_subbasis_parameter_covariances'):
self._subbasis_parameter_covariances = {}
if name not in self._subbasis_parameter_covariances:
subbasis_slice = self.basis_sum.slices_by_name[name]
self._subbasis_parameter_covariances[name] =\
self.parameter_covariance[:,subbasis_slice][subbasis_slice]
return self._subbasis_parameter_covariances[name]
def subbasis_log_parameter_covariance_determinant(self, name=None):
"""
Finds the logarithm (base e) of the determinant of the posterior
parameter covariance matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinants'):
self._subbasis_log_parameter_covariance_determinants = {}
if name not in self._subbasis_log_parameter_covariance_determinants:
self._subbasis_log_parameter_covariance_determinants[name] =\
la.slogdet(self.subbasis_parameter_covariance(name=name))[1]
return self._subbasis_log_parameter_covariance_determinants[name]
def subbasis_log_prior_covariance_determinant(self, name=None):
"""
Finds the logarithm (base e) of the determinant of the prior parameter
covariance matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if type(name) is type(None):
return self.log_prior_covariance_determinant
if not hasattr(self, '_subbasis_log_prior_covariance_determinants'):
self._subbasis_log_prior_covariance_determinants = {}
if name not in self._subbasis_log_prior_covariance_determinants:
self._subbasis_log_prior_covariance_determinants[name] =\
la.slogdet(self.priors[name + '_prior'].covariance.A)[1]
return self._subbasis_log_prior_covariance_determinants[name]
def subbasis_log_parameter_covariance_determinant_ratio(self, name=None):
"""
Finds logarithm (base e) of the ratio of the determinant of the
posterior covariance matrix to the determinant of the prior covariance
matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinant_ratios'):
self._subbasis_log_parameter_covariance_determinant_ratios = {}
if name not in\
self._subbasis_log_parameter_covariance_determinant_ratios:
self._subbasis_log_parameter_covariance_determinant_ratios[name] =\
self.subbasis_log_parameter_covariance_determinant(name=name)-\
self.subbasis_log_prior_covariance_determinant(name=name)
return self._subbasis_log_parameter_covariance_determinant_ratios[name]
def subbasis_parameter_covariance_determinant_ratio(self, name=None):
"""
Finds the ratio of the determinant of the posterior covariance matrix
to the determinant of the prior covariance matrix for the given
subbasis.
name: string identifying subbasis under concern
returns: single non-negative float number
"""
if not hasattr(self,\
'_subbasis_parameter_covariance_determinant_ratios'):
self._subbasis_parameter_covariance_determinant_ratios = {}
if type(name) is type(None):
self._subbasis_parameter_covariance_determinant_ratios[name] =\
np.exp(\
self.subbasis_log_parameter_covariance_determinant_ratios_sum)
elif name not in\
self._subbasis_parameter_covariance_determinant_ratios:
self._subbasis_parameter_covariance_determinant_ratios[name] =\
np.exp(\
self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name))
return self._subbasis_parameter_covariance_determinant_ratios[name]
def subbasis_channel_error(self, name=None):
"""
Finds the error (in data channel space) of the fit by a given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray of the same length as the basis vectors of
the subbasis (which may or may not be different than the
length of the expanded basis vectors).
"""
if type(name) is type(None):
return self.channel_error
if not hasattr(self, '_subbasis_channel_errors'):
self._subbasis_channel_errors = {}
if name not in self._subbasis_channel_errors:
basis = self.basis_sum[name].basis
covariance_times_basis =\
np.dot(self.subbasis_parameter_covariance(name=name), basis)
self._subbasis_channel_errors[name] =\
np.sqrt(np.sum(covariance_times_basis * basis, axis=0))
return self._subbasis_channel_errors[name]
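# Illustrative sketch (assumptions: covariance is a k x k posterior
# parameter covariance and basis is a k x n array of basis vectors, as in
# the method above). The channel-space error is the square root of the
# diagonal of basis^T covariance basis, computed without forming the full
# n x n matrix:
#
#     covariance_times_basis = np.dot(covariance, basis)
#     channel_error = np.sqrt(np.sum(covariance_times_basis * basis, axis=0))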
def subbasis_parameter_mean(self, name=None):
"""
Finds the posterior parameter mean for a subbasis. This is just a view
into the full posterior parameter mean.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray containing the parameters for the given
subbasis
"""
if not hasattr(self, '_subbasis_parameter_means'):
self._subbasis_parameter_means = {}
if name not in self._subbasis_parameter_means:
self._subbasis_parameter_means[name] =\
self.parameter_mean[...,self.basis_sum.slices_by_name[name]]
return self._subbasis_parameter_means[name]
def subbasis_channel_mean(self, name=None):
"""
The estimate of the contribution to the data from the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray containing the channel-space estimate from
the given subbasis
"""
if not hasattr(self, '_subbasis_channel_means'):
self._subbasis_channel_means = {}
if name not in self._subbasis_channel_means:
self._subbasis_channel_means[name] =\
np.dot(self.subbasis_parameter_mean(name=name),\
self.basis_sum[name].basis) + self.basis_sum[name].translation
return self._subbasis_channel_means[name]
def subbasis_channel_RMS(self, name=None):
"""
Calculates and returns the RMS channel error on the estimate of the
contribution to the data from the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: single float number RMS
"""
if not hasattr(self, '_subbasis_channel_RMSs'):
self._subbasis_channel_RMSs = {}
if name not in self._subbasis_channel_RMSs:
self._subbasis_channel_RMSs[name] = np.sqrt(\
np.mean(np.power(self.subbasis_channel_error(name=name), 2)))
return self._subbasis_channel_RMSs[name]
def subbasis_separation_statistic(self, name=None):
"""
Finds the separation statistic associated with the given subbasis. The
separation statistic is essentially an RMS'd error expansion factor.
name: name of the subbasis for which to find the separation statistic
"""
if not hasattr(self, '_subbasis_separation_statistics'):
self._subbasis_separation_statistics = {}
if name not in self._subbasis_separation_statistics:
weighted_basis =\
self.weight(self.basis_sum[name].expanded_basis, -1)
stat = np.dot(weighted_basis, weighted_basis.T)
stat = np.sum(stat * self.subbasis_parameter_covariance(name=name))
stat = np.sqrt(stat / self.degrees_of_freedom)
self._subbasis_separation_statistics[name] = stat
return self._subbasis_separation_statistics[name]
def subbasis_channel_bias(self, name=None, true_curve=None):
"""
Calculates and returns the bias on the estimate from the given subbasis
using the given curve as a reference.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis channel space
returns: 1D numpy.ndarray in channel space containing the difference
between the estimate of the data's contribution from the given
subbasis and the given true curve
"""
if type(name) is type(None):
if type(true_curve) is type(None):
return self.channel_bias
else:
raise ValueError("true_curve should only be given to " +\
"subbasis_channel_bias if the name of a " +\
"subbasis is specified.")
else:
if type(true_curve) is type(None):
raise ValueError("true_curve must be given to " +\
"subbasis_channel_bias if the name of a " +\
"subbasis is specified.")
if self.multiple_data_curves and (true_curve.ndim == 1):
return true_curve[np.newaxis,:] -\
self.subbasis_channel_mean(name=name)
else:
return true_curve - self.subbasis_channel_mean(name=name)
def subbasis_weighted_bias(self, name=None, true_curve=None):
"""
The bias of the contribution of a given subbasis to the data. This
function requires knowledge of the "truth".
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis
returns: 1D numpy.ndarray of weighted bias values
"""
subbasis_channel_bias =\
self.subbasis_channel_bias(name=name, true_curve=true_curve)
subbasis_channel_error = self.subbasis_channel_error(name=name)
if self.multiple_data_curves:
return subbasis_channel_bias / subbasis_channel_error[np.newaxis,:]
else:
return subbasis_channel_bias / subbasis_channel_error
def subbasis_bias_statistic(self, name=None, true_curve=None,\
norm_by_dof=False):
"""
The bias statistic of the fit to the contribution of the given
subbasis. The bias statistic is delta^T C^-1 delta where delta is the
difference between the true curve(s) and the channel mean(s) normalized
by the degrees of freedom.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis
norm_by_dof: if True, the summed squared error-weighted subbasis bias is
normalized by the subbasis degrees of freedom.
if False (default), it is normalized by the number of channels in the
subbasis.
returns: single float number (or 1D numpy.ndarray if this Fitter contains
multiple data curves) giving the normalized bias statistic
"""
weighted_bias = self.subbasis_weighted_bias(name=name,\
true_curve=true_curve)
normalization_factor = weighted_bias.shape[-1]
if norm_by_dof:
normalization_factor -= self.basis_sum[name].num_basis_vectors
if self.multiple_data_curves:
unnormalized = np.sum(weighted_bias ** 2, axis=1)
else:
unnormalized = np.dot(weighted_bias, weighted_bias)
return unnormalized / normalization_factor
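# Illustrative usage sketch (hypothetical names): fitter is an existing
# Fitter, 'signal' is one of its subbasis names, and true_signal is a known
# curve in that subbasis' channel space.
#
#     stat = fitter.subbasis_bias_statistic(name='signal',
#         true_curve=true_signal, norm_by_dof=True)
#     print('signal bias statistic per degree of freedom: %g' % stat)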
def bias_score(self, training_sets, max_block_size=2**20,\
num_curves_to_score=None, bases_to_score=None):
"""
Evaluates the candidate basis_sum given the available training sets.
training_sets: dictionary of training_sets indexed by basis name
max_block_size: number of floats in the largest possible training set
block
num_curves_to_score: total number of training set curves to consider
bases_to_score: the names of the subbases to include in the scoring
(all bases are always used, the names not in
bases_to_score simply do not have their
subbasis_bias_statistic calculated/included)
returns: scalar value of Delta
"""
if len(self.basis_sum.names) != len(training_sets):
raise ValueError("There must be the same number of basis sets " +\
"as training sets.")
if (type(bases_to_score) is type(None)) or (not bases_to_score):
bases_to_score = self.basis_sum.names
score = 0.
expanders = [basis.expander for basis in self.basis_sum]
iterator = TrainingSetIterator(training_sets, expanders=expanders,\
max_block_size=max_block_size, mode='add',\
curves_to_return=num_curves_to_score, return_constituents=True)
for (block, constituents) in iterator:
num_channels = block.shape[1]
fitter = Fitter(self.basis_sum, block, self.error, **self.priors)
for basis_to_score in bases_to_score:
true_curve =\
constituents[self.basis_sum.names.index(basis_to_score)]
result = fitter.subbasis_bias_statistic(\
name=basis_to_score, true_curve=true_curve)
score += np.sum(result)
if type(num_curves_to_score) is type(None):
num_curves_to_score =\
np.prod([ts.shape[0] for ts in training_sets])
score = score / (num_curves_to_score * num_channels)
return score
def fill_hdf5_group(self, root_group, data_link=None, error_link=None,\
basis_links=None, expander_links=None, prior_mean_links=None,\
prior_covariance_links=None, save_channel_estimates=False):
"""
Fills the given hdf5 file group with data about the inputs and results
of this Fitter.
root_group: the hdf5 file group to fill (only required argument)
data_link: link to existing data dataset, if it exists (see
create_hdf5_dataset docs for info about accepted formats)
error_link: link to existing error dataset, if it exists (see
create_hdf5_dataset docs for info about accepted formats)
basis_links: list of links to basis functions saved elsewhere (see
create_hdf5_dataset docs for info about accepted formats)
expander_links: list of links to existing saved Expander (see
create_hdf5_dataset docs for info about accepted
formats)
prior_mean_links: dict of links to existing saved prior means (see
create_hdf5_dataset docs for info about accepted
formats)
prior_covariance_links: dict of links to existing saved prior
covariances (see create_hdf5_dataset docs for
info about accepted formats)
"""
self.save_data(root_group, data_link=data_link)
self.save_error(root_group, error_link=error_link)
group = root_group.create_group('sizes')
for name in self.names:
group.attrs[name] = self.sizes[name]
group = root_group.create_group('posterior')
create_hdf5_dataset(group, 'parameter_mean', data=self.parameter_mean)
create_hdf5_dataset(group, 'parameter_covariance',\
data=self.parameter_covariance)
if save_channel_estimates:
create_hdf5_dataset(group, 'channel_mean', data=self.channel_mean)
create_hdf5_dataset(group, 'channel_error', data=self.channel_error)
for name in self.names:
subgroup = group.create_group(name)
subbasis_slice = self.basis_sum.slices_by_name[name]
create_hdf5_dataset(subgroup, 'parameter_covariance',\
link=(group['parameter_covariance'],[subbasis_slice]*2))
mean_slices =\
(((slice(None),) * (self.data.ndim - 1)) + (subbasis_slice,))
create_hdf5_dataset(subgroup, 'parameter_mean',\
link=(group['parameter_mean'],mean_slices))
if save_channel_estimates:
create_hdf5_dataset(subgroup, 'channel_mean',\
data=self.subbasis_channel_mean(name=name))
create_hdf5_dataset(subgroup, 'channel_error',\
data=self.subbasis_channel_error(name=name))
self.save_basis_sum(root_group, basis_links=basis_links,\
expander_links=expander_links)
root_group.attrs['degrees_of_freedom'] = self.degrees_of_freedom
root_group.attrs['BPIC'] = self.BPIC
root_group.attrs['DIC'] = self.DIC
root_group.attrs['AIC'] = self.AIC
root_group.attrs['BIC'] = self.BIC
root_group.attrs['normalized_likelihood_bias_statistic'] =\
self.normalized_likelihood_bias_statistic
root_group.attrs['normalized_bias_statistic'] =\
self.normalized_bias_statistic
self.save_priors(root_group, prior_mean_links=prior_mean_links,\
prior_covariance_links=prior_covariance_links)
if self.has_priors:
root_group.attrs['log_evidence_per_data_channel'] =\
self.log_evidence_per_data_channel
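# Illustrative usage sketch (assumes h5py is installed; the file name and
# the fitter instance are hypothetical). An h5py File object acts as the
# root group expected by fill_hdf5_group.
#
#     import h5py
#     with h5py.File('fitter_results.hdf5', 'w') as root_group:
#         fitter.fill_hdf5_group(root_group, save_channel_estimates=True)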
def plot_overlap_matrix(self, title='Overlap matrix', fig=None, ax=None,\
show=True, **kwargs):
"""
Plots the overlap matrix of the total basis.
title: (Optional) the title of the plot. default: 'Overlap matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
image = ax.imshow(self.basis_overlap_matrix, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_parameter_covariance(self, title='Covariance matrix', fig=None,\
ax=None, show=True, **kwargs):
"""
Plots the posterior parameter covariance matrix.
title: (Optional) the title of the plot. default: 'Covariance matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
image = ax.imshow(self.parameter_covariance, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_subbasis_fit(self, nsigma=1, name=None, which_data=None,\
true_curve=None, subtract_truth=False, shorter_error=None,\
x_values=None, title=None, xlabel='x', ylabel='y', fig=None, ax=None,\
show_noise_level=False, noise_level_alpha=0.5, full_error_alpha=0.2,\
colors='b', full_error_first=True, yscale='linear', show=False):
"""
Plots the fit of the contribution to the data from a given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis
subtract_truth: Boolean which determines whether the residuals of a fit
are plotted or just the curves. Can only be True if
true_curve is given or name is None.
shorter_error: 1D numpy.ndarray of the same length as the vectors of
the subbasis containing the error on the given subbasis
x_values: (Optional) x_values to use for plot
title: (Optional) the title of the plot
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: If True, matplotlib.pyplot.show() is called before this function
returns.
"""
if self.multiple_data_curves and (type(which_data) is type(None)):
which_data = 0
if type(name) is type(None):
mean = self.channel_mean
error = self.channel_error
else:
mean = self.subbasis_channel_mean(name=name)
error = self.subbasis_channel_error(name=name)
if isinstance(colors, basestring):
colors = [colors] * 3
if self.multiple_data_curves:
mean = mean[which_data]
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
if type(x_values) is type(None):
x_values = np.arange(len(mean))
if (type(true_curve) is type(None)) and (type(name) is type(None)):
if self.multiple_data_curves:
true_curve = self.data[which_data]
else:
true_curve = self.data
if (type(true_curve) is type(None)) and subtract_truth:
raise ValueError("Truth cannot be subtracted because it is not " +\
"known. Supply it as the true_curve argument " +\
"if you wish for it to be subtracted.")
if subtract_truth:
to_subtract = true_curve
ax.plot(x_values, np.zeros_like(x_values), color='k', linewidth=2,\
label='true')
else:
to_subtract = np.zeros_like(x_values)
if type(true_curve) is not type(None):
ax.plot(x_values, true_curve, color='k', linewidth=2,\
label='true')
ax.plot(x_values, mean - to_subtract, color=colors[0], linewidth=2,\
label='mean')
if full_error_first:
ax.fill_between(x_values, mean - to_subtract - (nsigma * error),\
mean - to_subtract + (nsigma * error), alpha=full_error_alpha,\
color=colors[1])
if show_noise_level:
if type(shorter_error) is not type(None):
ax.fill_between(x_values,\
mean - to_subtract - (nsigma * shorter_error),\
mean - to_subtract + (nsigma * shorter_error),\
alpha=noise_level_alpha, color=colors[2])
elif len(mean) == self.num_channels:
if self.non_diagonal_noise_covariance:
noise_error = np.sqrt(self.error.diagonal)
ax.fill_between(x_values,\
mean - to_subtract - (nsigma * noise_error),\
mean - to_subtract + (nsigma * noise_error),\
alpha=noise_level_alpha, color=colors[2])
else:
ax.fill_between(x_values,\
mean - to_subtract - (nsigma * self.error),\
mean - to_subtract + (nsigma * self.error),\
alpha=noise_level_alpha, color=colors[2])
if not full_error_first:
ax.fill_between(x_values, mean - to_subtract - (nsigma * error),\
mean - to_subtract + (nsigma * error), alpha=full_error_alpha,\
color=colors[1])
ax.set_yscale(yscale)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if type(title) is type(None):
if subtract_truth:
ax.set_title('Fit residual')
else:
ax.set_title('Fit curve')
else:
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_overlap_matrix_block(self, row_name=None, column_name=None,\
title='Overlap matrix', fig=None, ax=None, show=True, **kwargs):
"""
Plots a block of the overlap matrix between the given subbases.
row_name: the (string) name of the subbasis whose parameter number will
be represented by the row of the returned matrix.
column_name: the (string) name of the subbasis whose parameter number
will be represented by the column of the returned matrix
title: (Optional) the title of the plot. default: 'Overlap matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
to_show = self.subbases_overlap_matrix(row_name=row_name,\
column_name=column_name)
image = ax.imshow(to_show, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_parameter_covariance(self, name=None, title='Covariance matrix',\
fig=None, ax=None, show=True, **kwargs):
"""
Plots the posterior parameter covariance matrix.
name: the (string) name of the subbasis whose parameter number
will be represented by the rows and columns of the returned
matrix. If None, full parameter covariance is plotted.
Default: None
title: (Optional) the title of the plot. default: 'Covariance matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
if type(name) is type(None):
to_show = self.parameter_covariance
else:
to_show = self.subbasis_parameter_covariance(name=name)
image = ax.imshow(to_show, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
|
py
|
1a5791fa39d5049a631f8db1d43d6a618e230099
|
from flask import url_for
from auth import db
from auth.util import get_serializer
from werkzeug.security import check_password_hash
import datetime
class User(db.Model):
user_id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(256), nullable=False, unique=True)
password = db.Column(db.String(256), nullable=False)
email = db.Column(db.String(256), nullable=False)
verified = db.Column(db.Boolean, nullable=False)
enabled = db.Column(db.Boolean, nullable=False)
admin = db.Column(db.Boolean, nullable=False)
created = db.Column(db.DateTime, nullable=False)
touched = db.Column(db.DateTime, nullable=False)
def __init__(self, username, password, email, verified, enabled, admin):
self.username = username
self.password = password
self.email = email
self.verified = verified
self.enabled = enabled
self.admin = admin
self.created = datetime.datetime.utcnow()
self.touched = self.created
def check_password(self, password):
return check_password_hash(self.password, password)
def touch(self):
self.touched = datetime.datetime.utcnow()
db.session.commit()
def get_verification_link(self):
payload = get_serializer().dumps(self.user_id)
return url_for('verify_email', payload=payload, _external=True)
class IdentityToken(db.Model):
identity_token_id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.user_id), nullable=False, index=True)
name = db.Column(db.String(256), nullable=False)
token = db.Column(db.String(256), nullable=False)
enabled = db.Column(db.Boolean, nullable=False)
created = db.Column(db.DateTime, nullable=False)
touched = db.Column(db.DateTime, nullable=False)
user = db.relationship(User, backref=db.backref('identity_tokens', lazy='dynamic'))
def __init__(self, user, name, token, enabled):
self.user = user
self.name = name
self.token = token
self.enabled = enabled
self.created = datetime.datetime.utcnow()
self.touched = self.created
def check_token(self, token):
return check_password_hash(self.token, token)
def touch(self):
self.touched = datetime.datetime.utcnow()
db.session.commit()
class AccessToken(db.Model):
access_token_id = db.Column(db.Integer, primary_key=True)
identity_token_id = db.Column(db.Integer, db.ForeignKey(IdentityToken.identity_token_id), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey(User.user_id), nullable=False, index=True)
token = db.Column(db.String(256), nullable=False, unique=True)
enabled = db.Column(db.Boolean, nullable=False)
client_addr = db.Column(db.String(256), nullable=False)
client_timestamp = db.Column(db.DateTime, nullable=False)
server_addr = db.Column(db.String(256), nullable=True)
server_timestamp = db.Column(db.DateTime, nullable=True)
identity_token = db.relationship(IdentityToken, backref=db.backref('access_tokens', lazy='dynamic'))
user = db.relationship(User, backref=db.backref('access_tokens', lazy='dynamic'))
def __init__(self, identity_token, user, token, enabled, client_addr, client_timestamp, server_addr, server_timestamp):
self.identity_token = identity_token
self.user = user
self.token = token
self.enabled = enabled
self.client_addr = client_addr
self.client_timestamp = client_timestamp
self.server_addr = server_addr
self.server_timestamp = server_timestamp
@property
def server_addr_str(self):
known_servers = {
'162.243.195.82': 'michaelfogleman.com',
}
return known_servers.get(self.server_addr, self.server_addr)
@property
def age(self):
return datetime.datetime.utcnow() - self.client_timestamp
def check_token(self, token, max_age):
print("checking token:", token)
if self.age > max_age:
return False
return check_password_hash(self.token, token)
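# Illustrative usage sketch (not part of the original module): creating a
# user with a hashed password and verifying it. Assumes an application
# context where db.session is usable; the credentials are hypothetical.
#
#     from werkzeug.security import generate_password_hash
#     user = User('alice', generate_password_hash('secret'),
#         'alice@example.com', verified=False, enabled=True, admin=False)
#     db.session.add(user)
#     db.session.commit()
#     assert user.check_password('secret')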
|
py
|
1a5792123506958f61ad18bfa34d672a96f7a168
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.proxies.schemas.base import get_config
OPTIONS = """
location = /favicon.ico {{
rewrite ^ /static/images/favicon.ico;
}}
"""
def get_favicon_config():
return get_config(options=OPTIONS, indent=0)
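# Illustrative usage sketch (hedged): the rendered nginx location block can
# be retrieved as a string and embedded in a larger proxy configuration.
# The double braces in OPTIONS appear to be format-string escapes that
# render as literal braces.
#
#     favicon_block = get_favicon_config()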
|
py
|
1a57925325c48603f823d61951f42d7c275576ea
|
# -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements Transport Layer Security (TLS) support for Twisted. It
requires U{PyOpenSSL <https://pypi.python.org/pypi/pyOpenSSL>}.
If you wish to establish a TLS connection, please use one of the following
APIs:
- SSL endpoints for L{servers
<twisted.internet.endpoints.SSL4ServerEndpoint>} and L{clients
<twisted.internet.endpoints.SSL4ClientEndpoint>}
- L{startTLS <twisted.internet.interfaces.ITLSTransport.startTLS>}
- L{connectSSL <twisted.internet.interfaces.IReactorSSL.connectSSL>}
- L{listenSSL <twisted.internet.interfaces.IReactorSSL.listenSSL>}
These APIs all require a C{contextFactory} argument that specifies their
security properties, such as certificate, private key, certificate authorities
to verify the peer, allowed TLS protocol versions, cipher suites, and so on.
The recommended value for this argument is a L{CertificateOptions} instance;
see its documentation for an explanation of the available options.
The C{contextFactory} name is a bit of an anachronism now, as context factories
have been replaced with "connection creators", but these objects serve the same
role.
Be warned that implementing your own connection creator (i.e.: value for the
C{contextFactory}) is both difficult and dangerous; the Twisted team has worked
hard to make L{CertificateOptions}' API comprehensible and unsurprising, and
the Twisted team is actively maintaining it to ensure that it becomes more
secure over time.
If you are really absolutely sure that you want to take on the risk of
implementing your own connection creator based on the pyOpenSSL API, see the
L{server connection creator
<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} and L{client
connection creator
<twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} interfaces.
Developers using Twisted, please ignore the L{Port}, L{Connector}, and
L{Client} classes defined here, as these are details of certain reactors' TLS
implementations, exposed by accident (and remaining here only for compatibility
reasons). If you wish to establish a TLS connection, please use one of the
APIs listed above.
@note: "SSL" (Secure Sockets Layer) is an antiquated synonym for "TLS"
(Transport Layer Security). You may see these terms used interchangeably
throughout the documentation.
"""
# System imports
from OpenSSL import SSL
from zope.interface import implementer, implementer_only, implementedBy
# Twisted imports
from twisted.internet import tcp, interfaces
supported = True
@implementer(interfaces.IOpenSSLContextFactory)
class ContextFactory:
"""A factory for SSL context objects, for server SSL connections."""
isClient = 0
def getContext(self):
"""Return a SSL.Context object. override in subclasses."""
raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
"""
L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
objects. These objects define certain parameters related to SSL
handshakes and the subsequent connection.
@ivar _contextFactory: A callable which will be used to create new
context objects. This is typically L{OpenSSL.SSL.Context}.
"""
_context = None
def __init__(
self,
privateKeyFileName,
certificateFileName,
sslmethod=SSL.SSLv23_METHOD,
_contextFactory=SSL.Context,
):
"""
@param privateKeyFileName: Name of a file containing a private key
@param certificateFileName: Name of a file containing a certificate
@param sslmethod: The SSL method to use
"""
self.privateKeyFileName = privateKeyFileName
self.certificateFileName = certificateFileName
self.sslmethod = sslmethod
self._contextFactory = _contextFactory
# Create a context object right now. This is to force validation of
# the given parameters so that errors are detected earlier rather
# than later.
self.cacheContext()
def cacheContext(self):
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
# Disallow SSLv2! It's insecure! SSLv3 has been around since
# 1996. It's time to move on.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.use_certificate_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
self._context = ctx
def __getstate__(self):
d = self.__dict__.copy()
del d["_context"]
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
"""
Return an SSL context.
"""
return self._context
@implementer(interfaces.IOpenSSLContextFactory)
class ClientContextFactory:
"""A context factory for SSL clients."""
isClient = 1
# SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1. We disable SSLv2 below,
# though.
method = SSL.SSLv23_METHOD
_contextFactory = SSL.Context
def getContext(self):
ctx = self._contextFactory(self.method)
# See comment in DefaultOpenSSLContextFactory about SSLv2.
ctx.set_options(SSL.OP_NO_SSLv2)
return ctx
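# Illustrative usage sketch (hedged, not part of this module): a client
# connection using this factory via the reactor's connectSSL API. The host,
# port and protocol factory below are hypothetical placeholders.
#
#     from twisted.internet import reactor
#     contextFactory = ClientContextFactory()
#     reactor.connectSSL('example.com', 443, someProtocolFactory, contextFactory)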
@implementer_only(
interfaces.ISSLTransport,
*[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport],
)
class Client(tcp.Client):
"""
I am an SSL client.
"""
def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
# tcp.Client.__init__ depends on self.ctxFactory being set
self.ctxFactory = ctxFactory
tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
def _connectDone(self):
self.startTLS(self.ctxFactory)
self.startWriting()
tcp.Client._connectDone(self)
@implementer(interfaces.ISSLTransport)
class Server(tcp.Server):
"""
I am an SSL server.
"""
def __init__(self, *args, **kwargs):
tcp.Server.__init__(self, *args, **kwargs)
self.startTLS(self.server.ctxFactory)
def getPeerCertificate(self):
# ISSLTransport.getPeerCertificate
raise NotImplementedError("Server.getPeerCertificate")
class Port(tcp.Port):
"""
I am an SSL port.
"""
transport = Server
_type = "TLS"
def __init__(
self, port, factory, ctxFactory, backlog=50, interface="", reactor=None
):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.ctxFactory = ctxFactory
def _getLogPrefix(self, factory):
"""
Override the normal prefix to include an annotation indicating this is a
port for TLS connections.
"""
return tcp.Port._getLogPrefix(self, factory) + " (TLS)"
class Connector(tcp.Connector):
def __init__(
self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None
):
self.contextFactory = contextFactory
tcp.Connector.__init__(self, host, port, factory, timeout, bindAddress, reactor)
# Force some parameter checking in pyOpenSSL. It's better to fail now
# than after we've set up the transport.
contextFactory.getContext()
def _makeTransport(self):
return Client(
self.host,
self.port,
self.bindAddress,
self.contextFactory,
self,
self.reactor,
)
from twisted.internet._sslverify import (
KeyPair,
DistinguishedName,
DN,
Certificate,
CertificateRequest,
PrivateCertificate,
OpenSSLAcceptableCiphers as AcceptableCiphers,
OpenSSLCertificateOptions as CertificateOptions,
OpenSSLDiffieHellmanParameters as DiffieHellmanParameters,
platformTrust,
OpenSSLDefaultPaths,
VerificationError,
optionsForClientTLS,
ProtocolNegotiationSupport,
protocolNegotiationMechanisms,
trustRootFromCertificates,
TLSVersion,
)
__all__ = [
"ContextFactory",
"DefaultOpenSSLContextFactory",
"ClientContextFactory",
"DistinguishedName",
"DN",
"Certificate",
"CertificateRequest",
"PrivateCertificate",
"KeyPair",
"AcceptableCiphers",
"CertificateOptions",
"DiffieHellmanParameters",
"platformTrust",
"OpenSSLDefaultPaths",
"TLSVersion",
"VerificationError",
"optionsForClientTLS",
"ProtocolNegotiationSupport",
"protocolNegotiationMechanisms",
"trustRootFromCertificates",
]
|
py
|
1a579359adcddcae0fc728895cfc32695266a99c
|
# coding=utf-8
import unittest
from dddd.commen.db_opterate.device_data import add_device, delete_device
from dddd.commen.get_browser import get_browser
from dddd.commen.sys_config import liulanqi_type, time_out, web_url
from dddd.page.edit_device_manage_page import EditDeviceManagePage
class EditDeviceManage(unittest.TestCase):
'''
Edit device management
'''
def setUp(self):
self.driver = get_browser(liulanqi_type) # instantiate the browser and open it
# maximize the window
self.driver.maximize_window()
# implicit wait
self.driver.implicitly_wait(time_out)
# open the test URL
self.driver.get(web_url)
self.edit_device_manage_page = EditDeviceManagePage(self.driver)
add_device("55555","gfgf")
def test_edit_device_manage(self): # define test: edit device management
'''Edit device management'''
# step 1: click the device management menu
self.edit_device_manage_page.click_device_amnage_menu01()
# step 2: switch into the list iframe
self.edit_device_manage_page.switch_main_to_list()
# step 3: click the edit button
self.edit_device_manage_page.click_edit_btn("55555")
# step 4: switch from the list iframe to the popup iframe
self.edit_device_manage_page.switch_list_to_winiframe()
# step 5: clear the name input, then enter the new value
self.edit_device_manage_page.input_name("gfgfblbl")
# step 6: click the confirm button
self.edit_device_manage_page.click_queding()
# step 7: exit the iframe back to the parent
self.edit_device_manage_page.quit_iframe_father()
# step 8: locate the element with the updated name
self.driver.find_element_by_xpath('//div[text()="gfgfblbl"]')
def tearDown(self):
# close the browser
self.driver.quit()
delete_device("55555")
|
py
|
1a5793bf020720f7e05459edbdade3ba63f89a09
|
from molsysmt._private_tools.exceptions import *
from molsysmt.forms.common_gets import *
import numpy as np
from molsysmt.native.molecular_system import molecular_system_components
from molsysmt._private_tools.files_and_directories import temp_filename
form_name='file:mdcrd'
is_form = {
'file:mdcrd':form_name
}
info=["",""]
has = molecular_system_components.copy()
for ii in ['coordinates', 'box']:
has[ii]=True
info = ["AMBER mdcrd file format","https://ambermd.org/FileFormats.php#trajectory"]
def to_molsysmt_MolSys(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.native.io.molsys import from_mdcrd as mdcrd_to_molsysmt_MolSys
tmp_item, tmp_molecular_system = mdcrd_to_molsysmt_MolSys(item,
molecular_system=molecular_system, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def to_molsysmt_Topology(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.native.io.topology import from_mdcrd as mdcrd_to_molsysmt_Topology
tmp_item, tmp_molecular_system = mdcrd_to_molsysmt_Topology(item,
molecular_system=molecular_system, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def to_molsysmt_Trajectory(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.native.io.trajectory import from_mdcrd as mdcrd_to_molsysmt_Trajectory
tmp_item, tmp_molecular_system = mdcrd_to_molsysmt_Trajectory(item,
molecular_system=molecular_system, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item
def to_file_mdcrd(item, molecular_system=None, atom_indices='all', frame_indices='all', output_filename=None, copy_if_all=True):
tmp_molecular_system = None
if (atom_indices == 'all') and (frame_indices == 'all'):
if copy_if_all:
tmp_item = extract(item, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
else:
tmp_item = item
if molecular_system is not None:
tmp_molecular_system = molecular_system
else:
tmp_item = extract(item, atom_indices=atom_indices, frame_indices=frame_indices, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def extract(item, atom_indices='all', frame_indices='all', output_filename=None):
if output_filename is None:
output_filename = temp_filename(extension='mdcrd')
if (atom_indices == 'all') and (frame_indices == 'all'):
raise NotImplementedError()
else:
raise NotImplementedError()
return tmp_item
def merge(item_1, item_2):
raise NotImplementedError
def add(to_item, item):
raise NotImplementedError
def append_frames(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError
def concatenate_frames(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError
###### Get
# System
def get_n_frames_from_system (item, indices='all', frame_indices='all'):
raise NotImplementedError
def get_n_atoms_from_system (item, indices='all', frame_indices='all'):
raise NotImplementedError
|
py
|
1a57942e5009ce132a020885636866f2eccdb5e0
|
# -*- coding: utf-8 -*-
import logging
import torch
import torch.cuda
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.loss import WarmAdam, LabelSmoothingLoss
from beaver.model import NMTModel
from beaver.utils import Saver
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, printing_opt
from beaver.data.post_prob import get_prob_idx
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_train_args()
device = get_device()
logging.info("\n" + printing_opt(opt))
saver = Saver(opt)
def valid(model, criterion, valid_dataset, step):
model.eval()
total_loss, total = 0.0, 0
hypothesis, references = [], []
for batch in valid_dataset:
scores = model(batch.src, batch.tgt, batch.probs, batch.idxes)
loss = criterion(scores, batch.tgt)
total_loss += loss.data
total += 1
if opt.tf:
_, predictions = scores.topk(k=1, dim=-1)
else:
predictions = beam_search(opt, model, batch.src, valid_dataset.fields)
hypothesis += [valid_dataset.fields["tgt"].decode(p) for p in predictions]
references += [valid_dataset.fields["tgt"].decode(t) for t in batch.tgt]
bleu = calculate_bleu(hypothesis, references)
logging.info("Valid loss: %.2f\tValid BLEU: %3.2f" % (total_loss / total, bleu))
checkpoint = {"model": model.state_dict(), "opt": opt}
saver.save(checkpoint, step, bleu, total_loss / total)
def train(model, criterion, optimizer, train_dataset, valid_dataset):
total_loss = 0.0
model.zero_grad()
for i, batch in enumerate(train_dataset):
scores = model(batch.src, batch.tgt, batch.probs, batch.idxes)
loss = criterion(scores, batch.tgt)
loss.backward()
total_loss += loss.data
if (i + 1) % opt.grad_accum == 0:
optimizer.step()
model.zero_grad()
if optimizer.n_step % opt.report_every == 0:
mean_loss = total_loss / opt.report_every / opt.grad_accum
logging.info("step: %7d\t loss: %7f" % (optimizer.n_step, mean_loss))
total_loss = 0.0
if optimizer.n_step % opt.save_every == 0:
with torch.set_grad_enabled(False):
valid(model, criterion, valid_dataset, optimizer.n_step)
model.train()
del loss
def main():
logging.info("Build dataset...")
prob_and_idx = get_prob_idx()
train_dataset = build_dataset(opt, opt.train, opt.vocab, device, prob_and_idx, train=True)
valid_dataset = build_dataset(opt, opt.valid, opt.vocab, device, prob_and_idx, train=False)
fields = valid_dataset.fields = train_dataset.fields
logging.info("Build model...")
pad_ids = {"src": fields["src"].pad_id, "tgt": fields["tgt"].pad_id}
vocab_sizes = {"src": len(fields["src"].vocab), "tgt": len(fields["tgt"].vocab)}
model = NMTModel.load_model(opt, pad_ids, vocab_sizes).to(device)
criterion = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["tgt"], pad_ids["tgt"]).to(device)
n_step = int(opt.train_from.split("-")[-1]) if opt.train_from else 1
optimizer = WarmAdam(model.parameters(), opt.lr, opt.hidden_size, opt.warm_up, n_step)
logging.info("start training...")
train(model, criterion, optimizer, train_dataset, valid_dataset)
if __name__ == '__main__':
main()
|
py
|
1a5794778b57159967fa1aa83ed268ca5dd3f496
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
"""
Generate shell code to reset environment variables.
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
"""
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
"""
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
"""
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
"""
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
"""
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
"""Generate shell code to prepend environment variables for the all workspaces."""
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
"""
Return the prefix to prepend to the environment variable NAME.
Adding any path in NEW_PATHS_STR without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
"""Generate shell code with found environment hooks for the all workspaces."""
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
    parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix paths in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = r'/home/arijitnoobstar/UAVProjectileCatcher/devel;/opt/ros/melodic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
|
py
|
1a5795241895fae084dbfdf8b0e18a94d7fc646b
|
import fnmatch
import functools
import typing
import os
import pygears
from pygears import reg
from pygears.core.gear import OutSig
from ...base_resolver import ResolverBase, ResolverTypeError
from pygears.util.fileio import find_in_dirs, save_file
from pygears.conf import inject, Inject
from pygears.hdl import hdlmod
class HierarchicalResolver(ResolverBase):
@inject
def __init__(self, node):
self.node = node
if not node.meta_kwds.get('hdl', {}).get('hierarchical', node.hierarchical):
raise ResolverTypeError
@property
def hdl_path_list(self):
return reg[f'{self.lang}gen/include']
@property
def files(self):
files = [self.file_basename]
if 'hdl' in self.node.meta_kwds:
if 'files' in self.node.meta_kwds['hdl']:
for fn in self.node.meta_kwds['hdl']['files']:
if not os.path.splitext(fn)[-1]:
fn = f'{fn}.{self.lang}'
files.append(fn)
return files
@property
@functools.lru_cache()
def module_name(self):
if find_in_dirs(f'{self.hier_path_name}.{self.lang}',
self.hdl_path_list):
return self.hier_path_name + '_hier'
else:
return self.hier_path_name
@property
def file_basename(self):
return f'{self.module_name}.{self.lang}'
def module_context(self, template_env):
context = {
'pygears': pygears,
'module_name': self.module_name,
'intfs': template_env.port_intfs(self.node),
# 'sigs': [s.name for s in self.node.meta_kwds['signals']],
'sigs': self.node.meta_kwds['signals'],
'params': self.node.params,
'inst': [],
'generics': []
}
for port in context['intfs']:
context[f'_{port["name"]}'] = port
context[f'_{port["name"]}_t'] = port['type']
return context
@property
def params(self):
return {}
def get_hier_module(self, template_env):
context = self.module_context(template_env)
for child in self.node.local_intfs:
hmod = hdlmod(child)
contents = hmod.get_inst(template_env)
if contents:
context['inst'].append(contents)
for child in self.node.child:
for s in child.meta_kwds['signals']:
if isinstance(s, OutSig):
name = child.params['sigmap'][s.name]
context['inst'].append(f'logic [{s.width-1}:0] {name};')
hmod = hdlmod(child)
if hasattr(hmod, 'get_inst'):
contents = hmod.get_inst(template_env)
if contents:
if hmod.traced:
context['inst'].append('/*verilator tracing_on*/')
context['inst'].append(contents)
if hmod.traced:
context['inst'].append('/*verilator tracing_off*/')
return template_env.render_local(__file__, "hier_module.j2", context)
def generate(self, template_env, outdir):
save_file(self.file_basename, outdir,
self.get_hier_module(template_env))
|
py
|
1a57959017975dfd9051ff0de5221544f354377b
|
from django.db import models
from django.utils import timezone
# Create your models here.
class Message(models.Model):
author = models.CharField(max_length=200)
text = models.TextField()
date = models.DateTimeField(default=timezone.now)
|
py
|
1a5795bb316621b023d3f875453fb46e62176b3c
|
from app.tweet.adapters.repository import PostgresTweetAggregateRepository
from app.tweet.domain.model import Tweet
from uuid import uuid4
import pytest
class TestSave:
@pytest.mark.asyncio
async def test_save(self, postgres_session):
repo = PostgresTweetAggregateRepository(postgres_session)
aggregate = Tweet.new("Hello", uuid4())
await repo.save(aggregate)
        found = await repo.find_by_id(aggregate.id)
        assert found is not None
class TestFindById:
@pytest.mark.asyncio
async def test_not_found(self, postgres_session):
repo = PostgresTweetAggregateRepository(postgres_session)
result = await repo.find_by_id(uuid4())
assert result is None
|
py
|
1a5796860f8e5e6d4965f2cde70234db3a6c34af
|
#!/usr/bin/env python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import glob
import math
import shutil
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import img_as_ubyte
from skimage.filters import *
from Preprocess.tools.peakdetect import *
dirList = glob.glob("Input/*.pgm")
# dirList = glob.glob("../Input/P168-Fg016-R-C01-R01-fused.jpg")
# dirList = glob.glob("../Input/P123-Fg002-R-C01-R01-fused.jpg")
# dirList = glob.glob('/Users/Khmer/Downloads/sample-test/run_test/*.pgm')
def threshold_li(image):
"""Return threshold value based on adaptation of Li's Minimum Cross Entropy method.
Parameters
----------
image : (N, M) ndarray
Input image.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
Pattern Recognition, 26(4): 617-625
DOI:10.1016/0031-3203(93)90115-D
.. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
DOI:10.1016/S0167-8655(98)00057-9
.. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165
DOI:10.1117/1.1631315
.. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
"""
# Make sure image has more than one value
if np.all(image == image.flat[0]):
raise ValueError("threshold_li is expected to work with images "
"having more than one value. The input image seems "
"to have just one value {0}.".format(image.flat[0]))
# Copy to ensure input image is not modified
image = image.copy()
# Requires positive image (because of log(mean))
immin = np.min(image)
image -= immin
imrange = np.max(image)
tolerance = 20 * imrange / 256
# Calculate the mean gray-level
mean = np.mean(image)
# Initial estimate
new_thresh = mean
old_thresh = new_thresh + 2 * tolerance
# Stop the iterations when the difference between the
# new and old threshold values is less than the tolerance
while abs(new_thresh - old_thresh) > tolerance:
old_thresh = new_thresh
threshold = old_thresh + tolerance # range
# Calculate the means of background and object pixels
mean_back = image[image <= threshold].mean()
# print(mean_back)
mean_obj = image[image > threshold].mean()
# print(mean_obj)
temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))
if temp < 0:
new_thresh = temp - tolerance
else:
new_thresh = temp + tolerance
# print(threshold + immin)
return threshold + immin
def rotatedRectWithMaxArea(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle (maximal area) within the rotated rectangle.
"""
if w <= 0 or h <= 0:
return 0, 0
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
# if suffices to look at the first quadrant and the absolute values of sin,cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2. * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer line
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a * cos_a - sin_a * sin_a
wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
return wr, hr
def rotate_bound(image, angle):
# CREDIT: https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
return cv2.warpAffine(image, M, (nW, nH))
def rotate_max_area(image, angle):
""" image: cv2 image matrix object
angle: in degree
"""
wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle))
rotated = rotate_bound(image, angle)
h, w, _ = rotated.shape
y1 = h // 2 - int(hr / 2)
y2 = y1 + int(hr)
x1 = w // 2 - int(wr / 2)
x2 = x1 + int(wr)
return rotated[y1:y2, x1:x2]
def find_degree(image):
min_score = 999999
degree = 0
for d in range(-6, 7):
rotated_image = rotate_max_area(image, d)
# cv2.imwrite('./tr_' + str(d) + '.jpg', rotated_image)
ri_hist = cv2.reduce(rotated_image, 1, cv2.REDUCE_AVG).reshape(-1)
# plt.plot(ri_hist)
# plt.savefig('./tr_' + str(d) + '_h.jpg')
# plt.clf()
# plt.show()
line_peaks = peakdetect(ri_hist, lookahead=30)
score_ne = num_ne = 0
score_po = num_po = 0
for y in line_peaks[0]:
score_ne -= (y[1] * 1)
num_ne += 1
for y in line_peaks[1]:
score_po += (y[1] * 1)
num_po += 1
score = score_ne / num_ne + score_po / num_po
# print("score: ", score, " degree: ", d)
# print(": ", score_ne / num_ne, " : ", score_po / num_po)
if score < min_score:
degree = d
min_score = score
# print('Degree: ', degree)
rotated_image = rotate_max_area(image, degree)
# plt.imshow(rotated_image, cmap=plt.cm.gray)
# plt.show()
return rotated_image
def separate_cha_2(line):
line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
line_peaks = peakdetect(line_hist, lookahead=20)
Hl, Wl = new_line.shape[:2]
cha = []
# for y in line_peaks[0]:
# plt.plot(y[0], y[1], "r*")
# cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)
for y in line_peaks[1]:
cha.append(y[0])
# plt.plot(y[0], y[1], "g*")
cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)
cha.insert(0, 0)
cha.append(Wl)
plt.imshow(new_line, cmap=plt.cm.gray)
plt.show()
return cha
def separate_cha(line):
line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
line_peaks = peakdetect(line_hist, lookahead=25)
Hl, Wl = new_line.shape[:2]
cha = []
# for y in line_peaks[0]:
# plt.plot(y[0], y[1], "r*")
# cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)
for y in line_peaks[0]:
if y[1] >= 235:
cha.append(y[0])
# plt.plot(y[0], y[1], "g*")
cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)
cha.insert(0, 0)
cha.append(Wl)
# plt.plot(line_hist)
# plt.show()
# plt.imshow(new_line, cmap=plt.cm.gray)
# plt.show()
return cha
def separate_words(line):
line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
line_peaks = peakdetect(line_hist, lookahead=50)
Hl, Wl = new_line.shape[:2]
words = []
for y in line_peaks[0]:
if y[1] == 255:
words.append(y[0])
# plt.plot(y[0], y[1], "r*")
if y[1] == 255:
cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)
# for y in line_peaks[1]:
# plt.plot(y[0], y[1], "g*")
# if y[1] == 255:
# words.append(y[0])
# cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)
words.insert(0, 0)
words.append(Wl)
# plt.imshow(new_line, cmap=plt.cm.gray)
# plt.show()
return words
def crop_blank(img):
min_x, max_x, min_y, max_y = 0, 0, 0, 0
# for line in img:
# wl = True
# for x in line:
# if x != 255:
# wl = False
th, threshed = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
pts = cv2.findNonZero(threshed)
ret = cv2.minAreaRect(pts)
(cx, cy), (w, h), ang = ret
if w < h:
crop = img[int(w):int(h), :]
else:
        crop = img[int(h):int(w), :]
    return crop
# plt.imshow(crop, cmap=plt.cm.gray)
# plt.show()
# if x < y:
# if w < h:
# crop = img[w:h, x:y]
# else:
# crop = img[h:w, x:y]
# else:
# if w < h:
# crop = img[w:h, y:x]
# else:
# crop = img[h:w, y:x]
#
for d in dirList:
image = cv2.imread(d)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# kernel = np.ones((3, 3), np.float32) / 25
# image = cv2.filter2D(image, -1, kernel)
window_size = 59
thresh_sauvola = threshold_sauvola(image, window_size=window_size, k=0.5)
binary_sauvola = image > thresh_sauvola
# binary_global = image > threshold_triangle(image)
# binary_global = image > threshold_li(image)
# binary_global = image > threshold_minimum(image)
# binary_global = image > threshold_li(image)
binary_global = image > threshold_otsu(image)
cv_image = img_as_ubyte(binary_global)
ret, labels = cv2.connectedComponents(cv_image)
# Map component labels to hue val
label_hue = np.uint8(179 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue == 0] = 0
# cv2.imwrite('./t1.jpg', cv_image)
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(cv_image, connectivity=4)
sizes = stats[:, -1]
max_label = 1
max_size = sizes[1]
for i in range(2, nb_components):
if sizes[i] > max_size:
max_label = i
max_size = sizes[i]
img2 = np.zeros(output.shape)
img2[output == max_label] = 255
# cv2.imwrite('./t2.jpg', img2)
cv2.imwrite('./tmp.jpg', img2)
tmp = cv2.imread('tmp.jpg')
im_bw = cv2.cvtColor(tmp, cv2.COLOR_RGB2GRAY)
im_bw = 255 - im_bw
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(im_bw, connectivity=4)
sizes = stats[:, -1]
max_label = 1
max_size = sizes[1]
for i in range(2, nb_components):
if sizes[i] > max_size:
max_label = i
max_size = sizes[i]
img3 = np.zeros(output.shape)
img3[output == max_label] = 255
# cv2.imwrite('./t3.jpg', img3)
s_img_2 = img_as_ubyte(binary_sauvola)
# cv2.imwrite('./t1_2.jpg', s_img_2)
s_img_2[img3 == 255] = 255
# cv2.imwrite('./t4.jpg', s_img_2)
new_img = cv2.cvtColor(s_img_2, cv2.COLOR_GRAY2BGR)
rotated = find_degree(new_img)
rotated = cv2.cvtColor(rotated, cv2.COLOR_RGB2GRAY)
hist = cv2.reduce(rotated, 1, cv2.REDUCE_AVG).reshape(-1)
H, W = rotated.shape[:2]
peaks = peakdetect(hist, lookahead=40)
rotated2 = cv2.cvtColor(rotated, cv2.COLOR_GRAY2BGR)
peak = []
for y in peaks[0]:
peak.append(y[0])
# plt.plot(y[0], y[1], "r*")
cv2.line(rotated2, (0, y[0]), (W, y[0]), (255, 0, 0), 3)
# for y in peaks[1]:
# peak.append(y[0])
# plt.plot(y[0], y[1], "g*")
# cv2.line(rotated, (0, y[0]), (W, y[0]), (0, 255, 0), 3)
# plt.plot(hist)
# plt.savefig('hist.jpg')
# plt.clf()
peak.insert(0, 0)
peak.append(H)
# print(peak)
# plt.plot(hist)
# plt.show()
if not os.path.exists(os.path.splitext('segmentation/' + d.split('/')[-1])[0]):
os.makedirs(os.path.splitext('segmentation/' + d.split('/')[-1])[0])
else:
shutil.rmtree(os.path.splitext('segmentation/' + d.split('/')[-1])[0])
os.makedirs(os.path.splitext('segmentation/' + d.split('/')[-1])[0])
# cv2.imwrite(os.path.join(os.path.splitext(d.split('/')[-1])[0], '_t.jpg'), rotated)
# crop_blank(rotated)
# plt.imshow(rotated2, cmap=plt.cm.gray)
# plt.show()
count_line = 0
for y in range(len(peak) - 1):
if not os.path.exists(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line))):
os.makedirs(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line)))
else:
shutil.rmtree(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line)))
os.makedirs(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line)))
path = os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line))
crop_img = rotated[peak[y]:peak[y + 1], 0:W]
# print(peak[y], peak[y + 1])
# plt.imshow(crop_img, cmap=plt.cm.gray)
# plt.show()
word_peaks = separate_words(crop_img)
# print(word_peaks)
count_line += 1
for i in range(len(word_peaks) - 1):
new_w = crop_img[:, word_peaks[i]: word_peaks[i + 1]]
os.makedirs(os.path.join(path, 'word_' + str(i)))
cv2.line(rotated2, (word_peaks[i], peak[y]), (word_peaks[i], peak[y + 1]), (0, 0, 255), 3)
# print(y0, y[0], word_peaks[i])
cha_peaks = separate_cha(new_w)
if len(cha_peaks) == 0:
continue
for j in range(len(cha_peaks) - 1):
new_c = new_w[:, cha_peaks[j]: cha_peaks[j + 1]]
cv2.imwrite(os.path.join(os.path.join(path, 'word_' + str(i)), str(j) + '.jpg'),
new_c)
# plt.imshow(rotated2, cmap=plt.cm.gray)
# plt.show()
# cv2.imwrite('./d.jpg', rotated2)
print("Successfully process image " + d.split('/')[-1].split('jpg')[0])
|
py
|
1a579726b59d8f2528cb442bcca298b758f7baa8
|
from rest_framework.permissions import BasePermission
class IsNormalUser(BasePermission):
def has_permission(self, request, view):
        # deny write methods (POST, PUT, PATCH, DELETE) for non-staff users
if not request.user.is_staff:
if request.method == 'POST' or request.method == 'PUT' or request.method == 'DELETE' \
or request.method == 'PATCH':
return False
# Otherwise, only allow authenticated requests
return request.user and request.user.is_authenticated
|
py
|
1a57981ec8838551dd7e5bdc05ad7b7aa254a532
|
import math
import random
import libpyDirtMP as prx
prx.init_random(random.randint(1,999999))
acrobot = prx.two_link_acrobot("acrobot")
simulation_step = 0.01
prx.set_simulation_step(simulation_step)
print("Using simulation_step:", simulation_step)
start_state = [0, 0, 0, 0]
goal_state = [math.pi, 0, 0, 0]
obs_pose_1 = prx.transform()
obs_pose_2 = prx.transform()
obs_pose_3 = prx.transform()
obs_pose_4 = prx.transform()
obs_pose_1.setIdentity()
obs_pose_2.setIdentity()
obs_pose_3.setIdentity()
obs_pose_4.setIdentity()
obs_pose_1.translation(prx.vector( 20, 20,0.5))
obs_pose_2.translation(prx.vector(-20, 20,0.5))
obs_pose_3.translation(prx.vector( 20,-20,0.5))
obs_pose_4.translation(prx.vector(-20,-20,0.5))
b1 = prx.box.create_obstacle("b1", 1., 1., 1., obs_pose_1)
b2 = prx.box.create_obstacle("b2", 1., 1., 1., obs_pose_2)
b3 = prx.box.create_obstacle("b3", 1., 1., 1., obs_pose_3)
b4 = prx.box.create_obstacle("b4", 1., 1., 1., obs_pose_4)
obstacles = [b1, b2, b3, b4]
obs_names = ["b1", "b2", "b3", "b4"]
### To have an obstacle-free environment, uncomment the following lines (and comment the above)
# obstacles = []
# obs_names = []
wm = prx.world_model([acrobot], obstacles)
wm.create_context("context", ["acrobot"], obs_names)
context = wm.get_context("context");
planner = prx.dirt("dirt");
planner_spec = prx.dirt_specification(context.system_group,context.collision_group);
planner_spec.blossom_number = 5
planner_spec.use_pruning = False
def acrobot_distance_function(s1, s2):
cost = 0
s1a0 = s1[0] + prx.PRX_PI
s1a1 = s1[1] + prx.PRX_PI
s1a2 = s1[2]
s1a3 = s1[3]
s2a0 = s2[0] + prx.PRX_PI
s2a1 = s2[1] + prx.PRX_PI
s2a2 = s2[2]
s2a3 = s2[3]
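    # for the two joint angles use the shorter arc around the circle; angular velocities use plain differences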
a0 = min((2 * prx.PRX_PI) - abs(s1a0 - s2a0), abs(s1a0 - s2a0));
a1 = min((2 * prx.PRX_PI) - abs(s1a1 - s2a1), abs(s1a1 - s2a1));
a2 = s1a2 - s2a2;
a3 = s1a3 - s2a3;
cost = a0 * a0 + a1 * a1 + a2 * a2 + a3 * a3
return math.sqrt(cost);
planner_spec.distance_function = prx.distance_function.set_df(acrobot_distance_function);
planner_spec.min_control_steps = 1
planner_spec.max_control_steps = 50
# planner_spec.random_seed = random.randint(1,999999);
planner_spec.bnb = True;
planner_query = prx.dirt_query(context.system_group.get_state_space(),context.system_group.get_control_space());
planner_query.start_state = context.system_group.get_state_space().make_point()
planner_query.goal_state = context.system_group.get_state_space().make_point()
context.system_group.get_state_space().copy_point_from_vector(planner_query.start_state, start_state);
context.system_group.get_state_space().copy_point_from_vector(planner_query.goal_state, goal_state);
print("Start State:", planner_query.start_state)
print("Goal State:", planner_query.goal_state)
planner_query.goal_region_radius = 0.5;
planner_query.get_visualization = True;
planner.link_and_setup_spec(planner_spec)
planner.preprocess()
planner.link_and_setup_query(planner_query)
### Note: Python slows down computation ==> more time might be needed
# checker = prx.condition_check("time", 60)
checker = prx.condition_check("iterations", 50000)
print("Resolving query...")
planner.resolve_query(checker)
planner.fulfill_query();
### This part is only to visualize the solution
if (planner_query.get_visualization):
vis_group = prx.three_js_group([acrobot], obstacles)
if ( len(planner_query.solution_traj) != 0 ) :
vis_group.add_vis_infos(prx.info_geometry.FULL_LINE, planner_query.solution_traj, "acrobot/ball", context.system_group.get_state_space(), "0x000000");
timestamp = 0.0
for state in planner_query.solution_traj :
context.system_group.get_state_space().copy_from_point(state);
vis_group.snapshot_state(timestamp)
timestamp += simulation_step
vis_group.output_html("py_output.html");
|
py
|
1a57988da409e73b06cbfbfe7dc90ec046c7171b
|
import unittest
import sectionproperties.pre.sections as sections
from sectionproperties.analysis.cross_section import CrossSection
from sectionproperties.tests.helper_functions import validate_properties
class TestValidation(unittest.TestCase):
def test_angle(self):
"""Section properties are validated against results from the Strand7
beam section utility."""
geometry = sections.AngleSection(d=150, b=90, t=12, r_r=10, r_t=5,
n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
section = CrossSection(geometry, mesh)
val_list = []
val_list.append({"prop": "area", "val": 2746.73, "tol": 2e-4})
val_list.append({"prop": "cx", "val": 21.2255, "tol": 2e-4})
val_list.append({"prop": "cy", "val": 50.9893, "tol": 2e-4})
val_list.append({"prop": "ixx_g", "val": 1.3428e7, "tol": 2e-4})
val_list.append({"prop": "iyy_g", "val": 2.95629e6, "tol": 2e-4})
val_list.append({"prop": "ixy_g", "val": 1.08669e6, "tol": 2e-4})
val_list.append({"prop": "ixx_c", "val": 6.28678e6, "tol": 2e-4})
val_list.append({"prop": "iyy_c", "val": 1.71882e6, "tol": 3e-4})
val_list.append({"prop": "ixy_c", "val": -1.88603e6, "tol": 3e-4})
val_list.append({"prop": "zxx_plus", "val": 63496.0, "tol": 2e-4})
val_list.append({"prop": "zxx_minus", "val": 123296, "tol": 2e-4})
val_list.append({"prop": "zyy_plus", "val": 24992.1, "tol": 3e-4})
val_list.append({"prop": "zyy_minus", "val": 80979.0, "tol": 2e-4})
val_list.append({"prop": "rx", "val": 47.8416, "tol": 2e-4})
val_list.append({"prop": "ry", "val": 25.0154, "tol": 2e-4})
val_list.append({"prop": "i11_c", "val": 6.96484e6, "tol": 2e-4})
val_list.append({"prop": "i22_c", "val": 1.04076e6, "tol": 2e-4})
val_list.append({"prop": "phi", "val": 19.7744 - 180, "tol": 2e-4})
val_list.append({"prop": "z11_plus", "val": 97751.9, "tol": 2e-4})
val_list.append({"prop": "z11_minus", "val": 69403.3, "tol": 2e-4})
val_list.append({"prop": "z22_plus", "val": 27959.0, "tol": 2e-4})
val_list.append({"prop": "z22_minus", "val": 20761.6, "tol": 3e-4})
val_list.append({"prop": "r11", "val": 50.3556, "tol": 2e-4})
val_list.append({"prop": "r22", "val": 19.4656, "tol": 2e-4})
val_list.append({"prop": "sxx", "val": 113541, "tol": 2e-4})
val_list.append({"prop": "syy", "val": 45724.6, "tol": 2e-4})
val_list.append({"prop": "sf_xx_plus", "val": 113541 / 63496.0,
"tol": 2e-4})
val_list.append({"prop": "sf_xx_minus", "val": 113541 / 123296,
"tol": 2e-4})
val_list.append({"prop": "sf_yy_plus", "val": 45724.6 / 24992.1,
"tol": 3e-4})
val_list.append({"prop": "sf_yy_minus", "val": 45724.6 / 80979.0,
"tol": 2e-4})
val_list.append({"prop": "s11", "val": 121030, "tol": 2e-4})
val_list.append({"prop": "s22", "val": 43760.6, "tol": 2e-4})
val_list.append({"prop": "sf_11_plus", "val": 121030 / 97751.9,
"tol": 2e-4})
val_list.append({"prop": "sf_11_minus", "val": 121030 / 69403.3,
"tol": 2e-4})
val_list.append({"prop": "sf_22_plus", "val": 43760.6 / 27959.0,
"tol": 2e-4})
val_list.append({"prop": "sf_22_minus", "val": 43760.6 / 20761.6,
"tol": 3e-4})
val_list.append({"prop": "j", "val": 135333, "tol": 1e-3})
val_list.append({"prop": "gamma", "val": 1.62288e8, "tol": 5e-4})
val_list.append({"prop": "A_s11", "val": 885.444, "tol": 2e-4})
val_list.append({"prop": "A_s22", "val": 1459.72, "tol": 4e-4})
val_list.append({"prop": "x11_se", "val": 28.719, "tol": 1e-3})
val_list.append({"prop": "y22_se", "val": 35.2348, "tol": 5e-4})
section.calculate_geometric_properties()
section.calculate_plastic_properties()
section.calculate_warping_properties()
validate_properties(self, val_list, section)
def test_custom(self):
"""Section properties are validated against results from the Strand7
beam section utility."""
points = [[-10, 0], [110, 0], [100, 10], [55, 10], [55, 90], [100, 90],
[110, 100], [110, 110], [-10, 110], [-10, 100], [0, 90],
[45, 90], [45, 10], [-10, 10]]
facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7],
[7, 8], [8, 9], [9, 10], [10, 11], [11, 12], [12, 13],
[13, 0]]
holes = []
control_points = [[0, 5]]
geometry = sections.CustomSection(
points, facets, holes, control_points)
mesh = geometry.create_mesh(mesh_sizes=[5])
section = CrossSection(geometry, mesh)
val_list = []
val_list.append({"prop": "area", "val": 4250, "tol": None})
val_list.append({"prop": "cx", "val": 49.3333, "tol": None})
val_list.append({"prop": "cy", "val": 65.0196, "tol": None})
val_list.append({"prop": "ixx_g", "val": 2.56725e7, "tol": None})
val_list.append({"prop": "iyy_g", "val": 1.41858e7, "tol": None})
val_list.append({"prop": "ixy_g", "val": 1.37979e7, "tol": None})
val_list.append({"prop": "ixx_c", "val": 7.70542e6, "tol": None})
val_list.append({"prop": "iyy_c", "val": 3.84228e6, "tol": None})
val_list.append({"prop": "ixy_c", "val": 165472, "tol": None})
val_list.append({"prop": "zxx_plus", "val": 171306, "tol": None})
val_list.append({"prop": "zxx_minus", "val": 118509, "tol": None})
val_list.append({"prop": "zyy_plus", "val": 63334.2, "tol": None})
val_list.append({"prop": "zyy_minus", "val": 64757.5, "tol": None})
val_list.append({"prop": "rx", "val": 42.5798, "tol": None})
val_list.append({"prop": "ry", "val": 30.0677, "tol": None})
val_list.append({"prop": "phi", "val": 177.552 - 180, "tol": 1e-4})
val_list.append({"prop": "i11_c", "val": 7.71249e6, "tol": None})
val_list.append({"prop": "i22_c", "val": 3.8352e6, "tol": None})
val_list.append({"prop": "z11_plus", "val": 162263, "tol": None})
val_list.append({"prop": "z11_minus", "val": 114268, "tol": None})
val_list.append({"prop": "z22_plus", "val": 60503.0, "tol": None})
val_list.append({"prop": "z22_minus", "val": 62666.1, "tol": None})
val_list.append({"prop": "r11", "val": 42.5993, "tol": None})
val_list.append({"prop": "r22", "val": 30.04, "tol": None})
val_list.append({"prop": "sxx", "val": 153196, "tol": None})
val_list.append({"prop": "syy", "val": 101494, "tol": None})
val_list.append({"prop": "sf_xx_plus", "val": 153196 / 171306,
"tol": None})
val_list.append({"prop": "sf_xx_minus", "val": 153196 / 118509,
"tol": None})
val_list.append({"prop": "sf_yy_plus", "val": 101494 / 63334.2,
"tol": None})
val_list.append({"prop": "sf_yy_minus", "val": 101494 / 64757.5,
"tol": None})
val_list.append({"prop": "s11", "val": 153347, "tol": None})
val_list.append({"prop": "s22", "val": 101501, "tol": None})
val_list.append({"prop": "sf_11_plus", "val": 153347 / 162263,
"tol": None})
val_list.append({"prop": "sf_11_minus", "val": 153347 / 114268,
"tol": None})
val_list.append({"prop": "sf_22_plus", "val": 101501 / 60503.0,
"tol": None})
val_list.append({"prop": "sf_22_minus", "val": 101501 / 62666.1,
"tol": None})
val_list.append({"prop": "j", "val": 347040, "tol": 5e-3})
val_list.append({"prop": "gamma", "val": 7.53539e9, "tol": 1e-3})
val_list.append({"prop": "A_s11", "val": 2945.53, "tol": 5e-4})
val_list.append({"prop": "A_s22", "val": 956.014, "tol": 5e-4})
val_list.append({"prop": "x11_se", "val": 1.9134, "tol": 5e-3})
val_list.append({"prop": "y22_se", "val": 3.02028, "tol": 5e-3})
section.calculate_geometric_properties()
section.calculate_plastic_properties()
section.calculate_warping_properties()
validate_properties(self, val_list, section)
if __name__ == "__main__":
unittest.main()
|
py
|
1a5798b5a5895eaacfe807bd17081389ce628c92
|
import constants
import numpy as np
import MySQLdb
import time
import datetime
import os
CONCEPT_START = "START"
def get_file_prefix():
"""获得有效的文件前缀名"""
from datetime import datetime
now = datetime.now()
return "{}_{}_{}".format(now.year, now.month, now.day)
def init_file():
for i in ["_enfuzzy.csv", "_defuzzy.csv", "_record.csv"]:
if not os.path.exists(get_file_prefix() + i):
with open(get_file_prefix() + i, "w") as f:
with open("default" + i, "r") as fo:
f.write(fo.read())
print("create " + get_file_prefix() + i)
def get_valid_id():
fname = get_file_prefix() + "_enfuzzy.csv"
lid = 0
with open(fname, "r") as f:
for line in f:
lid = line.split(",")[0]
return int(lid) + 1
def record_enfuzzy(var, val, concept):
"""记录模糊化过程"""
fname = get_file_prefix() + "_enfuzzy.csv"
get_id = get_valid_id()
with open(fname, "a") as f:
# ~ print("模糊化:::{},{},{},{},{}".format(get_id, var, val, concept, time.mktime(datetime.datetime.now().timetuple())))
f.write("{},{},{},{},{}\n".format(get_id, var, val, concept, time.mktime(datetime.datetime.now().timetuple())))
return get_id
def record_inference(kid, cond, res):
"""记录推理过程"""
fname = get_file_prefix() + "_record.csv"
with open(fname, "a") as f:
# ~ print("推理:::{},{},{},{}".format(kid, cond, res, time.mktime(datetime.datetime.now().timetuple())))
f.write("{},{},{},{}\n".format(kid, cond, res, time.mktime(datetime.datetime.now().timetuple())))
def record_defuzzy(var, concept, val):
"""记录去模糊化过程"""
fname = get_file_prefix() + "_defuzzy.csv"
with open(fname, "a") as f:
# ~ print("去模糊化:::{},{},{},{}".format(var, concept, val, time.mktime(datetime.datetime.now().timetuple())))
f.write("{},{},{},{}\n".format(var, concept, val, time.mktime(datetime.datetime.now().timetuple())))
def search_defuzzy(result):
if result.count("=") != 1:
return 0
var, val = result.split("=")
fname = get_file_prefix() + "_defuzzy.csv"
data = 0
maxTime = 0
with open(fname, "r") as f:
for line in f:
d = line.rstrip("\n").split(",")
if d[0] == var and d[2] == val:
if eval(d[-1]) > maxTime:
maxTime = eval(d[-1])
data = d
return data
def get_explanation(result):
ans = search_defuzzy(result)
if ans:
return fuzzy_explain(ans)
else:
return "CAN NOT EXPLAIN"
def search_record(concept):
fname = get_file_prefix() + "_record.csv"
cond = 0
maxTime = 0
with open(fname, "r") as f:
for line in f:
d = line.rstrip("\n").split(",")
if d[2] == concept:
if maxTime < eval(d[-1]):
maxTime = eval(d[-1])
cond = d
return cond
def get_enfuzzy(enid):
fname = get_file_prefix() + "_enfuzzy.csv"
with open(fname, "r") as f:
for line in f:
d = line.rstrip("\n").split(",")
if d[0] == enid:
return d
return 0
def fuzzy_explain(ans):
defuzzy = ans[1]
inference_stack = [defuzzy]
knowledge_stack = ["defuzzy_{}->{}".format(defuzzy, ans[2])]
curr_concept = inference_stack[-1]
data = ""
while curr_concept != CONCEPT_START:
        # inference step
data = search_record(curr_concept)
curr_concept = data[1]
inference_stack.append(curr_concept)
knowledge_stack.append(data[0])
else:
        # fuzzification step
enfuzzy_id = data[0]
enfuzzy_data = get_enfuzzy(enfuzzy_id)
inference_stack.pop(-1)
knowledge_stack.pop(-1)
inference_stack.append(curr_concept)
knowledge_stack.append("enfuzzy_{}:{}->{}".format(enfuzzy_data[1], enfuzzy_data[2], enfuzzy_data[3]))
infer_chain = ""
know_chain = ""
while len(inference_stack) > 0:
infer_chain = infer_chain + inference_stack.pop(-1) + "->"
know = knowledge_stack.pop(-1)
try:
x = eval(know)
if type(x) == int:
                # it is a knowledge id
know_chain += "knowledge({})".format(x) + " "
except:
know_chain += know + " "
infer_chain += "END"
know_chain += "END"
return "\n".join([infer_chain, know_chain])
def initialize(csr, dbname, user):
csr.execute("USE " + dbname)
csr.execute("DROP table if EXISTS fdb")
csr.execute("DROP table if EXISTS fdb_traffic")
csr.execute("DROP table if EXISTS fdb_light_time")
csr.execute("DROP table if EXISTS ks")
ctine = "CREATE TABLE IF NOT EXISTS "
csr.execute(ctine + "FDB"
"(ID int NOT NULL AUTO_INCREMENT primary key,"
"linguistic_variable varchar(32) NOT NULL,"
"fuzzy_set int NOT NULL,"
"used int NOT NULL default 0,"
"updtime datetime,"
"administrator varchar(32) default \"%s\")" % user)
csr.execute(ctine + "KS"
"(ID int NOT NULL primary key,"
"concv varchar(32) not null,"
"closeness float(3,2) not null,"
"updtime datetime,"
"administrator varchar(32) default \"{}\")".format(user))
def getDomin(csr, ling_var):
"""
获取语言变量的域
:param csr:cursor
:param ling_var:语言变量 str
:return: 语言变量的域(numpy数组)
"""
csr.execute("SELECT VALUE from fuzzy_concept_" + ling_var)
return np.array(csr.fetchall()).reshape(1, -1)[0]
def fuzzing(csr, ling_var, val, sigma):
"""
三角法模糊化
:param conn: 数据库连接
:param dbname: 数据库名
:param ling_var: 语言变量的名字
:param val: 实际测量的精确值
:param sigma: 三角法参数
:param lb: lower bound
:param ub: upper bound
:return: 模糊集
"""
cnt = csr.execute("SELECT LingV FROM sum_lingv WHERE Lingv = '%s'" % ling_var)
if not cnt:
raise Exception("There is no such linguistic variable {} in the knowledge database as given!".format(ling_var))
domin = getDomin(csr, ling_var)
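    # triangular membership: mu(x) = max(0, 1 - |x - val| / sigma) evaluated over the variable's domain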
fuzzy_set = 1 - abs(domin - val) / sigma
fuzzy_set[fuzzy_set < 0] = 0
return fuzzy_set
def insert_into_FDB(dbname, csr, ling_var, fuzzy_set, c_stack):
"""
将新事实插入到FDB
:param dbname:
:param csr:
:param ling_var: (语言变量名,类型) (str,str)
:param fuzzy_set: 模糊集(数组)
:return:
"""
    # if the linguistic variable appears for the first time, create a table for it
ctine = "CREATE TABLE IF NOT EXISTS "
csr.execute(ctine + "FDB_" + ling_var[0] + "("
"value " + ling_var[1] + " NOT NULL,primary key(value))")
csr.execute(
"select count(COLUMN_NAME) from information_schema.COLUMNS where table_schema = '{}' and table_name = 'fdb_{}';".format(
dbname, ling_var[0]))
num = csr.fetchone()[0]
domin = getDomin(csr, ling_var[0])
if num == 1:
for val in domin:
csr.execute("INSERT INTO fdb_" + ling_var[0] + " VALUES({})".format(val))
c_stack.append("{}set{}".format(ling_var[0],num))
    # insert the fact into the FDB
suc = csr.execute(
"INSERT INTO fdb(linguistic_variable, fuzzy_set, updtime) values(\"{}\",{},now())".format(ling_var[0], num))
    # insert the fuzzy set into the table of the corresponding linguistic variable
try:
csr.execute("ALTER TABLE fdb_{}".format(ling_var[0]) + " ADD set" + str(num) + " float(3,2) not null")
for ind in range(len(fuzzy_set)):
csr.execute("UPDATE fdb_{}".format(ling_var[0]) + " SET set" + str(num)
+ "={}".format(fuzzy_set[ind]) + "where value={}".format(domin[ind]))
except:
pass
return suc
def getSolution(csr, dbname, solution):
"""
尝试从事实库fdb中获取问题的解
:param conn:
:param dbname:
:param solution: 为问题的解指定的语言变量 str
:return: 问题的解的事实id
"""
csr.execute("select id from fdb where linguistic_variable = '" + solution + "'")
return csr.fetchall()
def defuzzing(csr, ling_var, fuzzy_set):
"""
去模糊化
:param ling_var: 语言变量 str
:param fuzzy_set: 模糊集(numpy数组)
:return: 去模糊化后的精确值
"""
fuzzy_set = np.array(fuzzy_set)
domin = getDomin(csr, ling_var)
return domin[(fuzzy_set == fuzzy_set.max())[0]].mean()
def getfdb_ling_var(csr, id):
"""
根据事实id获取事实对应的语言变量
:param csr:
:param id: fact id
:return: 事实对应的语言变量 str
"""
csr.execute("select linguistic_variable from fdb where id = {}".format(id))
return csr.fetchone()[0]
def getfdbFuzzyset(csr, id):
"""
根据事实id获取事实对应的模糊集
:param csr:
:param id:事实id
:return:事实对应的模糊集,行向量
"""
csr.execute("select linguistic_variable,fuzzy_set from fdb where id = {}".format(id))
ling_var, setid = csr.fetchone()
csr.execute("select set{} from fdb_{}".format(setid, ling_var))
return np.array(csr.fetchall()).reshape([1, -1])
def getUnusedFact(csr):
"""
从fdb中获取一条未使用过的事实
:param csr:
:return: 事实id
"""
fact = csr.execute("select id from fdb where used=0")
if fact > 0:
fact = csr.fetchone()[0]
csr.execute("update fdb set used=1 where id = {}".format(fact))
return fact
def calCloseness(csr, ling_var, fid, kid):
"""
calculate closeness 计算贴近度
:param csr:
:param fling_var: linguistic variable
:param fid: fact id
:param kid: knowledge id
:return: closeness
"""
csr.execute("select set{} from fdb_{}".format(fid, ling_var))
fset = np.array(csr.fetchall()).reshape([1, -1])
csr.execute("select FuzCptA from fuzzy_knowledge where id = {}".format(kid))
kconcpt = csr.fetchone()[0]
csr.execute("select {} from fuzzy_concept_{}".format(kconcpt, ling_var))
kset = np.array(csr.fetchall()).reshape([1, -1])
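    # closeness as 1 minus the normalized Euclidean distance between the fact set and the knowledge concept set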
return 1 - np.linalg.norm(fset - kset) / np.sqrt(fset.size)
# return (np.minimum(fset, kset).max() + 1 - np.maximum(fset, kset).min()) / 2
def calCloseness1(fset, kset):
"""
calculate closeness 计算给定模糊集的贴近度
:param fset: fact set
:param kset: knowledge set
:return: closeness
"""
fset = np.array(fset)
kset = np.array(kset)
return (np.minimum(fset, kset).max() + 1 - np.maximum(fset, kset).min()) / 2
def fillKS(csr, fid):
"""
将与事实匹配的贴近度最大的同类知识填入到ks中
:param csr:
:param fid: fact id
:return:
"""
csr.execute("select linguistic_variable from fdb where id={}".format(fid))
fact_ling_var = csr.fetchone()[0]
csr.execute("select id,concv,lambda from fuzzy_knowledge where condv=\"{}\"".format(fact_ling_var))
kidlms = np.array(csr.fetchall())
for kidlm in kidlms:
closeness = calCloseness(csr, fact_ling_var, fid, kidlm[0])
if closeness >= kidlm[2]:
# print("insert into KS values({},\"{}\",{},now())".format(kidlm[0], kidlm[1], closeness))
csr.execute(
"insert into KS(id,concv,closeness,updtime) values({},\"{}\",{},now())".format(kidlm[0], kidlm[1],
closeness))
csr.execute("select * from ks")
csr.execute(
"select KS.id,KS.concv,KS.closeness from KS join (select concv,max(closeness) as mc from KS group by concv) b on "
"KS.concv=b.concv and KS.closeness=b.mc")
kidvs = csr.fetchall()
csr.execute("delete from ks")
concv_set = set()
for kidv in kidvs:
if kidv[1] not in concv_set:
concv_set.add(kidv[1])
csr.execute("insert into ks(ID,concv,closeness,updtime) values({},\"{}\",{},now())".format(*kidv))
def getMat(csr, kid):
"""
获取给定知识的模糊矩阵
:param csr:
:param kid: knowledge id
:return: 模糊矩阵
"""
csr.execute("select condv,fuzcptA,concv,fuzcptB from fuzzy_knowledge where id ={}".format(kid))
condv, fuzcptA, concv, fuzcptB = csr.fetchone()
cond_domin = getDomin(csr, condv)
conc_domin = getDomin(csr, concv)
mat = np.zeros([len(cond_domin), len(conc_domin)])
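    # fill the relation matrix element by element from the matrix_<fuzcptA>_<fuzcptB> table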
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
csr.execute("select R from matrix_{}_{} where val1={} and val2={}".format(fuzcptA, fuzcptB, cond_domin[i],
conc_domin[j]))
mat[i, j] = csr.fetchone()[0]
return mat
def hypomul(fset, fuz_mat):
"""
hypothesis multiple 计算假言推理矩阵乘积
:param fset: fact fuzzy set 行向量
:param fuz_mat: fuzzy matrix
:return:
"""
res = np.zeros(fuz_mat.shape[1])
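    # max-min composition: res[i] = max_j min(fset[j], fuz_mat[j, i])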
for i in range(res.size):
res[i] = np.minimum(fset, fuz_mat.T[i]).max()
return res
def infer_by_knowledge(dbname, csr, k, fid, c_stack):
"""
在当前事实下,根据给定的知识进行推理
:param csr:
:param k: 给定的ks中的知识,行向量
:param fid: fact id
:return:
"""
# ling_var = getfdb_ling_var(csr,fid)
fset = getfdbFuzzyset(csr, fid)
fuz_mat = getMat(csr, k[0])
# print(k)
# print("mut")
# print(fset)
# print(fuz_mat)
res_set = hypomul(fset, fuz_mat)
csr.execute("select type from sum_lingv where lingv=\"{}\"".format(k[1]))
lingtype = csr.fetchone()[0]
insert_into_FDB(dbname, csr, (k[1], lingtype), res_set, c_stack)
# print("res", res_set)
def infer_by_ks(dbname, csr, fid, c_stack, k_stack):
"""
根据ks中的知识推理
:param csr:
:param fid: fact id
:return:
"""
csr.execute("select * from ks")
ksk = csr.fetchall()
for k in ksk:
infer_by_knowledge(dbname, csr, k, fid, c_stack)
k_stack.append(k[0])
csr.execute("delete from ks where id = {}".format(k[0]))
def infer_by_number_table(conn, dbname, user):
"""
读取number表,根据从number表中读出的车流量进行推理。number表存放从下位机传来的检测到的车流量大小。
:param conn:
:param dbname:
:param user:
:return:
"""
# initialize(conn, dbname, user)
# print("Succeeded initializing inference engine!")
csr = conn.cursor()
csr.execute("USE " + dbname)
ling_var = ("traffic", "int")
solution = "light_time"
if csr.execute("select number from number where used = 0 and direction = 'NS'"):
val = csr.fetchone()[0]
csr.execute("update number set used = 1 where number = {} and direction = 'NS'".format(val)) #
lt = infer(dbname, user, csr, ling_var, val, solution)
# try:
csr.execute(
"insert into seconds(direction,number,seconds, updtime) values('{}',{},{},now())".format('NS', val, lt))
csr.execute("commit")
# print("insert into seconds(number,seconds, updtime) values({},{},now())".format(val, lt))
# except:
# print("Error in infer_by_number_table!")
# csr.execute("rollback")
def infer(dbname, user, csr, ling_var, val, solution):
"""
推理机过程实现
:param conn:
:param dbname:
:param user:
:param ling_var: (语言变量名,类型) (str,str)
:param val: 接收到传感器传来的值
:param solution: 问题的解 是个语言变量
:return: 推理结果
"""
stack_1 = ["START"]
stack_2 = []
initialize(csr, dbname, user)
fuzzy_set = fuzzing(csr, ling_var[0], val, 5)
insert_into_FDB(dbname, csr, ling_var, fuzzy_set, stack_1)
# insert_into_FDB(conn, dbname, ling_var, fuzzing(conn, dbname, ling_var[0], 6, 2))
solutions = getSolution(csr, dbname, solution)
while len(solutions) == 0:
fid = getUnusedFact(csr)
if fid > 0:
fillKS(csr, fid)
infer_by_ks(dbname, csr, fid, stack_1, stack_2)
else:
return -1
solutions = getSolution(csr, dbname, solution)
result_fuzzy_set = getfdbFuzzyset(csr, solutions[0][0])
defuzzy_data = round(defuzzing(csr, solution, result_fuzzy_set) / 3, 2)
    # TODO explainer
# print(stack_1)
# print(stack_2)
# assert len(stack_1)-2 == len(stack_2)
enfuzzy_id = record_enfuzzy(ling_var[0], val, stack_1[1])
stack_2.insert(0, enfuzzy_id)
for i in range(len(stack_1)-1):
record_inference(stack_2[i],stack_1[i],stack_1[i+1])
record_defuzzy("light_time", stack_1[-1], defuzzy_data)
return defuzzy_data
def explain(light_val):
return get_explanation("light_time={}".format(light_val))
if __name__ == "__main__":
init_file()
host = constants.HOST
user = constants.USER
dbname = constants.DB
conn = MySQLdb.connect(host, user, constants.PWD)
solution = "light_time"
ling_var = ("traffic", "int")
csr = conn.cursor()
csr.execute("use " + dbname)
val = 5
d = infer(dbname, user, csr, ling_var, val, solution)
print(val, d)
    print(get_explanation("light_time={}".format(d)))  # call the explainer with an assignment-style query
    print(explain(d))  # call the numeric explainer directly; the two calls are fully equivalent
print(explain(15))
print(explain(25))
# val = np.arange(21)
# data = []
# for v in val:
# initialize(csr, dbname, user)
# d = infer(dbname, user, csr, ling_var, v, solution)
# print(v, solution, "=", d)
# data.append(d)
#
# print(get_explanation("light_time={}".format(data[3])))
|
py
|
1a5799731d41addc04009027bdda7195946fbf41
|
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from tensorflow.python.keras import models, layers, losses, optimizers, utils
from tensorflow.python.keras import backend as K
def PINet_CIFAR10():
## model
input_shape = [32,32,3]
initial_conv_width=3
initial_stride=1
initial_filters=64
initial_pool_width=3
initial_pool_stride=2
use_global_pooling = True
dropout_rate = 0.2
model_input = layers.Input(shape=input_shape)
x = layers.Conv2D(
128,
initial_conv_width,
strides=initial_stride,
padding="same")(model_input)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.MaxPooling2D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
x = layers.Conv2D(
256,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.MaxPooling2D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
x = layers.Conv2D(
512,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.MaxPooling2D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
x = layers.Conv2D(
1024,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
if use_global_pooling:
x = layers.GlobalAveragePooling2D()(x)
x_logits1 = layers.Dense(2500, activation="relu")(x)
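    # 2500 = 50*50: the dense output is reshaped into a 50x50 single-channel map for the transposed-conv decoder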
x_logits1_reshape = layers.Reshape((1,50,50))(x_logits1)
x_logits1_reshape = layers.Permute((2,3,1))(x_logits1_reshape)
x_logits2 = layers.Conv2DTranspose(
3,
50,
strides=initial_stride,
padding="same")(x_logits1_reshape)
x_logits2 = layers.BatchNormalization()(x_logits2)
x_logits2 = layers.Activation("relu")(x_logits2)
model_output = layers.Flatten()(x_logits2)
model = models.Model(model_input, model_output)
return model
|
py
|
1a57997684aeec6770214be428bcfc18e9a69cb6
|
""" package aries_staticagent """
from setuptools import setup, find_packages
from version import VERSION
def parse_requirements(filename):
"""Load requirements from a pip requirements file."""
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
if __name__ == '__main__':
with open('README.md', 'r') as fh:
LONG_DESCRIPTION = fh.read()
setup(
name='aries-staticagent',
version=VERSION,
author='Daniel Bluhm <[email protected]>, '
'Sam Curren <[email protected]>',
description='Python Static Agent Library and Examples for Aries',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/hyperledger/aries-staticagent-python',
license='Apache 2.0',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
extras_require={
'test': parse_requirements('requirements.dev.txt')
},
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent'
]
)
|
py
|
1a5799f18e5c672807837b5989652f899e640a5d
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.NOT/Serif_16/udhr_Latn.NOT_Serif_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py
|
1a579a3b4c23160616956142f5840fe28106d79e
|
# import the necessary packages
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)
cv2.imshow("Image", image)
# apply Otsu's automatic thresholding -- Otsu's method automatically
# determines the best threshold value `T` for us
(T, threshInv) = cv2.threshold(blurred, 0, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
cv2.imshow("Threshold", threshInv)
print("Otsu's thresholding value: {}".format(T))
# finally, we can visualize only the masked regions in the image
cv2.imshow("Output", cv2.bitwise_and(image, image, mask=threshInv))
cv2.waitKey(0)
|
py
|
1a579adb4edf4ff5681434a5f372b9bd9a74a323
|
'''
excel
- workbook
- sheet
- cell
'''
import xlwt
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet("sheet")
sheet.write(1, 1, "using Excel with Python")
workbook.save('xlwt.xls')
|
py
|
1a579b5d53c255194a12ffd41bffada78d77588a
|
import os
import re
import foobar
_REPO_DIR = os.path.dirname(os.path.dirname(__file__))
def test_version_number_match_with_changelog():
"""__version__ and CHANGELOG.md match for the latest version number."""
changelog = open(os.path.join(_REPO_DIR, "CHANGELOG.md")).read()
# latest version number in changelog = the 1st occurrence of '[x.y.z]'
version_changelog = re.search(r"\[\d+\.\d+\.\d+\]", changelog).group().strip("[]")
assert foobar.__version__ == version_changelog, (
f"Make sure both __version__ ({foobar.__version__}) and "
f"CHANGELOG ({version_changelog}) "
"are updated to match the latest version number"
)
|
py
|
1a579bbd34bc6226c54d6cf9bc6fcec3f9784fab
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import binascii
import os
import pytest
from cryptography.exceptions import (
AlreadyFinalized,
InvalidSignature,
_Reasons,
)
from cryptography.hazmat.primitives.poly1305 import Poly1305
from ...utils import (
load_nist_vectors,
load_vectors_from_file,
raises_unsupported_algorithm,
)
@pytest.mark.supported(
only_if=lambda backend: not backend.poly1305_supported(),
skip_message="Requires OpenSSL without poly1305 support",
)
def test_poly1305_unsupported(backend):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MAC):
Poly1305(b"0" * 32)
@pytest.mark.supported(
only_if=lambda backend: backend.poly1305_supported(),
skip_message="Requires OpenSSL with poly1305 support",
)
class TestPoly1305:
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join("poly1305", "rfc7539.txt"), load_nist_vectors
),
)
def test_vectors(self, vector, backend):
key = binascii.unhexlify(vector["key"])
msg = binascii.unhexlify(vector["msg"])
tag = binascii.unhexlify(vector["tag"])
poly = Poly1305(key)
poly.update(msg)
assert poly.finalize() == tag
assert Poly1305.generate_tag(key, msg) == tag
Poly1305.verify_tag(key, msg, tag)
def test_key_with_no_additional_references(self, backend):
poly = Poly1305(os.urandom(32))
assert len(poly.finalize()) == 16
def test_raises_after_finalize(self, backend):
poly = Poly1305(b"0" * 32)
poly.finalize()
with pytest.raises(AlreadyFinalized):
poly.update(b"foo")
with pytest.raises(AlreadyFinalized):
poly.finalize()
def test_reject_unicode(self, backend):
poly = Poly1305(b"0" * 32)
with pytest.raises(TypeError):
poly.update("") # type:ignore[arg-type]
with pytest.raises(TypeError):
Poly1305.generate_tag(b"0" * 32, "") # type:ignore[arg-type]
def test_verify(self, backend):
poly = Poly1305(b"0" * 32)
poly.update(b"msg")
tag = poly.finalize()
with pytest.raises(AlreadyFinalized):
poly.verify(b"")
poly2 = Poly1305(b"0" * 32)
poly2.update(b"msg")
poly2.verify(tag)
Poly1305.verify_tag(b"0" * 32, b"msg", tag)
def test_invalid_verify(self, backend):
poly = Poly1305(b"0" * 32)
poly.update(b"msg")
with pytest.raises(InvalidSignature):
poly.verify(b"")
p2 = Poly1305(b"0" * 32)
p2.update(b"msg")
with pytest.raises(InvalidSignature):
p2.verify(b"\x00" * 16)
with pytest.raises(InvalidSignature):
Poly1305.verify_tag(b"0" * 32, b"msg", b"\x00" * 16)
def test_verify_reject_unicode(self, backend):
poly = Poly1305(b"0" * 32)
with pytest.raises(TypeError):
poly.verify("") # type:ignore[arg-type]
with pytest.raises(TypeError):
Poly1305.verify_tag(b"0" * 32, b"msg", "") # type:ignore[arg-type]
def test_invalid_key_type(self, backend):
with pytest.raises(TypeError):
Poly1305(object()) # type:ignore[arg-type]
with pytest.raises(TypeError):
Poly1305.generate_tag(object(), b"msg") # type:ignore[arg-type]
def test_invalid_key_length(self, backend):
with pytest.raises(ValueError):
Poly1305(b"0" * 31)
with pytest.raises(ValueError):
Poly1305.generate_tag(b"0" * 31, b"msg")
with pytest.raises(ValueError):
Poly1305(b"0" * 33)
with pytest.raises(ValueError):
Poly1305.generate_tag(b"0" * 33, b"msg")
def test_buffer_protocol(self, backend):
key = binascii.unhexlify(
b"1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cb"
b"c207075c0"
)
msg = binascii.unhexlify(
b"2754776173206272696c6c69672c20616e642074686520736c69746"
b"87920746f7665730a446964206779726520616e642067696d626c65"
b"20696e2074686520776162653a0a416c6c206d696d7379207765726"
b"52074686520626f726f676f7665732c0a416e6420746865206d6f6d"
b"65207261746873206f757467726162652e"
)
key = bytearray(key)
poly = Poly1305(key)
poly.update(bytearray(msg))
assert poly.finalize() == binascii.unhexlify(
b"4541669a7eaaee61e708dc7cbcc5eb62"
)
assert Poly1305.generate_tag(key, msg) == binascii.unhexlify(
b"4541669a7eaaee61e708dc7cbcc5eb62"
)
|
py
|
1a579dcf87716b2900811f66459e622f31e65f92
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
is_warden = models.BooleanField(default=False)
class Student(models.Model):
user = models.OneToOneField(
User,
default=None,
null=True,
on_delete=models.CASCADE)
gender_choices = [('M', 'Male'), ('F', 'Female')]
student_name = models.CharField(max_length=200, null=True)
father_name = models.CharField(max_length=200, null=True)
enrollment_no = models.CharField(max_length=10, unique=True, null=True)
course = models.ForeignKey(
'Course',
null=True,
default=None,
on_delete=models.CASCADE)
dob = models.DateField(
max_length=10,
help_text="format : YYYY-MM-DD",
null=True)
gender = models.CharField(
choices=gender_choices,
max_length=1,
default=None,
null=True)
room = models.OneToOneField(
'Room',
blank=True,
on_delete=models.CASCADE,
null=True)
room_allotted = models.BooleanField(default=False)
no_dues = models.BooleanField(default=True)
def __str__(self):
return self.enrollment_no
class Room(models.Model):
    room_choice = [('S', 'Single Occupancy'), ('D', 'Double Occupancy'), ('P', 'Reserved for Research Scholars'), ('B', 'Both Single and Double Occupancy')]
no = models.CharField(max_length=5)
name = models.CharField(max_length=10)
room_type = models.CharField(choices=room_choice, max_length=1, default=None)
vacant = models.BooleanField(default=False)
hostel = models.ForeignKey('Hostel', on_delete=models.CASCADE)
def __str__(self):
return self.name
class Hostel(models.Model):
name = models.CharField(max_length=5)
gender_choices = [('M', 'Male'), ('F', 'Female')]
gender = models.CharField(
choices=gender_choices,
max_length=1,
default=None,
null=True)
course = models.ManyToManyField('Course', default=None, blank=True)
caretaker = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.name
class Course(models.Model):
# if a student has enrollment number iit2017001 then the course code is iit2017
code = models.CharField(max_length=100, default=None)
room_choice = [('S', 'Single Occupancy'), ('D', 'Double Occupancy'), ('P', 'Reserved for Research Scholars'), ('B', 'Both Single and Double Occupancy')]
room_type = models.CharField(choices=room_choice, max_length=1, default='D')
def __str__(self):
return self.code
class Warden(models.Model):
user = models.OneToOneField(
User,
default=None,
null=True,
on_delete=models.CASCADE)
name = models.CharField(max_length=200, null=True)
hostel = models.ForeignKey('Hostel',
default=None,
null=True,
on_delete=models.CASCADE)
def __str__(self):
return self.name
|
py
|
1a579e54a9ee38d31058d3a749a2eae499e2447e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import boto
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import time
from optparse import OptionParser
from sys import stderr
from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(usage="mesos-ec2 [options] <action> <cluster_name>"
+ "\n\n<action> can be: launch, destroy, login, stop, start, get-master",
add_help_option=False)
parser.add_option("-h", "--help", action="help",
help="Show this help message and exit")
parser.add_option("-s", "--slaves", type="int", default=1,
help="Number of slaves to launch (default: 1)")
parser.add_option("-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option("-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option("-t", "--instance-type", default="m1.large",
help="Type of instance to launch (default: m1.large). " +
"WARNING: must be 64 bit, thus small instances won't work")
parser.add_option("-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option("-z", "--zone", default="us-east-1b",
help="Availability zone to launch instances in")
parser.add_option("-a", "--ami", default="ami-f8806a91",
help="Amazon Machine Image ID to use")
parser.add_option("-o", "--os", default="lucid64",
help="OS on the Amazon Machine Image (lucid64 for now)")
parser.add_option("-d", "--download", metavar="SOURCE", default="none",
help="Where to download latest code from: set to 'git' to check out " +
"from git, or 'none' to use the Mesos on the AMI (default)")
parser.add_option("-b", "--branch", default="master",
help="If using git, which branch to check out. Default is 'master'")
parser.add_option("-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option("--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option("-f", "--ft", metavar="NUM_MASTERS", default="1",
help="Number of masters to run. Default is 1. " +
"Greater values cause Mesos to run in FT mode with ZooKeeper.")
parser.add_option("--ebs-vol-size", metavar="SIZE", type="int", default=0,
help="Attach a new EBS volume of size SIZE (in GB) to each node as " +
"/vol. The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs.")
(opts, args) = parser.parse_args()
opts.ft = int(opts.ft)
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
if opts.identity_file == None and action in ['launch', 'login']:
print >> stderr, ("ERROR: The -i or --identity-file argument is " +
"required for " + action)
sys.exit(1)
if os.getenv('AWS_ACCESS_KEY_ID') == None:
print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
"must be set")
sys.exit(1)
if os.getenv('AWS_SECRET_ACCESS_KEY') == None:
print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
"must be set")
sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print "Creating security group " + name
return conn.create_security_group(name, "Mesos EC2 group")
# Wait for a set of launched instances to exit the "pending" state
# (i.e. either to start running or to fail and be terminated)
def wait_for_instances(conn, reservation):
instance_ids = [i.id for i in reservation.instances]
while True:
reservations = conn.get_all_instances(instance_ids)
some_pending = False
for res in reservations:
if len([i for i in res.instances if i.state == 'pending']) > 0:
some_pending = True
break
if some_pending:
time.sleep(5)
else:
for i in reservation.instances:
i.update()
return
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
return (instance.state in ['pending', 'running', 'stopping', 'stopped'])
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master, slave
# and zookeeper instances (in that order).
# Fails if there are already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
print "Setting up security groups..."
master_group = get_or_make_group(conn, cluster_name + "-master")
slave_group = get_or_make_group(conn, cluster_name + "-slaves")
zoo_group = get_or_make_group(conn, cluster_name + "-zoo")
if master_group.rules == []: # Group was just now created
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
master_group.authorize(src_group=zoo_group)
master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
if slave_group.rules == []: # Group was just now created
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
slave_group.authorize(src_group=zoo_group)
slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
if zoo_group.rules == []: # Group was just now created
zoo_group.authorize(src_group=master_group)
zoo_group.authorize(src_group=slave_group)
zoo_group.authorize(src_group=zoo_group)
zoo_group.authorize('tcp', 22, 22, '0.0.0.0/0')
zoo_group.authorize('tcp', 2181, 2181, '0.0.0.0/0')
zoo_group.authorize('tcp', 2888, 2888, '0.0.0.0/0')
zoo_group.authorize('tcp', 3888, 3888, '0.0.0.0/0')
# Check if instances are already running in our groups
print "Checking for running cluster..."
reservations = conn.get_all_instances()
for res in reservations:
group_names = [g.id for g in res.groups]
if master_group.name in group_names or slave_group.name in group_names or zoo_group.name in group_names:
active = [i for i in res.instances if is_active(i)]
if len(active) > 0:
print >> stderr, ("ERROR: There are already instances running in " +
"group %s, %s or %s" % (master_group.name, slave_group.name, zoo_group.name))
sys.exit(1)
print "Launching instances..."
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print >> stderr, "Could not find AMI " + opts.ami
sys.exit(1)
# Create block device mapping so that we can add an EBS volume if asked to
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.delete_on_termination = True
block_map["/dev/sdv"] = device
# Launch slaves
slave_res = image.run(key_name = opts.key_pair,
security_groups = [slave_group],
instance_type = opts.instance_type,
placement = opts.zone,
min_count = opts.slaves,
max_count = opts.slaves,
block_device_map = block_map)
print "Launched slaves, regid = " + slave_res.id
# Launch masters
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
master_res = image.run(key_name = opts.key_pair,
security_groups = [master_group],
instance_type = master_type,
placement = opts.zone,
min_count = opts.ft,
max_count = opts.ft,
block_device_map = block_map)
print "Launched master, regid = " + master_res.id
# Launch ZooKeeper nodes if required
if opts.ft > 1:
zoo_res = image.run(key_name = opts.key_pair,
security_groups = [zoo_group],
instance_type = opts.instance_type,
placement = opts.zone,
min_count = 3,
max_count = 3,
block_device_map = block_map)
print "Launched zoo, regid = " + zoo_res.id
else:
zoo_res = None
# Return all the instances
return (master_res, slave_res, zoo_res)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of EC2 reservation objects for the master, slave
# and zookeeper instances (in that order).
def get_existing_cluster(conn, opts, cluster_name):
print "Searching for existing cluster " + cluster_name + "..."
reservations = conn.get_all_instances()
master_res = None
slave_res = None
zoo_res = None
for res in reservations:
active = [i for i in res.instances if is_active(i)]
if len(active) > 0:
group_names = [g.id for g in res.groups]
if group_names == [cluster_name + "-master"]:
master_res = res
elif group_names == [cluster_name + "-slaves"]:
slave_res = res
elif group_names == [cluster_name + "-zoo"]:
zoo_res = res
if master_res != None and slave_res != None:
print "Found master regid: " + master_res.id
print "Found slave regid: " + slave_res.id
if zoo_res != None:
print "Found zoo regid: " + zoo_res.id
return (master_res, slave_res, zoo_res)
else:
if master_res == None and slave_res != None:
print "ERROR: Could not find master in group " + cluster_name + "-master"
elif master_res != None and slave_res == None:
print "ERROR: Could not find slaves in group " + cluster_name + "-slaves"
else:
print "ERROR: Could not find any existing cluster"
sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_res, slave_res, zoo_res, opts, deploy_ssh_key):
print "Deploying files to master..."
deploy_files(conn, "deploy." + opts.os, opts, master_res, slave_res, zoo_res)
master = master_res.instances[0].public_dns_name
if deploy_ssh_key:
print "Copying SSH key %s to master..." % opts.identity_file
ssh(master, opts, 'mkdir -p /root/.ssh')
scp(master, opts, opts.identity_file, '/root/.ssh/id_rsa')
print "Running setup on master..."
ssh(master, opts, "chmod u+x mesos-ec2/setup")
ssh(master, opts, "mesos-ec2/setup %s %s %s" % (opts.os, opts.download, opts.branch))
print "Done!"
# Wait for a whole cluster (masters, slaves and ZooKeeper) to start up
def wait_for_cluster(conn, master_res, slave_res, zoo_res):
print "Waiting for instances to start up..."
time.sleep(5)
wait_for_instances(conn, master_res)
wait_for_instances(conn, slave_res)
if zoo_res != None:
wait_for_instances(conn, zoo_res)
print "Waiting 40 more seconds..."
time.sleep(40)
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
if instance_type in ["m1.xlarge", "c1.xlarge", "m2.xlarge", "cc1.4xlarge"]:
return 4
elif instance_type in ["m1.small", "c1.medium"]:
return 1
else:
return 2
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
def deploy_files(conn, root_dir, opts, master_res, slave_res, zoo_res):
active_master = master_res.instances[0].public_dns_name
num_disks = get_num_disks(opts.instance_type)
hdfs_data_dirs = "/mnt/hdfs/dfs/data"
mapred_local_dirs = "/mnt/hadoop/mrlocal"
if num_disks > 1:
for i in range(2, num_disks + 1):
hdfs_data_dirs += ",/mnt%d/hdfs/dfs/data" % i
mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
if zoo_res != None:
zoo_list = '\n'.join([i.public_dns_name for i in zoo_res.instances])
cluster_url = "zoo://" + ",".join(
["%s:2181/mesos" % i.public_dns_name for i in zoo_res.instances])
else:
zoo_list = "NONE"
cluster_url = "1@%s:5050" % active_master
template_vars = {
"master_list": '\n'.join([i.public_dns_name for i in master_res.instances]),
"active_master": active_master,
"slave_list": '\n'.join([i.public_dns_name for i in slave_res.instances]),
"zoo_list": zoo_list,
"cluster_url": cluster_url,
"hdfs_data_dirs": hdfs_data_dirs,
"mapred_local_dirs": mapred_local_dirs
}
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
tmp_dir = tempfile.mkdtemp()
for path, dirs, files in os.walk(root_dir):
dest_dir = os.path.join('/', path[len(root_dir):])
local_dir = tmp_dir + dest_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
local_file = tmp_dir + dest_file
with open(os.path.join(path, filename)) as src:
with open(local_file, "w") as dest:
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key])
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
command = (("rsync -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " +
"'%s/' 'root@%s:/'") % (opts.identity_file, tmp_dir, active_master))
subprocess.check_call(command, shell=True)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
# Copy a file to a given host through scp, throwing an exception if scp fails
def scp(host, opts, local_file, dest_file):
subprocess.check_call(
"scp -q -o StrictHostKeyChecking=no -i %s '%s' 'root@%s:%s'" %
(opts.identity_file, local_file, host, dest_file), shell=True)
# Run a command on a host through ssh, throwing an exception if ssh fails
def ssh(host, opts, command):
subprocess.check_call(
"ssh -t -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(opts.identity_file, host, command), shell=True)
def main():
(opts, action, cluster_name) = parse_args()
conn = boto.connect_ec2()
if action == "launch":
if opts.resume:
(master_res, slave_res, zoo_res) = get_existing_cluster(
conn, opts, cluster_name)
else:
(master_res, slave_res, zoo_res) = launch_cluster(
conn, opts, cluster_name)
wait_for_cluster(conn, master_res, slave_res, zoo_res)
setup_cluster(conn, master_res, slave_res, zoo_res, opts, True)
elif action == "destroy":
response = raw_input("Are you sure you want to destroy the cluster " +
cluster_name + "?\nALL DATA ON ALL NODES WILL BE LOST!!\n" +
"Destroy cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_res, slave_res, zoo_res) = get_existing_cluster(
conn, opts, cluster_name)
print "Terminating master..."
for inst in master_res.instances:
inst.terminate()
print "Terminating slaves..."
for inst in slave_res.instances:
inst.terminate()
if zoo_res != None:
print "Terminating zoo..."
for inst in zoo_res.instances:
inst.terminate()
elif action == "login":
(master_res, slave_res, zoo_res) = get_existing_cluster(
conn, opts, cluster_name)
master = master_res.instances[0].public_dns_name
print "Logging into master " + master + "..."
proxy_opt = ""
if opts.proxy_port != None:
proxy_opt = "-D " + opts.proxy_port
subprocess.check_call("ssh -o StrictHostKeyChecking=no -i %s %s root@%s" %
(opts.identity_file, proxy_opt, master), shell=True)
elif action == "get-master":
(master_res, slave_res, zoo_res) = get_existing_cluster(conn, opts, cluster_name)
print master_res.instances[0].public_dns_name
elif action == "stop":
response = raw_input("Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_res, slave_res, zoo_res) = get_existing_cluster(
conn, opts, cluster_name)
print "Stopping master..."
for inst in master_res.instances:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print "Stopping slaves..."
for inst in slave_res.instances:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
if zoo_res != None:
print "Stopping zoo..."
for inst in zoo_res.instances:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
elif action == "start":
(master_res, slave_res, zoo_res) = get_existing_cluster(
conn, opts, cluster_name)
print "Starting slaves..."
for inst in slave_res.instances:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print "Starting master..."
for inst in master_res.instances:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
if zoo_res != None:
print "Starting zoo..."
for inst in zoo_res.instances:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster(conn, master_res, slave_res, zoo_res)
setup_cluster(conn, master_res, slave_res, zoo_res, opts, False)
elif action == "shutdown":
print >> stderr, ("The shutdown action is no longer available.\n" +
"Use either 'destroy' to delete a cluster and all data on it,\n" +
"or 'stop' to shut down the machines but have them persist if\n" +
"you launched an EBS-backed cluster.")
sys.exit(1)
else:
print >> stderr, "Invalid action: %s" % action
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
|
py
|
1a579f555bb850b92b51b2f6cbed2c32c5d943df
|
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
from download.box import LifespanBox
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
#grab stuff from corrected and curated
#get list of filenames
##########################
#folderlistlabels=['WashU_HCAorBoth','WashU_HCD', 'UCLA_HCAorBoth','UCLA_HCD', 'UMN_HCAorBoth','UMN_HCD', 'MGH_HCAorBoth','Harvard_HCD']
#folderlistnums= [82804729845, 82804015457,82807223120, 82805124019, 82803665867, 82805151056,82761770877, 82803734267]
#Harvard
Harv=82803734267
Harvattn=96013516511
MGH2=82761770877
MGHattn=96148925420
WashUD=82804015457
WashUDattn=96147128675
WashUA=82804729845
WashUAattn=96149947498
UMNA=82803665867
UMNAattn=96153923311
UMND=82805151056
UMNDattn=96155708581
UCLAA=82807223120
UCLAAattn=96154919803
UCLAD=82805124019
UCLADattn=96162759127
harvcleandata, harvcleanscore=curatedandcorrected(Harv,Harvattn)
mghcleandata, mghcleanscore=curatedandcorrected(MGH2,MGHattn)
washudcleandata,washudcleanscore=curatedandcorrected(WashUD,WashUDattn)
washuacleandata,washuacleanscore=curatedandcorrected(WashUA,WashUAattn)
umnacleandata,umnacleanscore=curatedandcorrected(UMNA,UMNAattn)
umndcleandata,umndcleanscore=curatedandcorrected(UMND,UMNDattn)
uclaacleandata,uclaacleanscore=curatedandcorrected(UCLAA,UCLAAattn)
ucladcleandata,ucladcleanscore=curatedandcorrected(UCLAD,UCLADattn)
###stopped here
harvcleandata.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
#box.update_file(497579203898,box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
harvcleanscore.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#box.update_file(497530866864,box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
mghcleandata.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
mghcleanscore.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#update box files by hand
washudcleandata.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washudcleanscore.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
washuacleandata.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washuacleanscore.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umnacleandata.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umnacleanscore.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umndcleandata.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umndcleanscore.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
uclaacleandata.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
uclaacleanscore.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
ucladcleandata.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
ucladcleanscore.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#concatenate cleandata for snapshotdate - putting read_csv here in case not loaded into memory
harvcleandata=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleandata=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleandata=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleandata=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleandata=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleandata=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleandata=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleandata=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
allrawdataHCAorBoth=pd.concat([mghcleandata,washuacleandata,umnacleandata,uclaacleandata],axis=0)
allrawdataHCD=pd.concat([harvcleandata,washudcleandata,umndcleandata,ucladcleandata],axis=0)
harvcleanscore=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleanscore=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleanscore=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleanscore=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleanscore=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleanscore=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleanscore=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleanscore=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
allscoresHCAorBoth=pd.concat([mghcleanscore,washuacleanscore,umnacleanscore,uclaacleanscore],axis=0)
allscoresHCD=pd.concat([harvcleanscore,washudcleanscore,umndcleanscore,ucladcleanscore],axis=0)
#make csv
allrawdataHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allrawdataHCD.to_csv(box_temp+'/HCD_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allscoresHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
allscoresHCD.to_csv(box_temp+'/HCD_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
def curatedandcorrected(curatedfolderid,needsattnfolder):
harvardfiles, harvardfolders=foldercontents(curatedfolderid)
#dont grab files that need attention
harvardfolders=harvardfolders.loc[~(harvardfolders.foldername.str.contains('needs_attention'))]
harvardfiles2, harvardfolders2=folderlistcontents(harvardfolders.foldername,harvardfolders.folder_id)
harvardfiles=pd.concat([harvardfiles,harvardfiles2],axis=0,sort=True)
data4process=harvardfiles.loc[~(harvardfiles.filename.str.upper().str.contains('SCORE')==True)]
scores4process=harvardfiles.loc[harvardfiles.filename.str.upper().str.contains('SCORE')==True]
box.download_files(data4process.file_id)
box.download_files(scores4process.file_id)
#trick the catcontents macro to create catable dataset, but dont actually cat until you remove the
#PINS in the corrected file from the curated file
#step1 - separate data4process/scores4process into corrected and old curated data
cdata=data4process.loc[data4process.filename.str.contains('corrected')]
cscores=scores4process.loc[scores4process.filename.str.contains('corrected')]
olddata=data4process.loc[~(data4process.filename.str.contains('corrected'))]
oldscores=scores4process.loc[~(scores4process.filename.str.contains('corrected'))]
#create catable dataset for corrected data
hdatainitcorr=catcontents(cdata,box_temp)
hscoreinitcorr=catcontents(cscores,box_temp)
#get list of ids in this corrected data #60 for Harvard
corrl=findpairs(hdatainitcorr,hscoreinitcorr) #this is the list of ids in both scored and raw corrected data
#create catable dataset for old curated data
hdatainitold=catcontents(olddata,box_temp)
hscoreinitold=catcontents(oldscores,box_temp)
#remove the data with PINS from corrected
hdatainitoldsub=hdatainitold[~(hdatainitold.PIN.isin(corrl))]
hscoreinitoldsub=hscoreinitold[~(hscoreinitold.PIN.isin(corrl))]
#now cat the two datasets together
hdatainit=pd.concat([hdatainitcorr,hdatainitoldsub],axis=0,sort=True) #these have 60 more unique pins than before...good
hscoreinit=pd.concat([hscoreinitcorr,hscoreinitoldsub],axis=0,sort=True) #these have 60 more than before...good
l=findpairs(hdatainit,hscoreinit) #this is the list of ids in both scored and raw data
    #set aside those who arent in both and those that are in dlist or slist
notbothdatalist=hdatainit[~(hdatainit.PIN.isin(l))]
notbothscorelist=hscoreinit[~(hscoreinit.PIN.isin(l))]
nbs=list(notbothscorelist.PIN.unique())
nbd=list(notbothdatalist.PIN.unique())
hdatainit2=hdatainit[hdatainit.PIN.isin(l)]
hscoreinit2=hscoreinit[hscoreinit.PIN.isin(l)]
#check that this is same as above -- it is
#hdatainit2qc=hdatainit[~(hdatainit.PIN.isin(nbs+nbd))]
#hscoreinit2qc=hscoreinit[~(hscoreinit.PIN.isin(nbs+nbd))]
#find instrument duplications that are not identical
dlist,slist=findwierdos(hdatainit2,hscoreinit2)
dslist=pd.concat([dlist,slist],axis=0)
wierdlist=list(dslist.PIN.unique())
#set aside those who are in the wierdlist
nonidenticaldupdata=hdatainit2.loc[hdatainit2.PIN.isin(wierdlist)]
nonidenticaldupscore=hscoreinit2.loc[hscoreinit2.PIN.isin(wierdlist)]
wierdd=list(dlist.PIN.unique())
wierds=list(slist.PIN.unique())
#so we have the notinboth lists and the wierdlists
#Already set aside the notinbothlists
#if we exclude any wierdlist PINs from both, this should get rid of everything that isnt one-to-one
hdatainit3=hdatainit2.loc[~(hdatainit2.PIN.isin(wierdlist))]
hscoreinit3=hscoreinit2.loc[~(hscoreinit2.PIN.isin(wierdlist))]
#both have 580 unique ids - make them into a list
l3=findpairs(hdatainit3,hscoreinit3) #this is the list of ids in both scored and raw data
dlist,slist=findwierdos(hdatainit3,hscoreinit3)
    #now delete any identical duplicates, but first check for issues finding wierdos
if dlist.empty and slist.empty:
hdatainit3=hdatainit3.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
hscoreinit3=hscoreinit3.drop_duplicates(subset={'PIN','Inst'})
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
#export scores and data for all pins in dslist or nbs or nbd with flags
notbothdatalist.to_csv(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv')
notbothscorelist.to_csv(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv',needsattnfolder)
nonidenticaldupdata.to_csv(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv')
nonidenticaldupscore.to_csv(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv',needsattnfolder)
#last but not least...set aside ids not in REDCap, and IDs that need visit numbers
#get reds from hdatatinit3 (should be same as list from hscoreinit3)
#generate hdatainit4 and hscoreinit4 which is relieved of these ids
hdatainit4=subjectsvisits(hdatainit3)
hscoreinit4=subjectsvisits(hscoreinit3)
mv=hscoreinit4.loc[~(hscoreinit4.visit.isin(['V1','V2','V3','X1','X2','X3']))].copy()
mvs=list(mv.subject.unique()) #list of PINs without visit numbers
    check=subjectpairs(hdatainit4,hscoreinit4) #this number will be fewer because V1 and V2 PINs for the same subject are only counted once
redids=box.getredcapids()
dfcheck=pd.DataFrame(check,columns=['subject'])
boxids=pd.merge(dfcheck,redids,how='left',on='subject',indicator=True)
reds=list(boxids.loc[boxids._merge=='left_only'].subject) #subjects not in redcap
boxandredcap=boxids.loc[boxids._merge=='both'].subject
#export the otherwise cleanest data ready for snapshotting as the new updated curated file -- then run this for all sites befo
#write code here - has only ids with visit numbers and one to one scores and data correspondence and no wierd duplications
#but check one last time that hdatainit5 and hscoreinit5 is super clean
hdatainit5=hdatainit4.loc[~(hdatainit4.subject.isin(mvs+reds))]
hscoreinit5=hscoreinit4.loc[~(hscoreinit4.subject.isin(mvs+reds))]
#export the lists of ids and reasons they were excluded
df=pd.DataFrame(columns=['reason','affectedIDs'])
df=df.append({'reason': 'PIN In Scores but not Data', 'affectedIDs': nbs}, ignore_index=True)
df=df.append({'reason': 'PIN In Data but not Scores', 'affectedIDs': nbd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Data', 'affectedIDs': wierdd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Scores', 'affectedIDs': wierds}, ignore_index=True)
df=df.append({'reason': 'PIN/subject in Scores and Data but missing visit', 'affectedIDs': mvs}, ignore_index=True)
df=df.append({'reason': 'subject in Scores and Data but not REDCap ', 'affectedIDs': reds}, ignore_index=True)
df.to_csv(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv',needsattnfolder)
return hdatainit5,hscoreinit5
#get subject and visit from a PIN in a dataframe
def subjectsvisits(hdatainit3):
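    # By this script's convention, the first 10 characters of a PIN identify the
    # subject, and the visit code (v1/v2/v3 or x1/x2/x3) appears later in the PIN.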
hdatainit3['subject']=hdatainit3.PIN.str.strip().str[:10]
hdatainit3['visit']=''
hdatainit3.loc[hdatainit3.PIN.str.contains('v1',case=False),'visit']='V1'
hdatainit3.loc[hdatainit3.PIN.str.contains('v2',case=False),'visit']='V2'
hdatainit3.loc[hdatainit3.PIN.str.contains('v3',case=False),'visit']='V3'
hdatainit3.loc[hdatainit3.PIN.str.contains('x1',case=False),'visit']='X1'
hdatainit3.loc[hdatainit3.PIN.str.contains('x2',case=False),'visit']='X2'
hdatainit3.loc[hdatainit3.PIN.str.contains('x3',case=False),'visit']='X3'
return hdatainit3
#pull id visit combos that arent in both scores and data files
def findpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.PIN.unique():
if i in hdatainit.PIN.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following PINs in scores but not data:')
print(i)
for i in hdatainit.PIN.unique():
if i in hscoreinit.PIN.unique():
pass
else:
print('the following PINs in data but not scores:')
print(i)
return pinsinboth
def subjectpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.subject.unique():
if i in hdatainit.subject.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following subjects in scores but not data:')
print(i)
for i in hdatainit.subject.unique():
if i in hscoreinit.subject.unique():
pass
else:
            print('the following subjects in data but not scores:')
print(i)
return pinsinboth
def findwierdos(hdatainit,hscoreinit):
#compare the two types of sort to identify which files have non-identical duplications
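    # sort1 keeps one row per logical key (PIN/Inst[/ItemID/Position]); sort2 keeps one
    # row per fully-identical record (ignoring filename/file_id). If a PIN ends up with
    # different row counts under the two sorts, it has duplicated keys whose contents differ.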
sort1data=hdatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
sort1score=hscoreinit.drop_duplicates(subset={'PIN','Inst'})
sort2data=hdatainit.drop_duplicates(subset=set(hdatainit.columns).difference({'filename','file_id'}))
sort2score=hscoreinit.drop_duplicates(subset=set(hscoreinit.columns).difference({'filename','file_id'}))
s1d=sort1data.groupby('PIN').count()
s2d=sort2data.groupby('PIN').count()
databoth=pd.merge(s1d.reset_index()[['PIN','DeviceID']], s2d.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_data=databoth.loc[databoth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
s1s=sort1score.groupby('PIN').count()
s2s=sort2score.groupby('PIN').count()
scoreboth=pd.merge(s1s.reset_index()[['PIN','DeviceID']], s2s.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_score=scoreboth.loc[scoreboth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
return wierd_data,wierd_score
def catcontents(files,cache_space): #dataframe that has filename and file_id as columns
scoresfiles=files.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.filename:
filepath=os.path.join(cache_space,i)
filenum=scoresfiles.loc[scoresfiles.filename==i,'file_id']
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']=i
temp['file_id']=pd.Series(int(filenum.values[0]),index=temp.index)
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
except:
print(filepath+' wouldnt import')
temp=pd.DataFrame()
temp['filename']=pd.Series(i,index=[0])
temp['file_id']=pd.Series(int(filenum.values[0]),index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
def catfromlocal(endpoint_temp,scores2cat): #dataframe that has filenames
scoresfiles=scores2cat.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.fname:
filepath=os.path.join(endpoint_temp,i)
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']="endpointmachine/"+i
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
except:
print(filepath+' wouldnt import')
temp=pd.DataFrame()
temp['filename']=pd.Series("endpointmachine/"+i,index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
def folderlistcontents(folderslabels,folderslist):
bdasfilelist=pd.DataFrame()
bdasfolderlist=pd.DataFrame()
for i in range(len(folderslist)):
print('getting file and folder contents of box folder ' +folderslabels[i])
subfiles,subfolders=foldercontents(folderslist[i]) #foldercontents generates two dfs: a df with names and ids of files and a df with names and ids of folders
bdasfilelist=bdasfilelist.append(subfiles)
bdasfolderlist=bdasfolderlist.append(subfolders)
return bdasfilelist,bdasfolderlist
def foldercontents(folder_id):
filelist=[]
fileidlist=[]
folderlist=[]
folderidlist=[]
WUlist=box.client.folder(folder_id=folder_id).get_items(limit=None, offset=0, marker=None, use_marker=False, sort=None, direction=None, fields=None)
for item in WUlist:
if item.type == 'file':
filelist.append(item.name)
fileidlist.append(item.id)
if item.type == 'folder':
folderlist.append(item.name)
folderidlist.append(item.id)
files=pd.DataFrame({'filename':filelist, 'file_id':fileidlist})
folders=pd.DataFrame({'foldername':folderlist, 'folder_id':folderidlist})
return files,folders
|
py
|
1a57a0837691b56763aecf8dedabb20e180c1497
|
from guild import batch_util
# Flags
max_trials = 5
batch_fail = False
trials_fail = ""
batch_run = batch_util.batch_run()
proto_flags = batch_run.batch_proto.get("flags") or {}
trials_count = batch_run.get("max_trials") or max_trials
trials_fail_list = [int(s) for s in str(trials_fail).split(",") if s]
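# e.g. trials_fail = "2,4" marks the 2nd and 4th trials as failures (1-based,
# compared against i + 1 below).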
for i in range(trials_count):
trial_flags = dict(proto_flags)
trial_flags["fail"] = (i + 1) in trials_fail_list
try:
batch_util.run_trial(batch_run, trial_flags)
except SystemExit as e:
if batch_fail:
print("BATCH FAIL")
raise SystemExit(2)
|
py
|
1a57a0a278f103eb67c02a722d10cfa1e84adf26
|
#!/usr/bin/env python
# coding: utf-8
# Imports
from luigi.parameter import IntParameter
from luigi import LocalTarget, Task
from luigi.format import UTF8
import datetime
import pandas as pd
import re
import os
from configs.Configurations import Configurations
'''bigrams'''
from gensim.models import Phrases
from collections import Counter
from Preprocessor import Preprocessor
'''Plotting'''
import matplotlib.pyplot as plt
class DataVisualizer(Task):
# Date for Output-File prefix
    date = datetime.datetime.now()
configId = IntParameter(default=0)
# Method to declare the Output-File
def output(self):
prefix = self.date.strftime("%Y-%m-%dT%H%M%S")
return LocalTarget("../output/%s_configID_%s_DataVisualizer.csv" % (prefix, self.configId), format=UTF8)
    # Method to define the required Task (Preprocessor)
def requires(self):
return Preprocessor(self.configId)
# Prepare preprocessed data for data evaluation
def run(self):
# use configID from commandline
configs = Configurations().configs[self.configId]
# default values if not set otherwise in configs
n_most_freq_words = 20
n_most_freq_words_per_class = 15
n_most_freq_bigrams_per_class = 15
# set values according to configs
if ("n_most_freq_words" in configs):
n_most_freq_words = configs.get("n_most_freq_words")
if ("n_most_freq_words_per_class" in configs):
n_most_freq_words_per_class = configs.get("n_most_freq_words_per_class")
if ("n_most_freq_bigrams_per_class" in configs):
n_most_freq_bigrams_per_class = configs.get("n_most_freq_bigrams_per_class")
# dictionary holding all data_reports
eval_dict = {}
input_df = pd.read_csv(self.input().path)
cleaned_df = pd.DataFrame(columns=('text', 'cleaned_text', 'url', 'title', 'class'))
# convert document['cleaned_text'] from string to list of words
for index, document in input_df.iterrows():
text = document['cleaned_text']
text = re.sub(r"[',\[\]]", "", text)
wordlist = text.split(" ")
row = [document.text, wordlist, document.url, document.title, document['class']]
cleaned_df.loc[index] = row
# Top n most frequent words for all the articles
cl_text_list = cleaned_df['cleaned_text']
wf = self.word_freq(cl_text_list, n_most_freq_words)
eval_dict['n_frequent_words'] = wf.head(n_most_freq_words)
# Avg word count by category
cleaned_df['word_count'] = cleaned_df['cleaned_text'].apply(self.word_count)
avg_wc = cleaned_df.groupby('class').mean().reset_index()
eval_dict['avg_word_count_per_class'] = avg_wc[['class', 'word_count']]
# Preparing the dataframes
# Splitting the df into the different classes
df_menu = cleaned_df.loc[cleaned_df['class'] == 1]
df_no_menu = cleaned_df.loc[cleaned_df['class'] == 0]
# Top n words by category. Taking bigrams into account
text_menu = df_menu['cleaned_text']
text_no_menu = df_no_menu['cleaned_text']
menu = self.word_freq_bigrams(text_menu, top_n=n_most_freq_words_per_class)
no_menu = self.word_freq_bigrams(text_no_menu, top_n=n_most_freq_words_per_class)
df_wf = pd.concat([menu, no_menu], axis=1)
cols = ['menu', 'count', 'no menu', 'count']
df_wf.columns = cols
eval_dict['n_top_words_per_class'] = df_wf
# Top n bigrams by category
menu_bigrams = self.bigram_freq(text_menu, top_n=n_most_freq_bigrams_per_class)
no_menu_bigrams = self.bigram_freq(text_no_menu, top_n=n_most_freq_bigrams_per_class)
df_bigram_wf = pd.concat([menu_bigrams, no_menu_bigrams], axis=1)
df_bigram_wf.columns = cols
eval_dict['n_top_bigrams_per_class'] = df_bigram_wf
#
# Plot the distribution of word count by article
fig, ax = plt.subplots(1, 2, figsize=(12, 10))
fig.suptitle('Distribution of Word Count by Category', fontsize=15)
bins = 200
ax[0].hist(df_menu['word_count'], bins=bins, color='#41924F')
ax[0].set_title('Menu Category', fontsize=13)
ax[0].set_xlim(0, 150)
ax[1].hist(df_no_menu['word_count'], bins=bins, color='#FFC300')
ax[1].set_title('No Menu Category', fontsize=13)
ax[1].set_xlim(0, 150)
# create data report
data_report = "Data report\n\n"
data_report += "configID: %s\n" % self.configId
#
data_report += "\n"
data_report += "Average word count per class\n"
data_report += str(eval_dict['avg_word_count_per_class'].head())
data_report += "\n"
#
data_report += "\n"
data_report += "Top %s frequent words\n" % n_most_freq_words
data_report += str(eval_dict['n_frequent_words'].head(n_most_freq_words))
data_report += "\n"
#
data_report += "\n"
data_report += "Top %s words by category (Taking bigrams into account)\n" % n_most_freq_words_per_class
data_report += str(eval_dict['n_top_words_per_class'].head(n_most_freq_bigrams_per_class))
data_report += "\n"
#
data_report += "\n"
data_report += "Top %s bigrams by category\n" % n_most_freq_words_per_class
data_report += str(eval_dict['n_top_bigrams_per_class'].head(n_most_freq_bigrams_per_class))
data_report += "\n"
# write report to file
prefix = self.date.strftime("%Y-%m-%dT%H%M%S")
filename = "../data/data_report/configID_%s_%s.txt" % (self.configId, prefix)
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
f.write(data_report)
f.close()
plt.savefig("../data/data_report/word_dist_by_class_%s.png" % prefix)
plt.close(fig)
# Write .csv-File
with self.output().open("w") as out:
input_df.to_csv(out, encoding="utf-8")
def word_count(self, text):
return len(str(text).split(' '))
def word_freq(self, clean_text_list, top_n):
"""
Word Frequency
"""
flat = [item for sublist in clean_text_list for item in sublist]
with_counts = Counter(flat)
top = with_counts.most_common(top_n)
word = [each[0] for each in top]
num = [each[1] for each in top]
return pd.DataFrame([word, num]).T
def bigram_freq(self, clean_text_list, top_n):
bigram_model = Phrases(clean_text_list, min_count=2, threshold=1)
w_bigrams = bigram_model[clean_text_list]
flat_w_bigrams = [item for sublist in w_bigrams for item in sublist]
bigrams = []
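        # gensim's Phrases joins detected bigram tokens with '_' (its default delimiter),
        # so keep only the tokens carrying that marker.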
for each in flat_w_bigrams:
if '_' in each:
bigrams.append(each)
counts = Counter(bigrams)
top = counts.most_common(top_n)
word = [each[0] for each in top]
num = [each[1] for each in top]
return pd.DataFrame([word, num]).T
def word_freq_bigrams(self, clean_text_list, top_n):
"""
Word Frequency With Bigrams
"""
bigram_model = Phrases(clean_text_list, min_count=2, threshold=1)
w_bigrams = bigram_model[clean_text_list]
flat_w_bigrams = [item for sublist in w_bigrams for item in sublist]
with_counts = Counter(flat_w_bigrams)
top = with_counts.most_common(top_n)
word = [each[0] for each in top]
num = [each[1] for each in top]
return pd.DataFrame([word, num]).T
|
py
|
1a57a1d59c29bdbcea667fb990c548e9fb9e8934
|
from gaitanalyzer.app import main
if __name__ == '__main__':
main().main_loop()
|
py
|
1a57a1db0faf08acf9bef88837f1a1aed55b6d05
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from GenNet_utils.hase.config import basedir, PYTHON_PATH
os.environ['HASEDIR'] = basedir
if PYTHON_PATH is not None:
for i in PYTHON_PATH: sys.path.insert(0, i)
from GenNet_utils.hase.hdgwas.tools import HaseAnalyser
import argparse
import pandas as pd
import numpy as np
from collections import OrderedDict
if __name__ == "__main__":
os.environ['HASEDIR'] = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser = argparse.ArgumentParser(description='Script analyse results of HASE')
parser.add_argument("-r", required=True, help="path to hase results")
parser.add_argument("-o", "--out", type=str, required=True, help="path to save result folder")
parser.add_argument("-df", type=float, default=None,
help="degree of freedom = ( #subjects in study - #covariates - 1 )")
parser.add_argument("-N", type=int, default=None, help="file number to read")
# TODO (low) add reference panel
args = parser.parse_args()
Analyser = HaseAnalyser()
print(args)
Analyser.DF = args.df
Analyser.result_path = args.r
Analyser.file_number = args.N
results = OrderedDict()
results['RSID'] = np.array([])
results['p_value'] = np.array([])
results['t-stat'] = np.array([])
results['phenotype'] = np.array([])
results['SE'] = np.array([])
results['MAF'] = np.array([])
results['BETA'] = np.array([])
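    # These empty arrays only fix the column names/order used to write the header row
    # when results.csv is created for the first time below.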
while True:
Analyser.summary()
if Analyser.results is None:
break
print('Saving data...')
if not os.path.exists(os.path.join(args.out, 'results' + '.csv')):
df = pd.DataFrame.from_dict(results)
df.to_csv(os.path.join(args.out, 'results' + '.csv'), sep=" ", index=None)
df = pd.DataFrame.from_dict(Analyser.results)
with open(os.path.join(args.out, 'results' + '.csv'), 'a') as f:
df.to_csv(f, sep=" ", header=False, index=None)
|
py
|
1a57a23ce0890a6f2e74f3713ed3b1da82bfb411
|
import logging
import os
from chirp.common import conf
from chirp.library import album
from chirp.library import audio_file
class Dropbox(object):
def __init__(self, dropbox_path=None):
dropbox_path = dropbox_path or conf.MUSIC_DROPBOX
self._path = dropbox_path
self._dirs = {}
self._all_files = []
# Scan the path and remember all of the subdirectories and
        # the MP3 files that they contain.
for basename in os.listdir(dropbox_path):
child_path = os.path.join(dropbox_path, basename)
if os.path.isdir(child_path):
mp3_names = []
for name in os.listdir(child_path):
# Skip dot-files.
if name.startswith("."):
continue
# Must have the right file extension.
if not name.lower().endswith(".mp3"):
continue
mp3_path = os.path.join(child_path, name)
# Only accept things that look like ordinary files.
if os.path.isfile(mp3_path):
mp3_names.append(name)
self._all_files.append(mp3_path)
self._dirs[child_path] = mp3_names
self._all_albums = None
self._all_tracks = None
def files(self):
return list(self._all_files)
def scan_fast(self):
"""Quickly scan all MP3 files in the dropbox.
Returns:
A dict mapping relative file paths to either audio_file.AudioFile
objects, or to None in the case of a corrupted or unreadable file.
"""
# Note the use of ad-hoc relativization in the path.
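        # For example (hypothetical paths): with self._path == '/music/dropbox',
        # '/music/dropbox/album/track.mp3' maps to the key '/album/track.mp3'.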
return dict(
(mp3_path[len(self._path):], audio_file.scan_fast(mp3_path))
for mp3_path in self._all_files)
def albums(self):
"""Return unstandardized versions of all albums in the dropbox."""
if self._all_albums is None:
self._all_albums = []
for path in sorted(self._dirs):
for au in album.from_directory(path):
self._all_albums.append(au)
yield au
else:
for au in self._all_albums:
yield au
def tracks(self):
"""Do a fast scan and return all tracks in the dropbox."""
if self._all_tracks is None:
self._all_tracks = []
for path in self._dirs:
for alb in album.from_directory(path, fast=True):
self._all_tracks.extend(alb.all_au_files)
return self._all_tracks
|
py
|
1a57a4d45d4dea66746e613fa1919c506309c7cd
|
"""
We need a new test framework
"""
from unittest import TestCase
from slack_entities.entities.channel import Channel
class ChannelTestCase(TestCase):
def test_get(self):
# Getting channel by name
channel_1 = Channel.get(name='test')
# Getting channel by id
channel_2 = Channel.get(id=channel_1.id)
self.assertEqual(channel_1, channel_2)
|
py
|
1a57a5af42eca7efe6558cb22df7bb21a79fb0ca
|
from flask import request, redirect, abort, jsonify, url_for
from CTFd.models import db, Solves, Challenges, WrongKeys, Keys, Tags, Files
from CTFd import utils
import os
import boto3
import hashlib
import string
from werkzeug.utils import secure_filename
def clean_filename(c):
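    # Intended for use with filter(): allowed characters return True, everything else
    # implicitly returns None (falsey) and is dropped.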
if c in string.ascii_letters + string.digits + '-' + '_' + '.':
return True
def get_s3_conn(app):
access_key_id = utils.get_app_config('ACCESS_KEY_ID')
secret_access_key = utils.get_app_config('SECRET_ACCESS_KEY')
if access_key_id and secret_access_key:
client = boto3.client(
's3',
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key
)
bucket = utils.get_app_config('BUCKET')
return client, bucket
else:
client = boto3.client('s3')
bucket = utils.get_app_config('BUCKET')
return client, bucket
def load(app):
def upload_file(file, chalid):
s3, bucket = get_s3_conn(app)
filename = ''.join(list(filter(clean_filename, secure_filename(file.filename).replace(' ', '_'))))
if len(filename) <= 0:
return False
md5hash = hashlib.md5(os.urandom(64)).hexdigest()
key = md5hash + '/' + filename
s3.upload_fileobj(file, bucket, key)
db_f = Files(chalid, key)
db.session.add(db_f)
db.session.commit()
return db_f.id, (md5hash + '/' + filename)
def delete_file(filename):
s3, bucket = get_s3_conn(app)
f = Files.query.filter_by(id=filename).first_or_404()
key = f.location
s3.delete_object(Bucket=bucket, Key=key)
db.session.delete(f)
db.session.commit()
return True
def file_handler(path):
f = Files.query.filter_by(location=path).first_or_404()
chal = Challenges.query.filter_by(id=f.chal).first()
s3, bucket = get_s3_conn(app)
if utils.is_admin() or chal is None:
key = f.location
url = s3.generate_presigned_url('get_object', Params = {
'Bucket': bucket,
'Key': key, })
return redirect(url)
if utils.user_can_view_challenges():
if not utils.ctftime():
if not utils.view_after_ctf():
abort(403)
if chal.hidden:
abort(403)
key = f.location
url = s3.generate_presigned_url('get_object', Params = {
'Bucket': bucket,
'Key': key, })
return redirect(url)
else:
return redirect(url_for('auth.login'))
utils.upload_file = upload_file
utils.delete_file = delete_file
app.view_functions['views.file_handler'] = file_handler
|
py
|
1a57a5b02781b7035a4e0ca42cf3d2fd21d0110b
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import DankcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(DankcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
|
py
|
1a57a67f8169ef98cddfa7464a08da0d1cb34ac6
|
import steam
class MyClient(steam.Client):
async def on_ready(self):
print("------------")
print("Logged in as")
print("Username:", self.user)
print("ID:", self.user.id64)
print("Friends:", len(self.user.friends))
print("------------")
async def on_message(self, message):
# we do not want the bot to reply to itself
if message.author == self.user:
return
if message.content.startswith("!hello"):
await message.channel.send(f"Hello {message.author}")
client = MyClient()
client.run("username", "password")
|
py
|
1a57a721081e1d2ad379ece84e9d8ec3bdff952d
|
import os
import logging
from django.conf import settings
log_path = os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs'), 'web.log')
# Create the logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# logger.propagate = 0
formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s')
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(logging.DEBUG)
# Create a handler that writes to the log file
fileHandler = logging.FileHandler(log_path, mode='w')
fileHandler.setFormatter(formatter)
if settings.DEBUG:
    fileHandler.setLevel(logging.DEBUG)
else:
    fileHandler.setLevel(logging.INFO)
if settings.DEBUG:
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
|
py
|
1a57a726e8fe12352f67edb706816b21c95b2529
|
"""
This module lets you practice the ACCUMULATOR pattern
in its simplest classic forms:
SUMMING: total = total + number
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and Emily Wilcox.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the TEST functions in this module. """
run_test_sum_powers()
run_test_sum_powers_in_range()
def run_test_sum_powers():
""" Tests the sum_powers function. """
# ------------------------------------------------------------------
# Done: 2. Implement this function.
# It TESTS the sum_powers function defined below.
# Include at least ** 3 ** tests.
#
# Use the same 4-step process as in implementing previous
# TEST functions, including the same way to print expected/actual.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the sum_powers function:')
print('--------------------------------------------------')
# Test 1
    expected = 1296  # 1**3 + 2**3 + ... + 8**3 = (1 + 2 + ... + 8)**2
answer = sum_powers(8, 3)
print('expected:', expected)
print('actual :', answer)
# Test 2
    expected = 354  # 1**4 + 2**4 + 3**4 + 4**4
answer = sum_powers(4, 4)
print('expected:', expected)
print('actual :', answer)
# Test 3
    expected = 129  # 1**7 + 2**7
answer = sum_powers(2, 7)
print('expected:', expected)
print('actual :', answer)
def sum_powers(n, p):
"""
What comes in: A non-negative integer n
and a number p.
What goes out: The sum 1**p + 2**p + 3**p + ... + n**p
for the given numbers n and p. The latter may be any number
(possibly a floating point number, and possibly negative).
Side effects: None.
Examples:
-- sum_powers(5, -0.3) returns about 3.80826
-- sum_powers(100, 0.1) returns about 144.45655
"""
# ------------------------------------------------------------------
# Done: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
#
# No fair running the code of sum_powers to GENERATE
# test cases; that would defeat the purpose of TESTING!
# ------------------------------------------------------------------
    total = 0
    for k in range(n):
        # SUMMING accumulator pattern: add each term to the running total.
        total = total + (k + 1) ** p
    return total
def run_test_sum_powers_in_range():
""" Tests the sum_powers_in_range function. """
# ------------------------------------------------------------------
# Done: 4. Implement this function.
# It TESTS the sum_powers_in_range function defined below.
# Include at least ** 3 ** tests.
#
# Use the same 4-step process as in implementing previous
# TEST functions, including the same way to print expected/actual.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the sum_powers_in_range function:')
print('--------------------------------------------------')
# Test 1
    expected = 143436  # 4**2 + 5**2 + ... + 75**2
answer = sum_powers_in_range(4, 75, 2)
print('Expected:', expected)
print('Actual: ', answer)
# Test 2
    expected = 238.04  # approximately: 2**0.5 + 3**0.5 + ... + 50**0.5
answer = sum_powers_in_range(2, 50, .5)
print('Expected:', expected)
print('Actual: ', answer)
# Test 3
    expected = 221.99  # approximately: 2**0.25 + 3**0.25 + ... + 90**0.25
answer = sum_powers_in_range(2, 90, .25)
print('Expected:', expected)
print('Actual: ', answer)
def sum_powers_in_range(m, n, p):
"""
What comes in: Non-negative integers m and n, with n >= m,
and a number p.
What goes out: the sum
m**p + (m+1)**p + (m+2)**p + ... + n**p
for the given numbers m, n and p. The latter may be any number
(possibly a floating point number, and possibly negative).
Side effects: None.
Example:
-- sum_powers_in_range(3, 100, 0.1) returns about 142.384776
"""
# ------------------------------------------------------------------
# Done: 5. Implement and test this function.
# Note that you should write its TEST function first (above).
#
# No fair running the code of sum_powers_in_range to GENERATE
# test cases; that would defeat the purpose of TESTING!
# ------------------------------------------------------------------
    total = 0
    for k in range(n - m + 1):
        # Accumulate m**p, (m+1)**p, ..., n**p.
        total = total + (m + k) ** p
    return total
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|