# coding:utf-8
# -- standard library ---------------------------------------------------------
import json
import unittest
# --Modules to test -----------------------------------------------------------
from VestaService.Report import WorkerReport, TaskReport
class UtilsTests(unittest.TestCase):
def test_WorkerReport_update(self):
wr = WorkerReport(nb_tasks=3)
tr = TaskReport(doc_id="secret", tool="screwdriver")
tr.set_succeeded()
wr.update(tr)
self.assertEqual(wr.nb_success, 1,
msg="Error by updating a worker report "
"with a successful task report.")
tr.set_failed(code=444, message="No screwdriver in this drawer.")
wr.update(tr)
self.assertEqual(wr.nb_failures, 1,
msg="Error by updating a worker report "
"with a failed task report.")
self.assertEqual(len(wr.detail), 2,
msg="Error by updating a worker report "
"with a failed task report. "
" Wrong number of tasks.")
def test_WorkerReport_tojson(self):
wr = WorkerReport(nb_tasks=2)
tr = TaskReport(doc_id="secret", tool="screwdriver")
tr.set_succeeded()
wr.update(tr)
tr2 = TaskReport(doc_id="secret", tool="screwdriver")
tr2.set_failed(code=444, message="No screwdriver in this drawer.")
wr.update(tr2)
wr.set_succeeded()
wr.update_completion_ratio()
attended_wrjson_str = ('{"nb_success": 1, "nb_ignores": 0, '
'"nb_failures": 1, "completion_ratio" : 1.0, '
'"nb_tasks" : 2, "status" : "success",'
'"detail": ['
'{"doc_id" : "secret", "step" : "screwdriver", '
'"status" : "success"},'
'{"doc_id" : "secret", "step" : "screwdriver", '
'"status" : "failure", "code":444, '
'"message" : "No screwdriver in this drawer."}'
']'
'}')
wrjson = wr.to_json()
self.assertEqual(json.JSONDecoder().decode(wrjson),
json.JSONDecoder().decode(attended_wrjson_str))
def test_WorkerReport_abbreviated_json(self):
wr = WorkerReport(nb_tasks=2)
tr = TaskReport(doc_id="secret", tool="screwdriver")
tr.set_succeeded()
wr.update(tr)
tr2 = TaskReport(doc_id="secret", tool="screwdriver")
tr2.set_failed(code=444, message="No screwdriver in this drawer.")
wr.update(tr2)
wr.set_succeeded()
wr.update_completion_ratio()
attended_wrjson_str = ('{"nb_success": 1, "nb_ignores": 0, '
'"nb_failures": 1, "completion_ratio" : 1.0, '
'"nb_tasks" : 2, "status" : "success",'
'"full_report_url":"http://mss:1234"'
'}')
wrjson = wr.abbreviated_json("http://mss:1234")
self.assertEqual(json.JSONDecoder().decode(wrjson),
json.JSONDecoder().decode(attended_wrjson_str))
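# Standard unittest entry point (added so the file can be run directly;
# an assumption that direct execution is desired):
if __name__ == "__main__":
    unittest.main()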
|
#!/usr/bin/env python2.7
# vim : set fileencoding=utf-8 expandtab noai ts=4 sw=4 filetype=python :
"""
embeddedfactor GmbH 2015
Simple python orchestration with fabric, stitch.datastore and fabtools
"""
from __future__ import print_function
import stitch.datastore
from stitch.datastore.utils import resolve
from stitch.execution import execute_step
class Commands(dict):
"""A simple dict extension to store command tuples"""
def __init__(self, lst):
"""
With in the constructor a list of python modules names is given.
The module must contain the typical stitch command structure.
Which means the docstring of the module, option and execute method
as well as the docstring of the execution method are important.
In the options method the subparser for the arguments of the commands
is filled.
Execute is executed with all argeuments in the kw args.
"""
super(Commands, self).__init__()
import importlib
for cmd in lst:
mod = importlib.import_module(cmd)
name = getattr(mod, '__name__', cmd).split('.')[-1]
doc = getattr(mod, '__doc__', "")
options = getattr(mod, 'options', None)
funct = getattr(mod, 'execute', None)
if funct:
self[name] = (doc, options, funct)
def add_yaml_command(self, cmd):
"""Create and register YAML command"""
name = cmd.get("name")
doc = cmd.get("help", "")
description = cmd.get("description", "")
args = cmd.get("arguments", {})
obj = dict(cmd)
def options(parser):
"""Get options from yaml file"""
for k, kwargs in args.items():
if not kwargs:
kwargs = {}
parser.add_argument(*k, **kwargs)
def execute():
"""Execute YAML description"""
global_defaults = dict(stitch.datastore.env)
global_defaults.update(**resolve(obj.get("defaults", {}), **global_defaults))
execute_step(obj, global_defaults)
execute.__doc__ = description
self.add_command(name, doc, options, execute)
def import_from_yaml(self):
"""Import all commands from the stitch.datastore datastore"""
for cmd in stitch.datastore.env.conf.get("command", {}).values():
self.add_yaml_command(cmd)
def add_command(self, name, doc, options, funct):
"""Adding firther commands"""
self[name] = (doc, options, funct)
def help(self, name):
"""Show help of the command with the name name"""
if name in self:
print(self[name][0])
def execute(self, name):
"""Execute the command with name name and all the arguments in the kw dict"""
return self[name][2]()
def parse(self, parser):
"""Create argparser for all subcommands"""
commands = parser.add_subparsers(
            title='Available Commands',
help="Available commands",
dest='command'
)
for key, cmd in self.items():
subparser = commands.add_parser(key, help=cmd[2].__doc__)
if cmd[1]:
cmd[1](subparser)
return commands
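# Illustrative usage sketch (an assumption, not part of stitch): wiring the
# Commands registry into an argparse CLI. The "greet" command and its helper
# functions are hypothetical; a real stitch command lives in its own module
# and is imported via Commands([...]) as described in the constructor
# docstring above.
if __name__ == "__main__":
    import argparse

    def _greet_options(parser):
        """Register arguments for the hypothetical 'greet' command."""
        parser.add_argument("--name", default="world")

    def _greet_execute():
        """Print a greeting."""
        print("Hello!")

    cli = Commands([])  # no command modules imported in this sketch
    cli.add_command("greet", "Print a greeting", _greet_options, _greet_execute)
    parser = argparse.ArgumentParser(description=__doc__)
    cli.parse(parser)
    args = parser.parse_args()
    cli.execute(args.command)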
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
## TODO 3.1: This file has not been updated to use new (April 2009)
## Progress objects.
from ooflib.SWIG.common import mpitools
from ooflib.SWIG.common import switchboard
from ooflib.common import debug
from ooflib.common import primitives
from ooflib.common import registeredclass
from ooflib.common import utils
from ooflib.common.IO import oofmenu
from ooflib.common.IO import parameter
from ooflib.common.IO import progressbar  # assumed import; the legacy progress bar is used in _apply/_postProcess below
from ooflib.common.IO import reporter
from ooflib.SWIG.engine import cfiddlenodesbaseParallel
from ooflib.engine import skeletoncontext
from ooflib.engine import deputy
from ooflib.engine import skeletonmodifier
from ooflib.engine.IO import skeletonIPC
from ooflib.engine.IO import skeletonmenu
import math
import random
import string
import sys
import time
cfiddler = cfiddlenodesbaseParallel
_rank = mpitools.Rank()
_size = mpitools.Size()
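# Overview (descriptive note added for clarity): the functions below are the
# parallel counterparts of the serial "fiddle nodes" skeleton modifiers.
# _apply creates the deputy skeleton copy and _postProcess drives the
# iteration loop on every MPI process; rank 0 owns the progress bar and
# broadcasts continue/abort signals to the other ranks. Each iteration calls
# a *CoreProcess function, which builds a FiddleNodesParallel subclass
# (Anneal/Smooth/SnapParallel), and _commonCoreProcess runs its play() method
# under the context's write lock. play() shuffles the locally owned nodes,
# builds a per-rank work order with the Scheduler, and executes the resulting
# active/passive/solo jobs, keeping shared-node moves consistent across
# processes via the mpitools send/receive channels.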
def _apply(self, oldskeleton, context):
if _rank == 0:
pBar = progressbar.getProgress()
pBar.set_message(self.intro)
return oldskeleton.deputyCopy()
def _postProcess(self, context):
if _rank == 0:
pBar = progressbar.getProgress()
pBar.set_message(self.header)
skeleton = context.getObject()
before = mpitools.Allreduce_DoubleSum(
skeleton.energyTotal(self.criterion.alpha))
if _rank == 0:
if self.pbar_type == "continuous":
n = self.iteration.iterations
self.count = 0
while self.iteration.goodToGo():
self.count += 1
        # The context acquires the write permission inside coreProcess.
mpitools.Barrier()
self.coreProcess_parallel(context)
self.updateIteration_parallel()
if _rank == 0:
if pBar.query_stop():
pBar.set_failure()
pBar.set_message("Failed")
# Sending a break signal
mpitools.Isend_Bool(False, range(1,_size))
break
else:
if self.pbar_type == "continuous":
pBar.set_progress(1.0*self.count/n)
# does this ever get displayed?
pBar.set_message("%s%d/%d"
% (self.header, self.count, n))
# Sending a continue signal
mpitools.Isend_Bool(True, range(1,_size))
else:
if not mpitools.Recv_Bool(0):
break
switchboard.notify("skeleton nodes moved", context)
if _rank == 0:
if pBar.query_stop(): # or pBar.get_success() <0:
pBar.set_failure()
pBar.set_message("Failed")
mpitools.Isend_Bool(False, range(1,_size))
return
else:
mpitools.Isend_Bool(True, range(1,_size))
else:
if not mpitools.Recv_Bool(0):
return
mpitools.Barrier()
after = mpitools.Allreduce_DoubleSum(
skeleton.energyTotal(self.criterion.alpha))
# Reporting to the message window
if _rank == 0:
if before:
rate = 100.0*(before-after)/before
else:
rate = 0.0
diffE = after - before
reporter.report("%s deltaE = %10.4e (%6.3f%%)"
% (self.outro, diffE, rate))
#################################################################
##def _coreProcess(self, context):
## global _rank
## fiddler = FiddleNodesParallel(context,
## self.criterion,
## self.targets,
## self.T,
## self.movedPosition)
## context.begin_writing()
## try:
## fiddler.play()
## # Information for upate
## self.deltaE = fiddler.deltaE
## self.totalE = fiddler.totalE
## self.nok = fiddler.nok
## self.nbad = fiddler.nbad
## finally:
## context.end_writing()
## if _rank == 0:
## switchboard.notify("redraw")
#################################################################
def _annealCoreProcess(self, context):
fiddler = AnnealParallel(context,
self.criterion,
self.targets,
self.T,
self.movedPosition)
_commonCoreProcess(self, context, fiddler)
def _smoothCoreProcess(self, context):
fiddler = SmoothParallel(context,
self.criterion,
self.targets,
self.T,
self.movedPosition)
_commonCoreProcess(self, context, fiddler)
def _snapCoreProcess(self, context):
fiddler = SnapParallel(context,
self.criterion,
self.targets,
self.T, #trivial
self.movedPosition)
_commonCoreProcess(self, context, fiddler)
def _commonCoreProcess(method, context, fiddler):
global _rank
context.begin_writing()
try:
fiddler.play()
        # Information for update
method.deltaE = fiddler.deltaE
method.totalE = fiddler.totalE
method.nok = fiddler.nok
method.nbad = fiddler.nbad
finally:
context.end_writing()
if _rank == 0:
switchboard.notify("redraw")
#################################################################
def _updateIteration(self):
deltaE = mpitools.Allreduce_DoubleSum(self.deltaE)
totalE = mpitools.Allreduce_DoubleSum(self.totalE)
nok = mpitools.Allreduce_IntSum(self.nok)
nbad = mpitools.Allreduce_IntSum(self.nbad)
if nok+nbad > 0:
self.iteration.update(deltaE, totalE,
(1.0*nok)/(nok+nbad),
self.count)
else:
self.iteration.update(None, None, None, self.count)
# Custom debug message function with message ID
report_id = 0
def REPORT(*args):
global _rank
global report_id
report_id += 1
values =["###"]+[_rank]+["("]+[report_id]+[")"]+[":"]+list(args)
print string.join(map(str, values), ' ')
sys.stdout.flush()
#################################################################
# Now the schedule
class Scheduler:
def __init__(self, nnodes, allnodes, allshared, fiddler):
global _size
self.nnodes = nnodes
self.allnodes = allnodes
self.allshared = allshared
self.fiddler = fiddler
self.works = [[] for i in range(_size)]
        self.pointers = [0]*_size       # Index of the current node in each rank's list
        self.completed = [False]*_size  # Whether each rank has been given work in the current turn
def done(self, i):
return self.pointers[i] == self.nnodes[i]
def completedTurn(self, i):
return self.completed[i]
def completedTurns(self, ii): # True, if any.
for i in ii:
if self.completedTurn(i):
return True
return False
def __call__(self, rank):
global _rank
global _size
while self.pointers != self.nnodes:
for i in range(_size):
if self.done(i) or self.completedTurn(i):
continue
# Add something to the queue
node = self.allnodes[i][self.pointers[i]]
shared = self.allshared[i][self.pointers[i]]
# Shared or not
if shared:
# Do the share-holders first
if self.completedTurns(shared):
# If any of shared holders finished its turn,
# the node should be done in the later turn.
continue
for s in shared: # passive work
self.works[s].append((self.fiddler.passiveProcess, i))
self.completed[s] = True
self.works[i].append((self.fiddler.activeProcess, node))
else:
self.works[i].append((self.fiddler.soloProcess, node))
self.completed[i] = True
self.pointers[i] += 1
# Filling the void
for i in range(_size):
if not self.completedTurn(i):
self.works[i].append(None)
self.completed = [False]*_size
return self.works[rank]
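# How the Scheduler assigns work (descriptive note added for clarity):
# each rank owns a list of active nodes (allnodes[i]) and, for every node,
# the ranks that share it (allshared[i]).  Work is handed out in "turns":
# within a turn a rank gets at most one job.  A shared node yields an
# activeProcess job on its owning rank and a passiveProcess job on every
# sharing rank (all of which are then marked completed for that turn),
# while an unshared node yields a single soloProcess job.  Ranks left
# without a job in a turn receive a None placeholder, so every rank's work
# list keeps the same per-turn alignment and the matching active/passive
# jobs are executed at the same list position in play().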
#################################################################
# FiddleNodesParallel class will do the majority of fiddling
class FiddleNodesParallel:
def __init__(self, context, criterion, targets, T, mover):
self.context = context
self.skeleton = context.getObject()
self.criterion = criterion
self.targets = targets
self.T = T
self.mover = mover
self.alpha = criterion.alpha
self.totalE = self.skeleton.energyTotal(self.alpha) # initial E
self.deltaE = 0. # improvement
self.nok = 0 # successful ones
self.nbad = 0 # unsuccessful ones
# Data communication
self.move_channel = 1
self.illegal_channel = 2
self.report_channel = 3
self.verdict_channel = 4
# Initialize MPI datatype for communication
cfiddler.tuneFiddle()
def ownNode(self, node):
global _rank
return _rank == node.master()
def passiveProcess(self, stopper):
self.mover.passive(self.skeleton, stopper) # Non-trivial for Smooth
moveData = cfiddler.Recv_MoveData(stopper, tag=self.move_channel)
node = self.skeleton.getNodeWithIndex(moveData.index)
## REPORT("HELPING", stopper, "FOR NODE #", node.remoteIndex(stopper))
# recording energy-before (should this use periodic neighbor Elements?)
neighbors = node.aperiodicNeighborElements(self.skeleton)
reportData = [el.energyHomogeneity(self.skeleton) for el in neighbors]
reportData += [el.energyShape() for el in neighbors]
# move to the position -- self.skeleton is a DeputySkeleton
self.skeleton.moveNodeTo(
node, primitives.Point(moveData.x, moveData.y))
# Check & send illegality
mpitools.Send_Bool(bool(node.illegal()),
stopper,
tag=self.illegal_channel)
# if illegal in any processes, it should be aborted
if mpitools.Recv_Bool(
stopper, tag=self.verdict_channel): # True:continue, False:abort
# recording energy-after
reportData += [el.energyHomogeneity(self.skeleton)
for el in neighbors]
reportData += [el.energyShape() for el in neighbors]
# reporting
mpitools.Send_DoubleVec(reportData,
stopper,
tag=self.report_channel)
            # receiving verdict: True = stay, False = move back
if not mpitools.Recv_Bool(stopper, self.verdict_channel):
self.skeleton.moveNodeBack(node)
else: # Illegal!
self.skeleton.moveNodeBack(node)
## REPORT("DONE HELPING", moveData.master, " ON NODE #",
## node.remoteIndex(moveData.master))
def activeProcess(self, index):
node = self.skeleton.getNodeWithIndex(index)
change = deputy.DeputyProvisionalChanges()
move_to = self.mover(self.skeleton, node)
change.moveNode(node,
move_to,
self.skeleton) # moved the node
# Building data to be sent to sharers.
shared = node.sharedWith()
nodeMoves = []
for s in shared:
nodeMoves.append(
cfiddler.create_movedata(
_rank, # master process
node.remoteIndex(s), # remote index
move_to.x, # x
move_to.y # y
))
# Sending move data to shared processes
cfiddler.Isend_MoveData(nodeMoves, shared,
tag=self.move_channel)
## REPORT("STARTED WORKING ON NODE #", index, "WITH", shared)
# receiving illegality from shared processes
illegal = mpitools.Irecv_Bools(shared,
tag=self.illegal_channel)
if True in illegal or change.illegal(self.skeleton):
self.moveBack(node)
return
else: # continue
mpitools.Isend_Bool(True, shared, tag=self.verdict_channel)
# Receiving report from shared processes
reports = mpitools.Irecv_DoubleVecs(shared, tag=self.report_channel)
homog0 = []
shape0 = []
homog1 = []
shape1 = []
for r in reports:
n = len(r)/4
homog0 += r[:n]
shape0 += r[n:2*n]
homog1 += r[2*n:3*n]
shape1 += r[3*n:4*n]
change.augmentData(homog0, homog1, shape0, shape1)
# Now, the decision time
bestchange = self.criterion([change], self.skeleton)
if bestchange is not None:
self.stay(node, bestchange)
elif self.T > 0. and not self.criterion.hopeless():
diffE = change.deltaE(self.skeleton, self.alpha)
if math.exp(-diffE/self.T) > random.random():
self.stay(node, change)
else:
self.moveBack(node)
else:
self.moveBack(node)
## REPORT("DONE WORKING ON NODE #", index, "WITH", shared)
def stay(self, node, change):
self.nok += 1
self.deltaE += change.deltaE(self.skeleton, self.alpha)
change.accept(self.skeleton)
if node.isShared():
mpitools.Isend_Bool(True, node.sharedWith(),
tag=self.verdict_channel)
def moveBack(self, node):
self.nbad += 1
if node.isShared():
mpitools.Isend_Bool(False, node.sharedWith(),
tag=self.verdict_channel)
def soloProcess(self, index):
## REPORT("WORKING SOLO ON NODE #", index)
node = self.skeleton.getNodeWithIndex(index)
change = deputy.DeputyProvisionalChanges()
change.moveNode(node,
self.mover(self.skeleton, node),
self.skeleton) # moved
# Now, the decision time
bestchange = self.criterion([change], self.skeleton)
if bestchange is not None:
self.stay(node, bestchange)
elif self.T > 0. and not self.criterion.hopeless():
diffE = change.deltaE(self.skeleton, self.alpha)
if math.exp(-diffE/self.T) > random.random():
self.stay(node, change)
else:
self.moveBack(node)
else:
self.moveBack(node)
## REPORT("DONE SOLO ON NODE #", index)
def createWorkOrder(self, activeNodes):
global _rank
global _size
# First the data collection
nnodes = mpitools.Allgather_Int(len(activeNodes))
allnodes = mpitools.Allgather_IntVec(
[n.getIndex() for n in activeNodes], size_known=nnodes)
allsignatures = mpitools.Allgather_IntVec(
[n.nshared() for n in activeNodes], size_known=nnodes)
nshared = [reduce(lambda x,y: x+y, s) for s in allsignatures]
myshared = [n.sharedWith() for n in activeNodes]
myshared = reduce(lambda x,y: x+y, myshared)
allshared = mpitools.Allgather_IntVec(myshared,
size_known=nshared)
def listrize(list, signature):
nsig = len(signature)
count = 0
output = [[] for i in range(nsig)]
for i in range(nsig):
for j in range(signature[i]):
output[i].append(list[count])
count += 1
return output
for i in range(len(allshared)):
allshared[i] = listrize(allshared[i], allsignatures[i])
scheduler = Scheduler(nnodes, allnodes, allshared, self)
self.mywork = scheduler(_rank)
def play(self):
global _rank
global _size
# Get the nodes & shuffle them
activeNodes = self.targets(self.context)
activeNodes = filter(self.ownNode, activeNodes)
random.shuffle(activeNodes)
self.createWorkOrder(activeNodes)
mpitools.Barrier()
for work in self.mywork:
if work is not None: # work = (callback function, arguments)
work[0](work[1])
mpitools.Barrier()
skeletonIPC.collect_pieces(self.skeleton)
self.skeleton.timestamp.increment()
#################################################################
class AnnealParallel(FiddleNodesParallel):
pass
#################################################################
class SmoothParallel(FiddleNodesParallel):
pass
#################################################################
class SnapParallel(FiddleNodesParallel):
def passiveProcess(self, stopper):
# the node to move
myindex = mpitools.Recv_Int(stopper, tag=self.move_channel)
node = self.skeleton.getNodeWithIndex(myindex)
self.mover.passive(self.skeleton, node, stopper)
# getting no. of move candidates
nmoves = mpitools.Recv_Int(stopper, tag=self.move_channel)
for i in range(nmoves):
moveData = cfiddler.Recv_MoveData(stopper, tag=self.move_channel)
## REPORT("HELPING", stopper, "FOR NODE #", node.remoteIndex(stopper))
# recording energy-before
neighbors = node.aperiodicNeighborElements(self.skeleton)
reportData = [el.energyHomogeneity(self.skeleton) for el in neighbors]
reportData += [el.energyShape() for el in neighbors]
# move to the position -- self.skeleton is a DeputySkeleton
self.skeleton.moveNodeTo(
node, primitives.Point(moveData.x, moveData.y))
# Check & send illegality
mpitools.Send_Bool(bool(node.illegal()),
stopper,
tag=self.illegal_channel)
# if illegal in any processes, it should be aborted
if mpitools.Recv_Bool(
stopper, tag=self.verdict_channel): # True:continue, False:abort
# recording energy-after
reportData += [el.energyHomogeneity(self.skeleton)
for el in neighbors]
reportData += [el.energyShape() for el in neighbors]
# reporting
mpitools.Send_DoubleVec(reportData,
stopper,
tag=self.report_channel)
# reset for the next one
self.skeleton.moveNodeBack(node)
## REPORT("DONE HELPING", moveData.master, " ON NODE #",
## node.remoteIndex(moveData.master))
        # receiving verdict: True = stay, False = move back
if mpitools.Recv_Bool(stopper, self.verdict_channel):
x, y = mpitools.Recv_DoubleVec(stopper,
tag=self.move_channel,
size=2)
self.skeleton.moveNodeTo(node, primitives.Point(x, y))
def activeProcess(self, index):
node = self.skeleton.getNodeWithIndex(index)
shared = node.sharedWith()
# send the node (remote) index
for s in shared:
mpitools.Send_Int(node.remoteIndex(s), s, self.move_channel)
move_candidates = self.mover.active(self.skeleton, node)
mpitools.Isend_Int(len(move_candidates), shared, tag=self.move_channel)
changes = []
for mc in move_candidates:
change = deputy.DeputyProvisionalChanges()
change.moveNode(node, mc, self.skeleton) # moved the node
# Building data to be sent to sharers.
nodeMoves = []
for s in shared:
nodeMoves.append(
cfiddler.create_movedata(
_rank, # master process
node.remoteIndex(s), # remote index
mc.x, # x
mc.y # y
))
# Sending move data to shared processes
cfiddler.Isend_MoveData(nodeMoves, shared,
tag=self.move_channel)
## REPORT("STARTED WORKING ON NODE #", index, "WITH", shared)
# receiving illegality from shared processes
illegal = mpitools.Irecv_Bools(shared,
tag=self.illegal_channel)
legal = True not in illegal and not change.illegal(self.skeleton)
mpitools.Isend_Bool(legal, shared, tag=self.verdict_channel)
if not legal:
continue
# Receiving report from shared processes
reports = mpitools.Irecv_DoubleVecs(shared, tag=self.report_channel)
homog0 = []
shape0 = []
homog1 = []
shape1 = []
for r in reports:
n = len(r)/4
homog0 += r[:n]
shape0 += r[n:2*n]
homog1 += r[2*n:3*n]
shape1 += r[3*n:4*n]
change.augmentData(homog0, homog1, shape0, shape1)
changes.append(change)
# Now, the decision time
bestchange = self.criterion(changes, self.skeleton)
if bestchange is not None:
self.nok += 1
self.deltaE += bestchange.deltaE(self.skeleton, self.alpha)
bestchange.accept(self.skeleton)
mpitools.Isend_Bool(True, shared, tag=self.verdict_channel)
theindex = changes.index(bestchange)
x = move_candidates[theindex].x
y = move_candidates[theindex].y
mpitools.Isend_DoubleVec([x, y], shared, tag=self.move_channel, size=2)
else:
self.nbad += 1
mpitools.Isend_Bool(False, shared, tag=self.verdict_channel)
## REPORT("DONE WORKING ON NODE #", index, "WITH", shared)
def soloProcess(self, index):
## REPORT("WORKING SOLO ON NODE #", index)
node = self.skeleton.getNodeWithIndex(index)
move_candidates = self.mover(self.skeleton, node) # list of points
move_candidates = [mc for mc in move_candidates if mc] # removes "None"
changes = []
for mc in move_candidates:
change = deputy.DeputyProvisionalChanges()
change.moveNode(node, mc, self.skeleton) # moved the node
changes.append(change)
# Now, the decision time
bestchange = self.criterion(changes, self.skeleton)
if bestchange is not None:
self.nok += 1
self.deltaE += bestchange.deltaE(self.skeleton, self.alpha)
bestchange.accept(self.skeleton)
else:
self.nbad += 1
## REPORT("DONE SOLO ON NODE #", index)
|
import os
import warnings
import numpy as np
from datetime import datetime as dt,timedelta
import pandas as pd
import requests
import pickle
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter as gfilt,gaussian_filter1d as gfilt1d
from scipy.ndimage.filters import minimum_filter
import matplotlib.dates as mdates
try:
import matplotlib as mlib
import matplotlib.lines as mlines
import matplotlib.colors as mcolors
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
except ImportError:
    warnings.warn("Warning: Matplotlib is not installed in your python environment. Plotting functions will not work.")
from .plot import ReconPlot
#Import tools
from .tools import *
from ..utils import *
class ReconDataset:
r"""
Creates an instance of a ReconDataset object containing all recon data for a single storm.
Parameters
----------
    storm : tropycal Storm object
        Requested storm. The storm's name, year and track dictionary are used to retrieve and recenter the recon data.
    save_path : str, optional
        Filepath to save recon data to. Recommended in order to avoid having to re-read the data.
    read_path : str, optional
        Filepath to read previously saved (pickled) recon data from.
Returns
-------
Dataset
An instance of ReconDataset, initialized with the following:
* **missiondata** - A dictionary of missions.
        Each entry is a dataframe from a single mission.
Dictionary keys are given by mission number and agency (e.g. '15_NOAA').
* **recentered** - A dataframe with all missions concatenated together, and columns 'xdist' and 'ydist'
indicating the distance (km) of the ob from the interpolated center of the storm.
Notes
-----
Recon data is currently read in via Tropical Atlantic. Future releases of Tropycal will incorporate NHC recon archives.
"""
def __init__(self, storm, deltap_thresh=8, mission_url_list=None, save_path="", read_path="", update=False):
#Error check
#if save_path != "" and read_path != "":
# raise ValueError("Error: Cannot read in and save a file at the same time.")
#Create URL prefix for reading in recon data
self.url_prefix = 'http://tropicalatlantic.com/recon/recon.cgi?'
self.storm_obj = storm
self.storm = str(storm.name)
self.year = str(storm.year)
self.deltap_thresh = deltap_thresh
self.UPDATE = update
self.mission_url_list = mission_url_list
#If reading in a pickled file, load it in
if read_path != "":
self.missiondata = pickle.load(open(read_path,'rb'))
if self.UPDATE:
self.missiondata = self.allMissions()
#Otherwise, retrieve all mission data for this storm
else:
self.missiondata = self.allMissions()
#Save mission data as a pickle if necessary
if save_path != "": pickle.dump(self.missiondata,open(save_path,'wb'),-1)
#Convert recon data to storm-centered coordinates
self.recentered = self.recenter()
#print(f'Most recent data: {max(self.recentered['time']):%Y %b %d %H:%M} UTC')
#print(f'Most recent center pass: {max(self.recentered.loc[self.recentered['iscenter']>0]['time']):%Y %b %d %H:%M} UTC')
def getMission(self,agency,mission_num,url_mission=None):
if url_mission is None:
url_mission = f'{self.url_prefix}basin=al&year={self.year}&product=hdob&storm={self.storm}&mission={mission_num}&agency={agency}'
content = np.array(requests.get(url_mission).content.decode("utf-8").split('\n'))
obs = [line.split('\"')[1] for line in content if 'option value=' in line][::-1]
for i,ob in enumerate(obs):
url_ob = url_mission+'&ob='+ob
data = pd.read_html(url_ob)[0]
data = data.rename(columns = {[name for name in data if 'Time' in name][0]:'Time'})
if i==0:
mission = data[:-1]
day0 = dt.strptime(self.year+ob[:5],'%Y%m-%d')
else:
mission = mission.append(data[:-1],ignore_index=True)
def getVar(x,name):
a = np.nan
if x!='-' and '*' not in x and x!='No Wind':
if name == 'Time':
a = x
if name == 'Coordinates':
lat,lon = x.split(' ')
lat = float(lat[:-1])*[1,-1][lat[-1]=='S']
lon = float(lon[:-1])*[1,-1][lon[-1]=='W']
a = np.array((lon,lat))
elif name == 'Aircraft Static Air Pressure':
a=float(x.split(' mb')[0])
elif name == 'Aircraft Geo. Height':
a=float(x.split(' meters')[0].replace(',', ''))
elif name == 'Extrapolated Sfc. Pressure':
a=float(x.split(' mb')[0])
elif name == 'Flight Level Wind (30 sec. Avg.)':
a=x.split(' ')
wdir = float(a[1][:-1])
wspd = float(a[3])
a = np.array((wdir,wspd))
elif name == 'Peak (10 sec. Avg.) Flight Level Wind':
a=float(x.split(' knots')[0])
elif name == 'SFMR Peak (10s Avg.) Sfc. Wind':
a=x.split(' knots')
a=float(a[0])
if name in ['Coordinates','Flight Level Wind (30 sec. Avg.)'] and type(a)==float:
a=np.array([a]*2)
return a
varnames = ['Time','Coordinates','Aircraft Static Air Pressure','Aircraft Geo. Height',
'Extrapolated Sfc. Pressure','Flight Level Wind (30 sec. Avg.)',
'Peak (10 sec. Avg.) Flight Level Wind','SFMR Peak (10s Avg.) Sfc. Wind']
mission = {name:[getVar(item,name) for item in mission[name]] for name in varnames}
for i,t in enumerate(mission['Time']):
mission['Time'][i] = day0.replace(hour=int(t[:2]),minute=int(t[3:5]),second=int(t[6:8]))
if i>0 and (mission['Time'][i]-mission['Time'][i-1]).total_seconds()<0:
mission['Time'][i]+=timedelta(days=1)
data={}
data['lon'],data['lat'] = zip(*mission['Coordinates'])
data['time'] = mission['Time']
data['p_sfc'] = mission['Extrapolated Sfc. Pressure']
data['wdir'],data['wspd'] = zip(*mission['Flight Level Wind (30 sec. Avg.)'])
data['pkwnd'] = mission['Peak (10 sec. Avg.) Flight Level Wind']
data['sfmr'] = mission['SFMR Peak (10s Avg.) Sfc. Wind']
data['plane_p'] = mission['Aircraft Static Air Pressure']
data['plane_z'] = mission['Aircraft Geo. Height']
return_data = pd.DataFrame.from_dict(data)
return_data['time'] = [pd.to_datetime(i) for i in return_data['time']]
#remove nan's for lat/lon coordinates
return_data = return_data.dropna(subset=['lat', 'lon'])
return return_data
def allMissions(self):
url_storm = f'{self.url_prefix}basin=al&year={self.year}&storm={self.storm}&product=hdob'
if self.mission_url_list is None:
missions = pd.read_html(url_storm)[0]
else:
URL_LIST = self.mission_url_list
missions = pd.DataFrame.from_dict({'Agency':['listedurl']*len(URL_LIST),'MissionNumber':[f'{n:02}' for n in range(len(URL_LIST))],'URL':URL_LIST})
if self.UPDATE:
missiondata = self.missiondata
lastMissionNumber = max([int(x.split('_')[0]) for x in list(missiondata.keys())])
idxf = [x for x in missions['MissionNumber']].index(lastMissionNumber)+1
idxf = min([idxf+1,len(missions)]) # update last two missions
else:
idxf = len(missions)
missiondata={}
timer_start = dt.now()
print(f'--> Starting to read in recon missions')
for i_mission in range(0,idxf):
if self.mission_url_list is None:
mission_num = str(missions['MissionNumber'][i_mission]).zfill(2)
agency = ''.join(filter(str.isalpha, missions['Agency'][i_mission]))
missiondata[f'{mission_num}_{agency}'] = self.getMission(agency,mission_num)
else:
mission_num = missions['MissionNumber'][i_mission]
agency = missions['Agency'][i_mission]
url = missions['URL'][i_mission]
                missiondata[f'{mission_num}_{agency}'] = self.getMission(agency,mission_num,url)
print(f'{mission_num}_{agency}')
print('--> Completed reading in recon missions (%.2f seconds)' % (dt.now()-timer_start).total_seconds())
return missiondata
def find_centers(self,data):
def fill_nan(A):
#Interpolate to fill nan values
A = np.array(A)
inds = np.arange(len(A))
good = np.where(np.isfinite(A))
good_grad = np.gradient(good[0])
if len(good[0])>=3:
f = interp1d(inds[good], A[good],bounds_error=False,kind='quadratic')
B = np.where(np.isfinite(A)[good[0][0]:good[0][-1]+1],
A[good[0][0]:good[0][-1]+1],
f(inds[good[0][0]:good[0][-1]+1]))
return [np.nan]*good[0][0]+list(B)+[np.nan]*(inds[-1]-good[0][-1])
else:
return [np.nan]*len(A)
#Check that sfc pressure spread is big enough to identify real minima
if np.nanpercentile(data['p_sfc'],90)-np.nanpercentile(data['p_sfc'],10)>self.deltap_thresh:
data['p_sfc'][:20]=[np.nan]*20 #NaN out the first 10 minutes of the flight
p_sfc_interp = fill_nan(data['p_sfc']) #Interp p_sfc across missing data
wspd_interp = fill_nan(data['wspd']) #Interp wspd across missing data
#Smooth p_sfc and wspd
p_sfc_smooth = [np.nan]*1+list(np.convolve(p_sfc_interp,[1/3]*3,mode='valid'))+[np.nan]*1
wspd_smooth = [np.nan]*1+list(np.convolve(wspd_interp,[1/3]*3,mode='valid'))+[np.nan]*1
#Add wspd to p_sfc to encourage finding p mins with wspd mins
#and prevent finding p mins in intense thunderstorms
pw_test = np.array(p_sfc_smooth)+np.array(wspd_smooth)*.1
#Find mins in 15-minute windows
imin = np.nonzero(pw_test == minimum_filter(pw_test,30))[0]
            #Only use mins if p_sfc is between 800 mb and the 15th percentile of mission p_sfc data, and plane p is 550-950 mb
imin = [i for i in imin if 800<p_sfc_interp[i]<np.nanpercentile(data['p_sfc'],15) and \
550<data['plane_p'][i]<950]
else:
imin=[]
data['iscenter'] = np.zeros(len(data['p_sfc']))
for i in imin:
j = data.index.values[i]
data['iscenter'][j] = 1
return data
def recenter(self,use='all'):
self.use = use
def stitchMissions():
list_of_dfs=[]
for name in self.missiondata:
if self.use == 'all' or self.use in name:
mission = self.missiondata[name]
tmp = self.find_centers(mission)
list_of_dfs.append( tmp )
data_concat = pd.concat(list_of_dfs,ignore_index=True)
data_chron = data_concat.sort_values(by='time').reset_index(drop=True)
return data_chron
data = stitchMissions()
centers = data.loc[data['iscenter']>0]
if len(centers)<2:
            print('Sorry, fewer than 2 center passes')
else:
print(f'Found {len(centers)} center passes!')
timer_start = dt.now()
#Interpolate center position to time of each ob
f1 = interp1d(mdates.date2num(centers['time']),centers['lon'],fill_value='extrapolate',kind='linear')
interp_clon = f1(mdates.date2num(data['time']))
f2 = interp1d(mdates.date2num(centers['time']),centers['lat'],fill_value='extrapolate',kind='linear')
interp_clat = f2(mdates.date2num(data['time']))
#Get x,y distance of each ob from coinciding interped center position
data['xdist'] = [great_circle( (interp_clat[i],interp_clon[i]), \
(interp_clat[i],data['lon'][i]) ).kilometers* \
[1,-1][int(data['lon'][i] < interp_clon[i])] for i in range(len(data))]
data['ydist'] = [great_circle( (interp_clat[i],interp_clon[i]), \
(data['lat'][i],interp_clon[i]) ).kilometers* \
[1,-1][int(data['lat'][i] < interp_clat[i])] for i in range(len(data))]
print('--> Completed recentering recon data (%.2f seconds)' % (dt.now()-timer_start).total_seconds())
return data
def __getSubTime(self,time):
if isinstance(time,(tuple,list)):
t1=min(time)
t2=max(time)
else:
t1 = time-timedelta(hours=6)
t2 = time+timedelta(hours=6)
subRecon = self.recentered.loc[(self.recentered['time']>=t1) & \
(self.recentered['time']<t2)]
return subRecon
def findMission(self,time):
r"""
Returns the name of a mission or list of missions given a specified time.
Parameters
----------
time : datetime.datetime or list
Datetime object or list of datetime objects representing the time of the requested mission.
Returns
-------
list
The names of any/all missions that had in-storm observations during the specified time.
"""
if isinstance(time,list):
t1=min(time)
t2=max(time)
else:
t1 = t2 = time
selected=[]
for name in self.missiondata:
t_start = min(self.missiondata[name]['time'])
t_end = max(self.missiondata[name]['time'])
if (t_start<t1<t_end) or (t_start<t2<t_end) or (t1<t_start<t2):
selected.append(name)
if len(selected)==0:
print('There were no in-storm recon missions during this time')
return selected
def plot_points(self,recon_select=None,varname='wspd',domain="dynamic",plane_p_range=None,\
ax=None,return_ax=False,cartopy_proj=None,**kwargs):
r"""
Creates a plot of recon data points.
Parameters
----------
recon_select : Requested recon data
pandas.DataFrame or dict,
or string referencing the mission name (e.g. '12_NOAA'),
or datetime or list of start/end datetimes.
varname : str
Variable to plot. Can be one of the following keys in recon_select dataframe:
* **"sfmr"** = SFMR surface wind
* **"wspd"** = 30-second flight level wind (default)
* **"pkwnd"** = 10-second flight level wind
* **"p_sfc"** = extrapolated surface pressure
domain : str
Domain for the plot. Default is "dynamic". Please refer to :ref:`options-domain` for available domain options.
ax : axes
Instance of axes to plot on. If none, one will be generated. Default is none.
return_ax : bool
If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.
cartopy_proj : ccrs
Instance of a cartopy projection to use. If none, one will be generated. Default is none.
Other Parameters
----------------
prop : dict
Customization properties of recon plot. Please refer to :ref:`options-prop-recon-plot` for available options.
map_prop : dict
Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.
"""
#Pop kwargs
prop = kwargs.pop('prop',{})
map_prop = kwargs.pop('map_prop',{})
#Get plot data
if recon_select is None:
dfRecon = self.recentered
elif isinstance(recon_select,pd.core.frame.DataFrame):
dfRecon = recon_select
elif isinstance(recon_select,dict):
dfRecon = pd.DataFrame.from_dict(recon_select)
elif isinstance(recon_select,str):
dfRecon = self.missiondata[recon_select]
else:
dfRecon = self.__getSubTime(recon_select)
#Apply flight level filter
if plane_p_range is not None:
dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]
#Create instance of plot object
self.plot_obj = ReconPlot()
#Create cartopy projection
        if cartopy_proj is None:
self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)
cartopy_proj = self.plot_obj.proj
#Plot recon
plot_info = self.plot_obj.plot_points(self.storm_obj,dfRecon,domain,varname=varname,\
ax=ax,return_ax=return_ax,prop=prop,map_prop=map_prop)
#Return axis
        if ax is not None or return_ax:
return plot_info
def plot_hovmoller(self,recon_select=None,varname='wspd',radlim=None,track_dict=None,plane_p_range=None,\
window=6,align='center',ax=None,return_ax=False,**kwargs):
r"""
Creates a hovmoller plot of azimuthally-averaged recon data.
Parameters
----------
recon_select : Requested recon data
pandas.DataFrame or dict,
or datetime or list of start/end datetimes.
        varname : str
            Variable to average and plot (e.g., 'wspd').
ax : axes
Instance of axes to plot on. If none, one will be generated. Default is none.
return_ax : bool
If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.
Other Parameters
----------------
prop : dict
Customization properties for recon plot. Please refer to :ref:`options-prop-recon-hovmoller` for available options.
"""
#Pop kwargs
prop = kwargs.pop('prop',{})
default_prop = {'cmap':'category','levels':None,'smooth_contourf':False}
for key in default_prop.keys():
if key not in prop.keys():
prop[key]=default_prop[key]
#Get recon data based on recon_select
if recon_select is None:
dfRecon = self.recentered
elif isinstance(recon_select,pd.core.frame.DataFrame):
dfRecon = recon_select
elif isinstance(recon_select,dict):
dfRecon = pd.DataFrame.from_dict(recon_select)
else:
dfRecon = self.__getSubTime(recon_select)
#Apply flight level filter
if plane_p_range is not None:
dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]
#Retrieve track dictionary if none is specified
if track_dict is None:
track_dict = self.storm_obj.dict
#Interpolate recon data to a hovmoller
iRecon = interpRecon(dfRecon,varname,radlim,window=window,align=align)
Hov_dict = iRecon.interpHovmoller(track_dict)
#title = get_recon_title(varname) #may not be necessary
#If no contour levels specified, generate levels based on data min and max
if prop['levels'] is None:
prop['levels'] = (np.nanmin(Hov_dict['hovmoller']),np.nanmax(Hov_dict['hovmoller']))
#Retrieve updated contour levels and colormap based on input arguments and variable type
cmap,clevs = get_cmap_levels(varname,prop['cmap'],prop['levels'])
#Retrieve hovmoller times, radii and data
time = Hov_dict['time']
radius = Hov_dict['radius']
vardata = Hov_dict['hovmoller']
#Error check time
time = [dt.strptime((i.strftime('%Y%m%d%H%M')),'%Y%m%d%H%M') for i in time]
#------------------------------------------------------------------------------
#Create plot
plt.figure(figsize=(9,11),dpi=150)
ax = plt.subplot()
#Plot surface category colors individually, necessitating normalizing colormap
if varname in ['vmax','sfmr','fl_to_sfc'] and prop['cmap'] == 'category':
norm = mcolors.BoundaryNorm(clevs,cmap.N)
cf = ax.contourf(radius,time,gfilt1d(vardata,sigma=3,axis=1),
levels=clevs,cmap=cmap,norm=norm)
#Multiple clevels or without smooth contouring
elif len(prop['levels']) > 2 or prop['smooth_contourf'] == False:
cf = ax.contourf(radius,time,gfilt1d(vardata,sigma=3,axis=1),
levels=clevs,cmap=cmap)
#Automatically generated levels with smooth contouring
else:
cf = ax.contourf(radius,time,gfilt1d(vardata,sigma=3,axis=1),
cmap=cmap,levels=np.linspace(min(prop['levels']),max(prop['levels']),256))
ax.axis([0,max(radius),min(time),max(time)])
#Plot colorbar
cbar = plt.colorbar(cf,orientation='horizontal',pad=0.1)
#Format y-label ticks and labels as dates
ax.yaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H'))
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
#Set axes labels
ax.set_ylabel('UTC Time (MM-DD HH)',fontsize=15)
ax.set_xlabel('Radius (km)',fontsize=15)
#--------------------------------------------------------------------------------------
#Generate left and right title strings
title_left, title_right = hovmoller_plot_title(self.storm_obj,Hov_dict,varname)
ax.set_title(title_left,loc='left',fontsize=16,fontweight='bold')
ax.set_title(title_right,loc='right',fontsize=12)
#Return axis
if return_ax:
return ax
#PLOT FUNCTION FOR RECON MAPS
def plot_maps(self,recon_select=None,varname='wspd',track_dict=None,recon_stats=None,domain="dynamic",\
window=6,align='center',radlim=None,plane_p_range=None,ax=None,return_ax=False,savetopath=None,cartopy_proj=None,**kwargs):
#plot_time, plot_mission (only for dots)
r"""
Creates maps of interpolated recon data.
Parameters
----------
recon_select : Requested recon data
pandas.DataFrame or dict,
or string referencing the mission name (e.g. '12_NOAA'),
or datetime or list of start/end datetimes.
varname : str
Variable to plot. Can be one of the following keys in recon_select dataframe:
* **"sfmr"** = SFMR surface wind
* **"wspd"** = 30-second flight level wind (default)
* **"pkwnd"** = 10-second flight level wind
* **"p_sfc"** = extrapolated surface pressure
domain : str
Domain for the plot. Default is "dynamic". Please refer to :ref:`options-domain` for available domain options.
ax : axes
Instance of axes to plot on. If none, one will be generated. Default is none.
return_ax : bool
If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.
cartopy_proj : ccrs
Instance of a cartopy projection to use. If none, one will be generated. Default is none.
Other Parameters
----------------
prop : dict
Customization properties of recon plot. Please refer to :ref:`options-prop-recon-swath` for available options.
map_prop : dict
Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.
"""
#Pop kwargs
prop = kwargs.pop('prop',{})
map_prop = kwargs.pop('map_prop',{})
#Get plot data
ONE_MAP = False
if recon_select is None:
dfRecon = self.recentered
elif isinstance(recon_select,pd.core.frame.DataFrame):
dfRecon = recon_select
elif isinstance(recon_select,dict):
dfRecon = pd.DataFrame.from_dict(recon_select)
elif isinstance(recon_select,str):
dfRecon = self.missiondata[recon_select]
else:
dfRecon = self.__getSubTime(recon_select)
if not isinstance(recon_select,(tuple,list)):
ONE_MAP = True
MULTIVAR=False
if isinstance(varname,(tuple,list)):
MULTIVAR=True
#Apply flight level filter
if plane_p_range is not None:
dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]
if track_dict is None:
track_dict = self.storm_obj.dict
#Error check for time dimension name
if 'time' not in track_dict.keys():
track_dict['time'] = track_dict['date']
if ONE_MAP:
f = interp1d(mdates.date2num(track_dict['time']),track_dict['lon'], fill_value='extrapolate')
clon = f(mdates.date2num(recon_select))
f = interp1d(mdates.date2num(track_dict['time']),track_dict['lat'], fill_value='extrapolate')
clat = f(mdates.date2num(recon_select))
#clon = np.interp(mdates.date2num(recon_select),mdates.date2num(track_dict['time']),track_dict['lon'])
#clat = np.interp(mdates.date2num(recon_select),mdates.date2num(track_dict['time']),track_dict['lat'])
track_dict = {'time':recon_select,'lon':clon,'lat':clat}
if MULTIVAR:
Maps=[]
for v in varname:
iRecon = interpRecon(dfRecon,v,radlim,window=window,align=align)
tmpMaps = iRecon.interpMaps(track_dict)
Maps.append(tmpMaps)
else:
iRecon = interpRecon(dfRecon,varname,radlim,window=window,align=align)
Maps = iRecon.interpMaps(track_dict)
#titlename,units = get_recon_title(varname)
        if prop.get('levels') is None:
prop['levels'] = np.arange(np.floor(np.nanmin(Maps['maps'])/10)*10,
np.ceil(np.nanmax(Maps['maps'])/10)*10+1,10)
if not ONE_MAP:
if savetopath is True:
savetopath = f'{self.storm}{self.year}_{varname}_maps'
                os.makedirs(savetopath, exist_ok=True)
if MULTIVAR:
Maps2 = Maps[1]
Maps = Maps[0]
print(np.nanmax(Maps['maps']),np.nanmin(Maps2['maps']))
figs = []
for i,t in enumerate(Maps['time']):
Maps_sub = {'time':t,'grid_x':Maps['grid_x'],'grid_y':Maps['grid_y'],'maps':Maps['maps'][i],\
'center_lon':Maps['center_lon'][i],'center_lat':Maps['center_lat'][i],'stats':Maps['stats']}
#Create instance of plot object
self.plot_obj = ReconPlot()
#Create cartopy projection
self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)
cartopy_proj = self.plot_obj.proj
#Maintain the same lat / lon dimensions for all dynamic maps
#Determined by the dynamic domain from the first map
                if i > 0 and domain == 'dynamic':
d1 = {'n':Maps_sub['center_lat']+dlat,\
's':Maps_sub['center_lat']-dlat,\
'e':Maps_sub['center_lon']+dlon,\
'w':Maps_sub['center_lon']-dlon}
else:
d1 = domain
#Plot recon
if MULTIVAR:
Maps_sub1 = dict(Maps_sub)
Maps_sub2 = dict(Maps_sub)
Maps_sub = [Maps_sub1,Maps_sub2]
Maps_sub[1]['maps'] = Maps2['maps'][i]
print(np.nanmax(Maps_sub[0]['maps']),np.nanmin(Maps_sub[1]['maps']))
plot_ax,d0 = self.plot_obj.plot_maps(self.storm_obj,Maps_sub,varname,recon_stats,\
domain=d1,ax=ax,return_ax=True,return_domain=True,prop=prop,map_prop=map_prop)
#Get domain dimensions from the first map
if i==0:
dlat = .5*(d0['n']-d0['s'])
dlon = .5*(d0['e']-d0['w'])
figs.append(plot_ax)
if savetopath is not None:
plt.savefig(f'{savetopath}/{t.strftime("%Y%m%d%H%M")}',bbox_inches='tight')
plt.close()
if savetopath is None:
return figs
else:
#Create instance of plot object
self.plot_obj = ReconPlot()
#Create cartopy projection
if cartopy_proj is None:
self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)
cartopy_proj = self.plot_obj.proj
#Plot recon
plot_info = self.plot_obj.plot_maps(self.storm_obj,Maps,varname,recon_stats,\
domain,ax,return_ax,prop=prop,map_prop=map_prop)
#Return axis
if ax is not None or return_ax:
return plot_info
#PLOT FUNCTION FOR RECON SWATH
def plot_swath(self,recon_select=None,varname='wspd',swathfunc=None,track_dict=None,radlim=None,\
domain="dynamic",plane_p_range=None,ax=None,return_ax=False,cartopy_proj=None,**kwargs):
r"""
Creates a map plot of a swath of interpolated recon data.
Parameters
----------
recon_select : Requested recon data
pandas.DataFrame or dict,
or string referencing the mission name (e.g. '12_NOAA'),
or datetime or list of start/end datetimes.
varname : str
Variable to plot. Can be one of the following keys in recon_select dataframe:
* **"sfmr"** = SFMR surface wind
* **"wspd"** = 30-second flight level wind (default)
* **"pkwnd"** = 10-second flight level wind
* **"p_sfc"** = extrapolated surface pressure
swathfunc : function
Function to operate on interpolated recon data.
e.g., np.max, np.min, or percentile function
domain : str
Domain for the plot. Default is "dynamic". Please refer to :ref:`options-domain` for available domain options.
ax : axes
Instance of axes to plot on. If none, one will be generated. Default is none.
return_ax : bool
If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.
cartopy_proj : ccrs
Instance of a cartopy projection to use. If none, one will be generated. Default is none.
Other Parameters
----------------
prop : dict
Customization properties of recon plot. Please refer to :ref:`options-prop-recon-swath` for available options.
map_prop : dict
Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.
"""
#Pop kwargs
prop = kwargs.pop('prop',{})
map_prop = kwargs.pop('map_prop',{})
#Get plot data
if recon_select is None:
dfRecon = self.recentered
elif isinstance(recon_select,pd.core.frame.DataFrame):
dfRecon = recon_select
elif isinstance(recon_select,dict):
dfRecon = pd.DataFrame.from_dict(recon_select)
elif isinstance(recon_select,str):
dfRecon = self.missiondata[recon_select]
else:
dfRecon = self.__getSubTime(recon_select)
#Apply flight level filter
if plane_p_range is not None:
dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]
if track_dict is None:
track_dict = self.storm_obj.dict
if swathfunc is None:
if varname == 'p_sfc':
swathfunc = np.min
else:
swathfunc = np.max
iRecon = interpRecon(dfRecon,varname)
Maps = iRecon.interpMaps(track_dict,interval=.2)
#Create instance of plot object
self.plot_obj = ReconPlot()
#Create cartopy projection
        if cartopy_proj is None:
self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)
cartopy_proj = self.plot_obj.proj
#Plot recon
plot_info = self.plot_obj.plot_swath(self.storm_obj,Maps,varname,swathfunc,track_dict,radlim,\
domain,ax,return_ax,prop=prop,map_prop=map_prop)
#Return axis
        if ax is not None or return_ax:
return plot_info
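# Illustrative usage sketch (an assumption, not taken from this file):
# ReconDataset expects a tropycal Storm object and downloads data over the
# network, so this is left as a comment rather than executable code.
#
#     from tropycal import tracks
#     basin = tracks.TrackDataset(basin='north_atlantic')
#     storm = basin.get_storm(('michael', 2018))
#     recon = ReconDataset(storm, save_path='michael_recon.pickle')
#     recon.plot_points(varname='sfmr')
#     recon.plot_hovmoller(varname='wspd')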
|
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate,\
GlobalAveragePooling2D, add, UpSampling2D, Dropout, Activation
from tensorflow.keras.models import Model
def unet(num_channels,
ds=2,
lr=1e-4,
verbose=0,):
inputs = Input((None, None, num_channels))
conv1 = Conv2D(64//ds, 3, activation='relu', padding='same', )(inputs)
conv1 = Conv2D(64//ds, 3, activation='relu', padding='same', )(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128//ds, 3, activation='relu', padding='same',)(pool1)
conv2 = Conv2D(128//ds, 3, activation='relu', padding='same', )(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256//ds, 3, activation='relu', padding='same', )(pool2)
conv3 = Conv2D(256//ds, 3, activation='relu', padding='same', )(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512//ds, 3, activation='relu', padding='same', )(pool3)
conv4 = Conv2D(512//ds, 3, activation='relu', padding='same', )(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024//ds, 3, activation='relu', padding='same', )(pool4)
conv5 = Conv2D(1024//ds, 3, activation='relu', padding='same', )(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512//ds, 3, activation='relu', padding='same')(merge6)
conv6 = Conv2D(512//ds, 3, activation='relu', padding='same')(conv6)
up7 = Conv2D(256//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256//ds, 3, activation='relu', padding='same')(merge7)
conv7 = Conv2D(256//ds, 3, activation='relu', padding='same')(conv7)
up8 = Conv2D(128//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128//ds, 3, activation='relu', padding='same')(merge8)
conv8 = Conv2D(128//ds, 3, activation='relu', padding='same')(conv8)
up9 = Conv2D(64//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64//ds, 3, activation='relu', padding='same')(merge9)
conv9 = Conv2D(64//ds, 3, activation='relu', padding='same')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same', )(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
return model
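# Minimal usage sketch (assumptions: a 3-channel input and binary
# segmentation trained with Adam + binary cross-entropy; the loss and
# optimizer are illustrative choices, not dictated by this module).
# Note that the lr and verbose arguments of unet() are currently unused.
if __name__ == "__main__":
    import numpy as np
    import tensorflow as tf

    model = unet(num_channels=3, ds=2)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    model.summary()
    # Spatial dims must be divisible by 16 (four 2x2 poolings) for the
    # skip-connection concatenations to line up.
    dummy = np.zeros((1, 64, 64, 3), dtype="float32")
    print(model.predict(dummy).shape)  # (1, 64, 64, 1)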
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
""" Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
        an_apiview = [
            'Uses HTTP methods as functions (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
return Response({'an_apiview': an_apiview})
def post(self,request):
"""Create a hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message' : message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
    def put(self, request, pk=None):
        """Handle updating an object"""
        return Response({'method': 'PUT'})
    def patch(self, request, pk=None):
        """Handle a partial update of an object"""
        return Response({'method': 'PATCH'})
    def delete(self, request, pk=None):
        """Handle deleting an object"""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""test viewwset"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""return a hello message"""
a_viewset = [
'uses actions (list,create,retrieve,updte,partial_updare)'
]
return Response({'a_viewset': a_viewset})
def create(self,request):
"""create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
            message = f'Hello {name}!'
return Response({'message':message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID"""
        return Response({'http_method': 'GET'})
    def update(self, request, pk=None):
        """Handle updating an object"""
        return Response({'http_method': 'PUT'})
def partial_update(self,request,pk=None):
"""handle updating part of an object"""
return Response({'http_method':'PATCH'})
def destroy(self,request, pk=None):
"""handle removing an object"""
return Response({'http_method':'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name','email',)
class UserLoginApiView(ObtainAuthToken):
""""Handle creating user authenticaiton tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
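# Illustrative URL wiring sketch (an assumption; a typical profiles_api/urls.py
# for these views -- the route names and prefixes are hypothetical):
#
#     from django.urls import path, include
#     from rest_framework.routers import DefaultRouter
#     from profiles_api import views
#
#     router = DefaultRouter()
#     router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
#     router.register('profile', views.UserProfileViewSet)
#
#     urlpatterns = [
#         path('hello-view/', views.HelloApiView.as_view()),
#         path('login/', views.UserLoginApiView.as_view()),
#         path('', include(router.urls)),
#     ]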
|
#!/usr/bin/env python
"""A widget to display changing values in real time as a strip chart
Known issues:
Matplotlib's defaults present a number of challenges for making a nice strip chart display.
Here are manual workarounds for some common problems:
- Memory Leak:
  Matplotlib 1.0.0 has a memory leak in canvas.draw(), at least when using TkAgg:
<https://sourceforge.net/tracker/?func=detail&atid=560720&aid=3124990&group_id=80706>
  Unfortunately canvas.draw is the only way to update the display after altering the x/time axis.
Thus every StripChartWdg will leak memory until the matplotlib bug is fixed;
the best you can do is reduce the leak rate by increasing updateInterval.
- Jumping Ticks:
By default the major time ticks and grid jump to new values as time advances. I haven't found an
automatic way to keep them steady, but you can do it manually by following these examples:
# show a major tick every 10 seconds on even 10 seconds
stripChart.xaxis.set_major_locator(matplotlib.dates.SecondLocator(bysecond=range(0, 60, 10)))
# show a major tick every 5 seconds on even 5 minutes
stripChart.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=range(0, 60, 5)))
- Reducing The Spacing Between Subplots:
Adjacent subplots are rather widely spaced. You can manually shrink the spacing but then
the major Y labels will overlap. Here is a technique that includes "pruning" the top major tick label
from each subplot and then shrinking the subplot horizontal spacing:
for subplot in stripChartWdg.subplotArr:
subplot.yaxis.get_major_locator().set_params(prune = "upper")
stripChartWdg.figure.subplots_adjust(hspace=0.1)
- Truncated X Axis Labels:
The x label is truncated if the window is short, due to poor auto-layout on matplotlib's part.
Also the top and sides may have too large a margin. Tony S Yu provided code that should solve the
issue automatically, but I have not yet incorporated it. You can try the following manual tweak:
(values are fraction of total window height or width, so they must be in the range 0-1):
stripChartWdg.figure.subplots_adjust(bottom=0.15) # top=..., left=..., right=...
Unfortunately, values that look good at one window size may not be suitable at another.
- Undesirable colors and font sizes:
If you are unhappy with the default choices of font size and background color
you can edit the .matplotlibrc file or make settings programmatically.
Some useful programmatic settings:
# by default the background color of the outside of the plot is gray; set using figure.facecolor:
matplotlib.rc("figure", facecolor="white")
# by default legends have large text; set using legend.fontsize:
matplotlib.rc("legend", fontsize="medium")
Requirements:
- Requires matplotlib built with TkAgg support
Acknowledgements:
I am grateful to Benjamin Root, Tony S Yu and others on matplotlib-users
for advice on tying the x axes together and improving the layout.
History:
2010-09-29 ROwen
2010-11-30 ROwen Fixed a memory leak (Line._purgeOldData wasn't working correctly).
2010-12-10 ROwen Document a memory leak caused by matplotlib's canvas.draw.
2010-12-23 ROwen Backward-incompatible changes:
- addPoint is now called on the object returned by addLine, not StripChartWdg.
This eliminates the need to give lines unique names.
- addPoint is silently ignored if y is None
- addLine and addConstantLine have changed:
- There is no "name" argument; use label if you want a name that shows up in legends.
- The label does not have to be unique.
- They return an object.
Added removeLine method.
2010-12-29 ROwen Document useful arguments for addLine.
2012-05-31 ROwen Add a clear method to StripChartWdg and _Line.
2012-06-04 ROwen Reduce CPU usage by doing less work if not visible (not mapped).
2012-07-09 ROwen Modified to use opscore.RO.TkUtil.Timer.
2012-09-18 ROwen Explicitly import matplotlib.dates to avoid a problem with matplotlib 1.2.0rc1
2015-09-24 ROwen Replace "== None" with "is None" to modernize the code.
2015-11-03 ROwen Replace "!= None" with "is not None" to modernize the code.
"""
__all__ = ["StripChartWdg"]
import bisect
import datetime
import time
import numpy
from six.moves import tkinter
import matplotlib
import matplotlib.dates
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from opscore.RO.TkUtil import Timer
class StripChartWdg(tkinter.Frame):
"""A widget to changing values in real time as a strip chart
Usage Hints:
- For each variable quantity to display:
- Call addLine once to specify the quantity
- Call addPoint for each new data point you wish to display
- For each constant line (e.g. limit) to display call addConstantLine
- To make sure a plot includes one or two y values (e.g. 0 or a range of values) call showY
- To manually scale a Y axis call setYLimits (by default all y axes are autoscaled).
- All supplied times are POSIX timestamps (e.g. as supplied by time.time()).
You may choose the kind of time displayed on the time axis (e.g. UTC or local time) using cnvTimeFunc
and the format of that time using dateFormat.
Known Issues:
matplotlib's defaults present a number of challenges for making a nice strip chart display.
Some issues and manual solutions are discussed in the main file's document string.
Potentially Useful Attributes:
- canvas: the matplotlib FigureCanvas
- figure: the matplotlib Figure
- subplotArr: list of subplots, from top to bottom; each is a matplotlib Subplot object,
which is basically an Axes object but specialized to live in a rectangular grid
- xaxis: the x axis shared by all subplots
"""
def __init__(self,
master,
timeRange = 3600,
numSubplots = 1,
width = 8,
height = 2,
showGrid = True,
dateFormat = "%H:%M:%S",
updateInterval = None,
cnvTimeFunc = None,
):
"""Construct a StripChartWdg with the specified time range
Inputs:
- master: Tk parent widget
- timeRange: range of time displayed (seconds)
- width: width of graph in inches
- height: height of graph in inches
- numSubplots: the number of subplots
- showGrid: if True a grid is shown
- dateFormat: format for major axis labels, using time.strftime format
- updateInterval: how often the time axis is updated (seconds); if None a value is calculated
- cnvTimeFunc: a function that takes a POSIX timestamp (e.g. time.time()) and returns matplotlib days;
typically an instance of TimeConverter; defaults to TimeConverter(useUTC=False)
"""
tkinter.Frame.__init__(self, master)
self._timeRange = timeRange
self._isVisible = self.winfo_ismapped()
self._isFirst = True
if updateInterval is None:
updateInterval = max(0.1, min(5.0, timeRange / 2000.0))
self.updateInterval = float(updateInterval)
# print "updateInterval=", self.updateInterval
if cnvTimeFunc is None:
cnvTimeFunc = TimeConverter(useUTC=False)
self._cnvTimeFunc = cnvTimeFunc
# how many time axis updates occur before purging old data
self._maxPurgeCounter = max(1, int(0.5 + (5.0 / self.updateInterval)))
self._purgeCounter = 0
self.figure = matplotlib.figure.Figure(figsize=(width, height), frameon=True)
self.canvas = FigureCanvasTkAgg(self.figure, self)
self.canvas.get_tk_widget().grid(row=0, column=0, sticky="news")
self.canvas.mpl_connect('draw_event', self._handleDrawEvent)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
bottomSubplot = self.figure.add_subplot(numSubplots, 1, numSubplots)
self.subplotArr = [self.figure.add_subplot(numSubplots, 1, n+1, sharex=bottomSubplot) \
for n in range(numSubplots-1)] + [bottomSubplot]
if showGrid:
for subplot in self.subplotArr:
subplot.grid(True)
self.xaxis = bottomSubplot.xaxis
bottomSubplot.xaxis_date()
self.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(dateFormat))
# dictionary of constant line name: (matplotlib Line2D, matplotlib Subplot)
self._constLineDict = dict()
for subplot in self.subplotArr:
subplot._scwLines = [] # a list of contained _Line objects;
# different than the standard lines property in that:
# - lines contains Line2D objects
# - lines contains constant lines as well as data lines
subplot._scwBackground = None # background for animation
subplot.label_outer() # disable axis labels on all but the bottom subplot
subplot.set_ylim(auto=True) # set auto scaling for the y axis
self.bind("<Map>", self._handleMap)
self.bind("<Unmap>", self._handleUnmap)
self._timeAxisTimer = Timer()
self._updateTimeAxis()
def addConstantLine(self, y, subplotInd=0, **kargs):
"""Add a new constant to plot
Inputs:
- y: value of constant line
- subplotInd: index of subplot
- All other keyword arguments are sent to the matplotlib Line2D constructor
to control the appearance of the data. See addLine for more information.
"""
subplot = self.subplotArr[subplotInd]
line2d = subplot.axhline(y, **kargs)
yMin, yMax = subplot.get_ylim()
if subplot.get_autoscaley_on() and numpy.isfinite(y) and not (yMin <= y <= yMax):
subplot.relim()
subplot.autoscale_view(scalex=False, scaley=True)
return line2d
def addLine(self, subplotInd=0, **kargs):
"""Add a new quantity to plot
Inputs:
- subplotInd: index of subplot
- All other keyword arguments are sent to the matplotlib Line2D constructor
to control the appearance of the data. Useful arguments include:
- label: name of line (displayed in a Legend)
- color: color of line
- linestyle: style of line (defaults to a solid line); "" for no line, "--" for dashed, etc.
- marker: marker shape, e.g. "+"
Please do not attempt to control other sorts of line properties, such as its data.
Arguments to avoid include: animated, data, xdata, ydata, zdata, figure.
"""
subplot = self.subplotArr[subplotInd]
return _Line(
subplot = subplot,
cnvTimeFunc = self._cnvTimeFunc,
wdg = self,
**kargs)
def clear(self):
"""Clear data in all non-constant lines
"""
for subplot in self.subplotArr:
for line in subplot._scwLines:
line.clear()
def getDoAutoscale(self, subplotInd=0):
return self.subplotArr[subplotInd].get_autoscaley_on()
def removeLine(self, line):
"""Remove an existing line added by addLine or addConstantLine
Raise an exception if the line is not found
"""
if isinstance(line, _Line):
# a _Line object needs to be removed from _scwLines as well as the subplot
line2d = line.line2d
subplot = line.subplot
subplot._scwLines.remove(line)
else:
# a constant line is just a matplotlib Line2D instance
line2d = line
subplot = line.axes
subplot.lines.remove(line2d)
if subplot.get_autoscaley_on():
subplot.relim()
subplot.autoscale_view(scalex=False, scaley=True)
self.canvas.draw()
def setDoAutoscale(self, doAutoscale, subplotInd=0):
"""Turn autoscaling on or off for the specified subplot
You can also turn off autoscaling by calling setYLimits.
"""
doAutoscale = bool(doAutoscale)
subplot = self.subplotArr[subplotInd]
subplot.set_ylim(auto=doAutoscale)
if doAutoscale:
subplot.relim()
subplot.autoscale_view(scalex=False, scaley=True)
def setYLimits(self, minY, maxY, subplotInd=0):
"""Set y limits for the specified subplot and disable autoscaling.
Note: if you want to autoscale with a minimum range, use showY.
"""
self.subplotArr[subplotInd].set_ylim(minY, maxY, auto=False)
def showY(self, y0, y1=None, subplotInd=0):
"""Specify one or two values to always show in the y range.
Inputs:
- subplotInd: index of subplot
- y0: first y value to show
- y1: second y value to show; None to omit
Warning: setYLimits overrides this method (but the values are remembered in case you turn
autoscaling back on).
"""
subplot = self.subplotArr[subplotInd]
yMin, yMax = subplot.get_ylim()
if y1 is not None:
yList = [y0, y1]
else:
yList = [y0]
doRescale = False
for y in yList:
subplot.axhline(y, linestyle=" ")
if subplot.get_autoscaley_on() and numpy.isfinite(y) and not (yMin <= y <= yMax):
doRescale = True
if doRescale:
subplot.relim()
subplot.autoscale_view(scalex=False, scaley=True)
def _handleDrawEvent(self, event=None):
"""Handle draw event
"""
# print "handleDrawEvent"
for subplot in self.subplotArr:
subplot._scwBackground = self.canvas.copy_from_bbox(subplot.bbox)
for line in subplot._scwLines:
subplot.draw_artist(line.line2d)
self.canvas.blit(subplot.bbox)
def _handleMap(self, evt):
"""Handle map event (widget made visible)
"""
self._isVisible = True
self._handleDrawEvent()
self._updateTimeAxis()
def _handleUnmap(self, evt):
"""Handle unmap event (widget made not visible)
"""
self._isVisible = False
def _updateTimeAxis(self):
"""Update the time axis; calls itself
"""
tMax = time.time() + self.updateInterval
tMin = tMax - self._timeRange
minMplDays = self._cnvTimeFunc(tMin)
maxMplDays = self._cnvTimeFunc(tMax)
self._purgeCounter = (self._purgeCounter + 1) % self._maxPurgeCounter
doPurge = self._purgeCounter == 0
if doPurge:
for subplot in self.subplotArr:
for line in subplot._scwLines:
line._purgeOldData(minMplDays)
if self._isVisible or self._isFirst:
for subplot in self.subplotArr:
subplot.set_xlim(minMplDays, maxMplDays)
if doPurge:
if subplot.get_autoscaley_on():
# since data is being purged the y limits may have changed
subplot.relim()
subplot.autoscale_view(scalex=False, scaley=True)
self._isFirst = False
self.canvas.draw()
self._timeAxisTimer.start(self.updateInterval, self._updateTimeAxis)
class _Line(object):
"""A line (trace) on a strip chart representing some varying quantity
Attributes that might be useful:
- line2d: the matplotlib.lines.Line2D associated with this line
- subplot: the matplotlib Subplot instance displaying this line
- cnvTimeFunc: a function that takes a POSIX timestamp (e.g. time.time()) and returns matplotlib days;
typically an instance of TimeConverter; defaults to TimeConverter(useUTC=False)
"""
def __init__(self, subplot, cnvTimeFunc, wdg, **kargs):
"""Create a line
Inputs:
- subplot: the matplotlib Subplot instance displaying this line
- cnvTimeFunc: a function that takes a POSIX timestamp (e.g. time.time()) and returns matplotlib days;
typically an instance of TimeConverter; defaults to TimeConverter(useUTC=False)
- wdg: parent strip chart widget; used to test visibility
- **kargs: keyword arguments for matplotlib Line2D, such as color
"""
self.subplot = subplot
self._cnvTimeFunc = cnvTimeFunc
self._wdg = wdg
# do not use the data in the Line2D because in some versions of matplotlib
# line.get_data returns numpy arrays, which cannot be appended to
self._tList = []
self._yList = []
self.line2d = matplotlib.lines.Line2D([], [], animated=True, **kargs)
self.subplot.add_line(self.line2d)
self.subplot._scwLines.append(self)
def addPoint(self, y, t=None):
"""Append a new data point
Inputs:
- y: y value; if None the point is silently ignored
- t: time as a POSIX timestamp (e.g. time.time()); if None then "now"
"""
if y is None:
return
if t is None:
t = time.time()
mplDays = self._cnvTimeFunc(t)
self._tList.append(mplDays)
self._yList.append(y)
self._redraw()
def _redraw(self):
"""Redraw the graph
"""
self.line2d.set_data(self._tList, self._yList)
if not self._wdg.winfo_ismapped():
return
if len(self._yList) > 0:
# see if limits need updating to include last point
lastY = self._yList[-1]
if self.subplot.get_autoscaley_on() and numpy.isfinite(lastY):
yMin, yMax = self.subplot.get_ylim()
self.line2d.set_data(self._tList, self._yList)
if not (yMin <= lastY <= yMax):
self.subplot.relim()
self.subplot.autoscale_view(scalex=False, scaley=True)
return # a draw event was triggered
# did not trigger redraw event so do it now
if self.subplot._scwBackground:
canvas = self.subplot.figure.canvas
canvas.restore_region(self.subplot._scwBackground)
for line in self.subplot._scwLines:
self.subplot.draw_artist(line.line2d)
canvas.blit(self.subplot.bbox)
def clear(self):
"""Clear all data
"""
self._tList = []
self._yList = []
self._redraw()
def _purgeOldData(self, minMplDays):
"""Purge data with t < minMplDays
Inputs:
- minMplDays: time before which to delete data (matplotlib days)
Warning: does not update the display (the caller must do that)
"""
if not self._tList:
return
numToDitch = bisect.bisect_left(self._tList, minMplDays) - 1 # -1 avoids a gap at the left
if numToDitch > 0:
self._tList = self._tList[numToDitch:]
self._yList = self._yList[numToDitch:]
self.line2d.set_data(self._tList, self._yList)
class TimeConverter(object):
"""A functor that takes a POSIX timestamp (e.g. time.time()) and returns matplotlib days
"""
_DaysPerSecond = 1.0 / (24.0 * 60.0 * 60.0)
def __init__(self, useUTC, offset=0.0):
"""Create a TimeConverter
Inputs:
- useUTC: use UTC instead of the local time zone?
- offset: time offset: returned time - supplied time (sec)
"""
self._offset = float(offset)
unixSec = time.time()
if useUTC:
d = datetime.datetime.utcfromtimestamp(unixSec)
else:
d = datetime.datetime.fromtimestamp(unixSec)
matplotlibDays = matplotlib.dates.date2num(d)
self.mplSecMinusUnixSec = (matplotlibDays / self._DaysPerSecond) - unixSec
def __call__(self, unixSec):
"""Given a a POSIX timestamp (e.g. from time.time()) return matplotlib days
"""
return (unixSec + self._offset + self.mplSecMinusUnixSec) * self._DaysPerSecond
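# Quick illustration (not in the original file): a TimeConverter maps POSIX
# seconds onto matplotlib day numbers; samples one second apart differ by
# 1/86400 matplotlib days.
#
#   cnv = TimeConverter(useUTC=True)
#   nowDays = cnv(time.time())            # matplotlib day number for "now"
#   laterDays = cnv(time.time() + 1.0)    # larger by roughly 1/86400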
if __name__ == "__main__":
import opscore.RO.Alg
root = tkinter.Tk()
stripChart = StripChartWdg(
master = root,
timeRange = 60,
numSubplots = 2,
# updateInterval = 5,
width = 9,
height = 3,
)
stripChart.pack(expand=True, fill="both")
countsLine = stripChart.addLine(label="Counts", subplotInd=0, color="blue")
satConstLine = stripChart.addConstantLine(2.5, label="Saturated", subplotInd=0, color="red")
stripChart.subplotArr[0].yaxis.set_label_text("Counts")
# make sure the Y axis of subplot 0 always includes 0 and 2.7
# stripChart.showY(0.0, 2.8, subplotInd=0)
walk1Line = stripChart.addLine(label="Walk 1", subplotInd=1, color="blue")
walk2Line = stripChart.addLine(label="Walk 2", subplotInd=1, color="green")
stripChart.subplotArr[1].yaxis.set_label_text("Random Walk")
# stripChart.showY(0.0, subplotInd=0)
stripChart.subplotArr[1].legend(loc=3)
# stop major time ticks from jumping around as time advances:
stripChart.xaxis.set_major_locator(matplotlib.dates.SecondLocator(bysecond=list(range(0,60,10))))
varDict = {
countsLine: opscore.RO.Alg.ConstrainedGaussianRandomWalk(1, 0.2, 0, 2.8),
walk1Line: opscore.RO.Alg.RandomWalk.GaussianRandomWalk(0, 2),
walk2Line: opscore.RO.Alg.RandomWalk.GaussianRandomWalk(0, 2),
}
def addRandomValues(line, interval=0.1):
"""Add random values to the specified strip chart line
Inputs:
- line: strip chart line
- interval: interval between updates (sec)
"""
var = varDict[line]
line.addPoint(next(var))
Timer(interval, addRandomValues, line, interval)
addRandomValues(countsLine, interval=0.5)
addRandomValues(walk1Line, 1.6)
addRandomValues(walk2Line, 1.9)
def deleteSatConstLine():
stripChart.removeLine(satConstLine)
tkinter.Button(root, text="Delete Saturated Counts", command=deleteSatConstLine).pack()
def deleteWalk1():
stripChart.removeLine(walk1Line)
tkinter.Button(root, text="Delete Walk 1", command=deleteWalk1).pack()
root.mainloop()
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import base64
from io import BytesIO
from typing import Iterable
from werkzeug.utils import secure_filename
from werkzeug.wrappers import Request
from bentoml import config
from bentoml.utils.lazy_loader import LazyLoader
from bentoml.marshal.utils import SimpleRequest, SimpleResponse
from bentoml.exceptions import BadInput
from bentoml.adapters.base_input import BaseInputAdapter
# BentoML optional dependencies, using lazy load to avoid ImportError
imageio = LazyLoader('imageio', globals(), 'imageio')
def verify_image_format_or_raise(file_name, accept_format_list):
"""
Raise error if file's extension is not in the accept_format_list
"""
if accept_format_list:
_, extension = os.path.splitext(file_name)
if extension.lower() not in accept_format_list:
raise BadInput(
"Input file not in supported format list: {}".format(accept_format_list)
)
def get_default_accept_image_formats():
"""With default bentoML config, this returns:
['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp']
"""
return [
extension.strip()
for extension in config("apiserver")
.get("default_image_input_accept_file_extensions")
.split(",")
]
class ImageInput(BaseInputAdapter):
"""Transform incoming image data from http request, cli or lambda event into numpy
array.
Handle incoming image data from different sources, transform them into numpy array
and pass down to user defined API functions
* If you want to operate on the raw image file stream or PIL.Image objects, use the
lower-level alternative FileInput.
Args:
accept_image_formats (string[]): A list of acceptable image formats.
Default value is loaded from bentoml config
'apiserver/default_image_input_accept_file_extensions', which is
set to ['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp'] by default.
List of all supported format can be found here:
https://imageio.readthedocs.io/en/stable/formats.html
pilmode (string): The pilmode to be used for reading image file into numpy
array. Default value is 'RGB'. Find more information at:
https://imageio.readthedocs.io/en/stable/format_png-pil.html
Raises:
ImportError: imageio package is required to use ImageInput
Example:
>>> from bentoml import BentoService, api, artifacts
>>> from bentoml.artifact import TensorflowArtifact
>>> from bentoml.adapters import ImageInput
>>>
>>> CLASS_NAMES = ['cat', 'dog']
>>>
>>> @artifacts([TensorflowArtifact('classifier')])
>>> class PetClassification(BentoService):
>>> @api(input=ImageInput())
>>> def predict(self, image_ndarrays):
>>> results = self.artifacts.classifier.predict(image_ndarrays)
>>> return [CLASS_NAMES[r] for r in results]
"""
HTTP_METHODS = ["POST"]
BATCH_MODE_SUPPORTED = True
def __init__(
self,
accept_image_formats=None,
pilmode="RGB",
is_batch_input=False,
**base_kwargs,
):
assert imageio, "`imageio` package is required to use ImageInput"
if is_batch_input:
raise ValueError('ImageInput cannot accept batch inputs')
super(ImageInput, self).__init__(is_batch_input=is_batch_input, **base_kwargs)
if 'input_names' in base_kwargs:
raise TypeError(
"ImageInput doesn't take input_names as parameters since bentoml 0.8."
"Update your Service definition "
"or use LegacyImageInput instead(not recommended)."
)
self.pilmode = pilmode
self.accept_image_formats = (
accept_image_formats or get_default_accept_image_formats()
)
@property
def config(self):
return {
# Converting to list, google.protobuf.Struct does not work with tuple type
"accept_image_formats": self.accept_image_formats,
"pilmode": self.pilmode,
}
@property
def request_schema(self):
return {
"image/*": {"schema": {"type": "string", "format": "binary"}},
"multipart/form-data": {
"schema": {
"type": "object",
"properties": {
"image_file": {"type": "string", "format": "binary"}
},
}
},
}
@property
def pip_dependencies(self):
return ["imageio"]
def _load_image_data(self, request: Request):
if len(request.files):
if len(request.files) != 1:
raise BadInput(
"ImageInput requires one and at least one image file at a time, "
"if you just upgraded from bentoml 0.7, you may need to use "
"FileInput or LegacyImageInput instead"
)
input_file = next(iter(request.files.values()))
if not input_file:
raise BadInput("BentoML#ImageInput unexpected HTTP request format")
file_name = secure_filename(input_file.filename)
verify_image_format_or_raise(file_name, self.accept_image_formats)
input_stream = input_file.stream
else:
data = request.get_data()
if not data:
raise BadInput("BentoML#ImageInput unexpected HTTP request format")
else:
input_stream = data
input_data = imageio.imread(input_stream, pilmode=self.pilmode)
return input_data
def handle_batch_request(
self, requests: Iterable[SimpleRequest], func: callable
) -> Iterable[SimpleResponse]:
"""
Batch version of handle_request
"""
input_datas = []
ids = []
for i, req in enumerate(requests):
if not req.data:
ids.append(None)
continue
request = Request.from_values(
input_stream=BytesIO(req.data),
content_length=len(req.data),
headers=req.headers,
)
try:
input_data = self._load_image_data(request)
except BadInput:
ids.append(None)
continue
input_datas.append(input_data)
ids.append(i)
results = func(input_datas) if input_datas else []
return self.output_adapter.to_batch_response(results, ids, requests)
def handle_request(self, request, func):
"""Handle http request that has one image file. It will convert image into a
ndarray for the function to consume.
Args:
request: incoming request object.
func: function that will take ndarray as its arg.
options: configuration for handling request object.
Return:
response object
"""
input_data = self._load_image_data(request)
result = func((input_data,))[0]
return self.output_adapter.to_response(result, request)
def handle_cli(self, args, func):
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True, nargs='+')
parser.add_argument("--batch-size", default=None, type=int)
parsed_args, unknown_args = parser.parse_known_args(args)
file_paths = parsed_args.input
batch_size = (
parsed_args.batch_size if parsed_args.batch_size else len(file_paths)
)
for i in range(0, len(file_paths), batch_size):
step_file_paths = file_paths[i : i + batch_size]
image_arrays = []
for file_path in step_file_paths:
verify_image_format_or_raise(file_path, self.accept_image_formats)
if not os.path.isabs(file_path):
file_path = os.path.abspath(file_path)
image_arrays.append(imageio.imread(file_path, pilmode=self.pilmode))
results = func(image_arrays)
for result in results:
return self.output_adapter.to_cli(result, unknown_args)
def handle_aws_lambda_event(self, event, func):
if event["headers"].get("Content-Type", "").startswith("images/"):
image = imageio.imread(
base64.decodebytes(event["body"]), pilmode=self.pilmode
)
else:
raise BadInput(
"BentoML currently doesn't support Content-Type: {content_type} for "
"AWS Lambda".format(content_type=event["headers"]["Content-Type"])
)
result = func((image,))[0]
return self.output_adapter.to_aws_lambda_event(result, event)
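# Illustrative client sketch (not part of the original module): posting a raw
# image body to a running BentoService that uses ImageInput. The host, port and
# endpoint name below are assumptions based on the `predict` API in the class
# docstring example.
#
#   import requests
#   with open("cat.png", "rb") as f:
#       resp = requests.post(
#           "http://127.0.0.1:5000/predict",
#           data=f.read(),
#           headers={"Content-Type": "image/png"},
#       )
#   print(resp.json())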
|
import re
from src.detection import Result
from src.detection.harness import Harness
def check_effective_with_differential_test(testcase: str, normal_outputs: list, suspicious_outputs: list, with_output_info=False):
testbed = []
for output in (normal_outputs + suspicious_outputs):
testbed.append(output.testbed)
harness = Harness()
harness_result = harness.run_testcase(testcase)
bug_info = Result.differential_test(harness_result)
if len(bug_info) != len(suspicious_outputs):
return False
else:
suspicious_testbeds_before = set(output.testbed for output in suspicious_outputs)
suspicious_testbeds_after = [info.testbed for info in bug_info]
if len(suspicious_testbeds_before.union(suspicious_testbeds_after)) > len(suspicious_testbeds_before):
return False
if with_output_info:
testbed_output_dict_before = dict(
[(output.testbed, get_key_outputs(output)) for output in (suspicious_outputs + normal_outputs)])
for output in harness_result.outputs:
if get_key_outputs(output) != testbed_output_dict_before.get(output.testbed):
return False
return True
def split_output(result: Result.HarnessResult):
"""
Split all outputs into suspicious ones (possibly indicating bugs) and normal ones.
:param result:
:return:
"""
# The differential test must be re-run here, otherwise bugs occur: because of filtering, the test results read from the database are not guaranteed to contain all of the inconsistencies found by differential testing.
differential_result_output_ids = [info.output_id for info in Result.differential_test(result)]
suspicious_output_ids_set = set(differential_result_output_ids)
suspicious_output = []
normal_output = []
for output in result.outputs:
if output.id in suspicious_output_ids_set:
suspicious_output.append(output)
else:
normal_output.append(output)
return [suspicious_output, normal_output]
def is_removable(init_result: Result.HarnessResult, code: str, with_output_info=False):
[suspicious_outputs, normal_outputs] = split_output(init_result)
return check_effective_with_differential_test(code, normal_outputs, suspicious_outputs, with_output_info=with_output_info)
def get_key_outputs(output: Result.Output):
"""
Return the key error messages or outputs that lithium can recognize.
:param output:
:return:
"""
key_outputs = list_essential_exception_message(output.stderr + output.stdout)
if key_outputs == "":
key_outputs = output.stdout
return key_outputs
def list_essential_exception_message(outputs_info: str):
"""
Return the exception message if one can be matched; otherwise return "".
"""
regex_error = "(([a-zA-Z]*Error|timeout):.*?)(\\.\\s|\\n|\\.$)"
regex_hermes_error = "(error:.*?)(\\. |\\n|\\.$)"
regex_note = "(note:.*?)(\\.\\s|\\n|\\.$)"
regex_elegent = "[a-zA-Z]+Error:.*"
pattern_error = re.compile(regex_error, re.M)
pattern_hermes_error = re.compile(regex_hermes_error, re.M)
pattern_note = re.compile(regex_note, re.M)
pattern_elegent = re.compile(regex_elegent, re.M)
matcher_error = set([e[0] for e in pattern_error.findall(outputs_info)])
matcher_hermes_error = set([e[0] for e in pattern_hermes_error.findall(outputs_info)])
matcher_note = set([e[0] for e in pattern_note.findall(outputs_info)])
matcher_error_list = list(matcher_error)
for index in range(len(matcher_error_list)):
tmp = pattern_elegent.findall(matcher_error_list[index])
if len(tmp) > 0:
matcher_error_list[index] = tmp[0]
matcher = []
if len(matcher_error) > 0:
matcher += matcher_error_list
elif len(matcher_hermes_error) > 0:  # only Hermes error messages lack a specific error type
matcher += matcher_hermes_error
elif len(matcher_note) > 0:  # Hermes has no error message
matcher += matcher_note
matcher_key_exceptions = "\n".join(matcher)
return matcher_key_exceptions
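# Illustrative sketch (not part of the original module): what the key-exception
# extraction yields for a typical JavaScript engine stderr line; the sample text
# below is made up.
#
#   sample = "foo.js:3: TypeError: undefined is not a function.\n"
#   list_essential_exception_message(sample)
#   # -> "TypeError: undefined is not a function"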
|
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
# pylint: disable=unused-import
from enum import Enum
from io import BytesIO
import numpy as np
import os
import os.path as osp
_IMAGE_BACKENDS = Enum("_IMAGE_BACKENDS", ["cv2", "PIL"])
_IMAGE_BACKEND = None
try:
import cv2
_IMAGE_BACKEND = _IMAGE_BACKENDS.cv2
except ImportError:
import PIL
_IMAGE_BACKEND = _IMAGE_BACKENDS.PIL
from datumaro.util.image_cache import ImageCache as _ImageCache
def load_image(path, dtype=np.float32):
"""
Reads an image in the HWC Grayscale/BGR(A) float [0; 255] format.
"""
if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
import cv2
image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
image = image.astype(dtype)
elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
from PIL import Image
image = Image.open(path)
image = np.asarray(image, dtype=dtype)
if len(image.shape) == 3 and image.shape[2] in {3, 4}:
image[:, :, :3] = image[:, :, 2::-1] # RGB to BGR
else:
raise NotImplementedError()
if image is None:
raise ValueError("Can't open image '%s'" % path)
assert len(image.shape) in {2, 3}
if len(image.shape) == 3:
assert image.shape[2] in {3, 4}
return image
def save_image(path, image, create_dir=False, dtype=np.uint8, **kwargs):
# NOTE: Check destination path for existence
# OpenCV silently fails if target directory does not exist
dst_dir = osp.dirname(path)
if dst_dir:
if create_dir:
os.makedirs(dst_dir, exist_ok=True)
elif not osp.isdir(dst_dir):
raise FileNotFoundError("Directory does not exist: '%s'" % dst_dir)
if not kwargs:
kwargs = {}
if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
import cv2
params = []
ext = path[-4:]
if ext.upper() == ".JPG":
params = [int(cv2.IMWRITE_JPEG_QUALITY), kwargs.get("jpeg_quality", 75)]
image = image.astype(dtype)
cv2.imwrite(path, image, params=params)
elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
from PIL import Image
params = {}
params["quality"] = kwargs.get("jpeg_quality")
if kwargs.get("jpeg_quality") == 100:
params["subsampling"] = 0
image = image.astype(dtype)
if len(image.shape) == 3 and image.shape[2] in {3, 4}:
image[:, :, :3] = image[:, :, 2::-1] # BGR to RGB
image = Image.fromarray(image)
image.save(path, **params)
else:
raise NotImplementedError()
def encode_image(image, ext, dtype=np.uint8, **kwargs):
if not kwargs:
kwargs = {}
if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
import cv2
params = []
if not ext.startswith("."):
ext = "." + ext
if ext.upper() == ".JPG":
params = [int(cv2.IMWRITE_JPEG_QUALITY), kwargs.get("jpeg_quality", 75)]
image = image.astype(dtype)
success, result = cv2.imencode(ext, image, params=params)
if not success:
raise Exception("Failed to encode image to '%s' format" % (ext))
return result.tobytes()
elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
from PIL import Image
if ext.startswith("."):
ext = ext[1:]
params = {}
params["quality"] = kwargs.get("jpeg_quality")
if kwargs.get("jpeg_quality") == 100:
params["subsampling"] = 0
image = image.astype(dtype)
if len(image.shape) == 3 and image.shape[2] in {3, 4}:
image[:, :, :3] = image[:, :, 2::-1] # BGR to RGB
image = Image.fromarray(image)
with BytesIO() as buffer:
image.save(buffer, format=ext, **params)
return buffer.getvalue()
else:
raise NotImplementedError()
def decode_image(image_bytes, dtype=np.float32):
if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
import cv2
image = np.frombuffer(image_bytes, dtype=np.uint8)
image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)
image = image.astype(dtype)
elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
from PIL import Image
image = Image.open(BytesIO(image_bytes))
image = np.asarray(image, dtype=dtype)
if len(image.shape) == 3 and image.shape[2] in {3, 4}:
image[:, :, :3] = image[:, :, 2::-1] # RGB to BGR
else:
raise NotImplementedError()
assert len(image.shape) in {2, 3}
if len(image.shape) == 3:
assert image.shape[2] in {3, 4}
return image
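# Illustrative round trip (not part of the original module): encode an in-memory
# BGR image to PNG bytes and decode it back; decode_image returns float32 HWC
# data by default, matching load_image.
#
#   img = np.zeros((8, 8, 3), dtype=np.uint8)
#   png_bytes = encode_image(img, ".png")
#   restored = decode_image(png_bytes)   # HWC, BGR channel order, float32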
class lazy_image:
def __init__(self, path, loader=None, cache=None):
if loader is None:
loader = load_image
self.path = path
self.loader = loader
# Cache:
# - False: do not cache
# - None: use the global cache
# - object: an object to be used as cache
assert cache in {None, False} or isinstance(cache, object)
self.cache = cache
def __call__(self):
image = None
image_id = hash(self)  # path is not necessarily hashable or a file path
cache = self._get_cache(self.cache)
if cache is not None:
image = cache.get(image_id)
if image is None:
image = self.loader(self.path)
if cache is not None:
cache.push(image_id, image)
return image
@staticmethod
def _get_cache(cache):
if cache is None:
cache = _ImageCache.get_instance()
elif cache == False:
return None
return cache
def __hash__(self):
return hash((id(self), self.path, self.loader))
class Image:
def __init__(self, data=None, path=None, loader=None, cache=None, size=None):
assert size is None or len(size) == 2
if size is not None:
assert len(size) == 2 and 0 < size[0] and 0 < size[1], size
size = tuple(size)
self._size = size # (H, W)
assert path is None or isinstance(path, str)
if path is None:
path = ""
self._path = path
assert data is not None or path or loader, "Image can not be empty"
if data is not None:
assert callable(data) or isinstance(data, np.ndarray), type(data)
if data is None and (path or loader):
if osp.isfile(path) or loader:
data = lazy_image(path, loader=loader, cache=cache)
self._data = data
@property
def path(self):
return self._path
@property
def ext(self):
return osp.splitext(osp.basename(self.path))[1]
@property
def data(self):
if callable(self._data):
return self._data()
return self._data
@property
def has_data(self):
return self._data is not None
@property
def size(self):
if self._size is None:
data = self.data
if data is not None:
self._size = data.shape[:2]
return self._size
def __eq__(self, other):
if isinstance(other, np.ndarray):
return self.has_data and np.array_equal(self.data, other)
if not isinstance(other, __class__):
return False
return (
(np.array_equal(self.size, other.size))
and (self.has_data == other.has_data)
and (
self.has_data
and np.array_equal(self.data, other.data)
or not self.has_data
)
)
class ByteImage(Image):
def __init__(self, data=None, path=None, ext=None, cache=None, size=None):
loader = None
if data is not None:
if callable(data) and not isinstance(data, lazy_image):
data = lazy_image(path, loader=data, cache=cache)
loader = lambda _: decode_image(self.get_bytes())
super().__init__(path=path, size=size, loader=loader, cache=cache)
if data is None and loader is None:
# unset defaults for regular images
# to avoid random file reading to bytes
self._data = None
self._bytes_data = data
if ext:
ext = ext.lower()
if not ext.startswith("."):
ext = "." + ext
self._ext = ext
def get_bytes(self):
if callable(self._bytes_data):
return self._bytes_data()
return self._bytes_data
@property
def ext(self):
if self._ext:
return self._ext
return super().ext
def __eq__(self, other):
if not isinstance(other, __class__):
return super().__eq__(other)
return (
(np.array_equal(self.size, other.size))
and (self.has_data == other.has_data)
and (
self.has_data
and self.get_bytes() == other.get_bytes()
or not self.has_data
)
)
|
import os, sys
import warnings
from os.path import join as opj
import yaml
import numpy as np
import random
import pathlib
import subprocess
import torch
from torch.utils.data import Subset
def save_args(args, odir):
if type(args) != dict:
args = vars(args)
with open(opj(odir,"args.yaml"),mode="w") as f:
f.write(yaml.dump(args))
def make_folders(odir):
if not os.path.exists(odir):
os.makedirs(odir)
def is_absolute(path:str)->bool:
path_pl = pathlib.Path(path)
return path_pl.is_absolute()
def get_git_commit_hash():
cmd = "git rev-parse --short HEAD"
hash_code = subprocess.check_output(cmd.split()).strip().decode('utf-8')
return hash_code
class PytorchTools:
def __init__(self):
print("This class is for staticmethod.")
@staticmethod
def create_subset(dataset, subset):
if type(subset) is int:
subset_number_list = np.random.randint(0,len(dataset)-1,subset)
elif type(subset) is list:
subset_number_list = subset
else:
raise NotImplementedError()
return Subset(dataset,subset_number_list), subset_number_list
@staticmethod
def set_seed(seed, cuda=True, consistency=False):
""" Sets seeds in all frameworks"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
if cuda and torch.cuda.is_available() and not consistency:
torch.backends.cudnn.enabled = True # use cuDNN
else:
torch.backends.cudnn.enabled = False
@staticmethod
def select_device(device_name):
if type(device_name) is str:
if device_name in ["cpu", "-1"]:
device = "cpu"
elif device_name in ["cuda", "gpu","0"]:
device = "cuda"
elif device_name in ["tpu"]:
raise NotImplementedError()
else:
raise NotImplementedError("1 Unknow device: {}".format(device_name))
elif type(device_name) is int:
if device_name < 0:
device = "cpu"
elif device_name >= 0:
device = "cuda"
else:
raise NotImplementedError("2 Unknow device: {}".format(device_name))
else:
raise NotImplementedError("0 Unknow device: {}".format(device_name))
return device
@staticmethod
def fix_model(model):
for param in model.parameters():
param.requires_grad = False
@staticmethod
def load_data(path):
print("-> loading data '{}'".format(path))
# https://discuss.pytorch.org/t/out-of-memory-error-when-resume-training-even-though-my-gpu-is-empty/30757
checkpoint = torch.load(path, map_location='cpu')
return checkpoint
@staticmethod
def resume(checkpoint, model, optimizer, scheduler):
"""
return: model, optimizer, scheduler
"""
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint["scheduler"])
return model, optimizer, scheduler
@staticmethod
def t2n(torch_tensor):
return torch_tensor.cpu().detach().numpy()
@staticmethod
def dict2tensorboard(log_dict, writer, step):
for key in log_dict:
writer.add_scalar(key, log_dict[key], step)
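# Illustrative usage sketch (not part of the original module); `dataset`, `model`,
# `optimizer` and `scheduler` below are placeholders for your own objects.
#
#   PytorchTools.set_seed(42)
#   device = PytorchTools.select_device("cuda")
#   subset, indices = PytorchTools.create_subset(dataset, 128)
#   checkpoint = PytorchTools.load_data("checkpoint.pth")
#   model, optimizer, scheduler = PytorchTools.resume(checkpoint, model, optimizer, scheduler)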
|
import hoomd
from hoomd.conftest import pickling_check
import numpy
import pytest
@pytest.fixture(scope='session')
def polymer_snapshot_factory(device):
"""Make a snapshot with polymers and distance constraints."""
def make_snapshot(polymer_length=10,
N_polymers=10,
polymer_spacing=1.2,
bead_spacing=1.1):
"""Make the snapshot.
Args:
polymer_length: Number of particles in each polymer
N_polymers: Number of polymers to place
polymer_spacing: distance between the polymers
bead_spacing: distance between the beads in the polymer
Place N_polymers polymers in a 2D simulation with distance constraints
between beads in each polymer.
"""
s = hoomd.Snapshot(device.communicator)
if s.communicator.rank == 0:
s.configuration.box = [
polymer_spacing * N_polymers, bead_spacing * polymer_length, 0,
0, 0, 0
]
s.particles.N = polymer_length * N_polymers
s.particles.types = ['A']
x_coords = numpy.linspace(-polymer_spacing * N_polymers / 2,
polymer_spacing * N_polymers / 2,
num=N_polymers,
endpoint=False) + polymer_spacing / 2
y_coords = numpy.linspace(-bead_spacing * polymer_length / 2,
bead_spacing * polymer_length / 2,
                                       num=polymer_length,
endpoint=False) + bead_spacing / 2
position = []
constraint_values = []
constraint_groups = []
for x in x_coords:
for i, y in enumerate(y_coords):
position.append([x, y, 0])
if i & 1:
constraint_values.append(bead_spacing)
tag = len(position) - 1
constraint_groups.append([tag, tag - 1])
s.particles.position[:] = position
s.constraints.N = len(constraint_values)
s.constraints.value[:] = constraint_values
s.constraints.group[:] = constraint_groups
return s
return make_snapshot
def test_attach_detach(simulation_factory, polymer_snapshot_factory):
"""Ensure that md.constrain.Distance can be attached.
Also test that parameters can be set.
"""
# detached
d = hoomd.md.constrain.Distance(tolerance=1e-5)
assert d.tolerance == 1e-5
d.tolerance = 1e-3
assert d.tolerance == 1e-3
# attached
sim = simulation_factory(polymer_snapshot_factory())
integrator = hoomd.md.Integrator(dt=0.005)
nve = hoomd.md.methods.NVE(filter=hoomd.filter.All())
integrator.methods.append(nve)
integrator.constraints.append(d)
sim.run(0)
assert d.tolerance == 1e-3
d.tolerance = 1e-5
assert d.tolerance == 1e-5
def test_pickling(simulation_factory, polymer_snapshot_factory):
"""Test that md.constrain.Distance can be pickled and unpickled."""
# detached
d = hoomd.md.constrain.Distance(tolerance=1e-5)
pickling_check(d)
# attached
sim = simulation_factory(polymer_snapshot_factory())
integrator = hoomd.md.Integrator(dt=0.005)
nve = hoomd.md.methods.NVE(filter=hoomd.filter.All())
integrator.methods.append(nve)
integrator.constraints.append(d)
sim.run(0)
pickling_check(d)
def test_basic_simulation(simulation_factory, polymer_snapshot_factory):
"""Ensure that distances are constrained in a basic simulation."""
d = hoomd.md.constrain.Distance()
sim = simulation_factory(polymer_snapshot_factory())
integrator = hoomd.md.Integrator(dt=0.005)
nve = hoomd.md.methods.NVE(filter=hoomd.filter.All())
integrator.methods.append(nve)
integrator.constraints.append(d)
cell = hoomd.md.nlist.Cell(buffer=0.4)
lj = hoomd.md.pair.LJ(nlist=cell)
lj.params[('A', 'A')] = dict(epsilon=1, sigma=1)
lj.r_cut[('A', 'A')] = 2**(1 / 6)
integrator.forces.append(lj)
sim.operations.integrator = integrator
sim.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=1.0)
sim.run(100)
snap = sim.state.get_snapshot()
if snap.communicator.rank == 0:
# compute bond lengths in unwrapped particle coordinates
box_lengths = snap.configuration.box[0:3]
r = snap.particles.position + snap.particles.image * box_lengths
constraints = snap.constraints.group
delta_r = r[constraints[:, 1]] - r[constraints[:, 0]]
bond_lengths = numpy.sqrt(numpy.sum(delta_r * delta_r, axis=1))
numpy.testing.assert_allclose(bond_lengths,
snap.constraints.value,
rtol=1e-5)
|
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from itsdangerous import BadSignature
from mediagoblin import mg_globals, messages
from mediagoblin.auth.tools import register_user, check_login_simple
from mediagoblin.db.models import User
from mediagoblin.decorators import allow_registration, auth_enabled
from mediagoblin.decorators import require_active_login
from mediagoblin.plugins.recaptcha import forms as auth_forms
from mediagoblin.plugins.recaptcha import tools
from mediagoblin.tools import pluginapi
from mediagoblin.tools.crypto import get_timed_signer_url
from mediagoblin.tools.mail import email_debug_message
from mediagoblin.tools.response import redirect, render_to_response, render_404
from mediagoblin.tools.translate import pass_to_ugettext as _
from recaptcha.client import captcha
_log = logging.getLogger(__name__)
@auth_enabled
def login(request):
"""
MediaGoblin login view.
If you provide the POST with 'next', it'll redirect to that view.
"""
#if 'pass_auth' not in request.template_env.globals:
# redirect_name = hook_handle('auth_no_pass_redirect')
# if redirect_name:
# return redirect(request, 'mediagoblin.plugins.{0}.login'.format(
# redirect_name))
# else:
# return redirect(request, 'index')
login_form = auth_forms.LoginForm(request.form)
login_failed = False
#if request.method == 'POST' and login_form.validate():
if request.method == 'POST':
if login_form.validate():
user = check_login_simple(
login_form.username.data,
login_form.password.data)
if user:
# set up login in session
if login_form.stay_logged_in.data:
request.session['stay_logged_in'] = True
request.session['user_id'] = unicode(user.id)
request.session.save()
if request.form.get('next'):
return redirect(request, location=request.form['next'])
else:
return redirect(request, "index")
login_failed = True
return render_to_response(
request,
'mediagoblin/plugins/recaptcha/login.html',
{'login_form': login_form,
'next': request.GET.get('next') or request.form.get('next'),
'login_failed': login_failed,
'post_url': request.urlgen('mediagoblin.plugins.recaptcha.login'),
'allow_registration': mg_globals.app_config["allow_registration"]})
@allow_registration
@auth_enabled
def register(request):
# if request.method == 'GET':
# return redirect(
# request,
# 'mediagoblin.plugins.recaptcha.register')
register_form = auth_forms.RegistrationForm(request.form)
config = pluginapi.get_config('mediagoblin.plugins.recaptcha')
recaptcha_protocol = ''
if config['RECAPTCHA_USE_SSL']:
recaptcha_protocol = 'https'
else:
recaptcha_protocol = 'http'
_log.debug("Connecting to reCAPTCHA service via %r", recaptcha_protocol)
if register_form.validate():
recaptcha_challenge = request.form['recaptcha_challenge_field']
recaptcha_response = request.form['recaptcha_response_field']
_log.debug("response field is: %r", recaptcha_response)
_log.debug("challenge field is: %r", recaptcha_challenge)
response = captcha.submit(
recaptcha_challenge,
recaptcha_response,
config.get('RECAPTCHA_PRIVATE_KEY'),
request.remote_addr,
)
goblin = response.is_valid
if response.error_code:
_log.warning("reCAPTCHA error: %r", response.error_code)
if goblin:
user = register_user(request, register_form)
if user:
# redirect the user to their homepage... there will be a
# message waiting for them to verify their email
return redirect(
request, 'mediagoblin.user_pages.user_home',
user=user.username)
else:
messages.add_message(
request,
messages.WARNING,
_('Sorry, captcha was incorrect. Please try again.'))
return render_to_response(
request,
'mediagoblin/plugins/recaptcha/register.html',
{'register_form': register_form,
'post_url': request.urlgen('mediagoblin.plugins.recaptcha.register'),
'recaptcha_public_key': config.get('RECAPTCHA_PUBLIC_KEY'),
'recaptcha_protocol' : recaptcha_protocol})
def forgot_password(request):
"""
Forgot password view
Sends an email with an url to renew forgotten password.
Use GET querystring parameter 'username' to pre-populate the input field
"""
fp_form = auth_forms.ForgotPassForm(request.form,
username=request.args.get('username'))
if not (request.method == 'POST' and fp_form.validate()):
# Either GET request, or invalid form submitted. Display the template
return render_to_response(request,
'mediagoblin/plugins/recaptcha/forgot_password.html',
{'fp_form': fp_form})
# If we are here: method == POST and form is valid. username casing
# has been sanitized. Store if a user was found by email. We should
# not reveal if the operation was successful then as we don't want to
# leak if an email address exists in the system.
found_by_email = '@' in fp_form.username.data
if found_by_email:
user = User.query.filter_by(
email=fp_form.username.data).first()
# Don't reveal success in case the lookup happened by email address.
success_message = _("If that email address (case sensitive!) is "
"registered an email has been sent with "
"instructions on how to change your password.")
else: # found by username
user = User.query.filter_by(
username=fp_form.username.data).first()
if user is None:
messages.add_message(request,
messages.WARNING,
_("Couldn't find someone with that username."))
return redirect(request, 'mediagoblin.auth.forgot_password')
success_message = _("An email has been sent with instructions "
"on how to change your password.")
if user and user.has_privilege(u'active') is False:
# Don't send reminder because user is inactive or has no verified email
messages.add_message(request,
messages.WARNING,
_("Could not send password recovery email as your username is in"
"active or your account's email address has not been verified."))
return redirect(request, 'mediagoblin.user_pages.user_home',
user=user.username)
# SUCCESS. Send reminder and return to login page
if user:
email_debug_message(request)
tools.send_fp_verification_email(user, request)
messages.add_message(request, messages.INFO, success_message)
return redirect(request, 'mediagoblin.auth.login')
def verify_forgot_password(request):
"""
Check the forgot-password verification and possibly let the user
change their password because of it.
"""
# get form data variables, and specifically check for presence of token
formdata = _process_for_token(request)
if not formdata['has_token']:
return render_404(request)
formdata_vars = formdata['vars']
# Catch error if token is faked or expired
try:
token = get_timed_signer_url("mail_verification_token") \
.loads(formdata_vars['token'], max_age=10*24*3600)
except BadSignature:
messages.add_message(
request,
messages.ERROR,
_('The verification key or user id is incorrect.'))
return redirect(
request,
'index')
# check if it's a valid user id
user = User.query.filter_by(id=int(token)).first()
# no user in db
if not user:
messages.add_message(
request, messages.ERROR,
_('The user id is incorrect.'))
return redirect(
request, 'index')
# check if user active and has email verified
if user.has_privilege(u'active'):
cp_form = auth_forms.ChangeForgotPassForm(formdata_vars)
if request.method == 'POST' and cp_form.validate():
user.pw_hash = tools.bcrypt_gen_password_hash(
cp_form.password.data)
user.save()
messages.add_message(
request,
messages.INFO,
_("You can now log in using your new password."))
return redirect(request, 'mediagoblin.auth.login')
else:
return render_to_response(
request,
'mediagoblin/plugins/recaptcha/change_fp.html',
{'cp_form': cp_form})
## Commenting this out temporarily because I'm checking into
## what's going on with user.email_verified.
##
## ... if this commit lasts long enough for anyone but me (cwebber) to
## notice it, they should pester me to remove this or remove it
## themselves ;)
#
# if not user.email_verified:
# messages.add_message(
# request, messages.ERROR,
# _('You need to verify your email before you can reset your'
# ' password.'))
if not user.status == 'active':
messages.add_message(
request, messages.ERROR,
_('You are no longer an active user. Please contact the system'
' admin to reactivate your account.'))
return redirect(
request, 'index')
def _process_for_token(request):
"""
Checks for tokens in formdata without prior knowledge of request method
For now, returns whether the userid and token formdata variables exist, and
the formdata variables in a hash. Perhaps an object is warranted?
"""
# retrieve the formdata variables
if request.method == 'GET':
formdata_vars = request.GET
else:
formdata_vars = request.form
formdata = {
'vars': formdata_vars,
'has_token': 'token' in formdata_vars}
return formdata
@require_active_login
def change_pass(request):
form = auth_forms.ChangePassForm(request.form)
user = request.user
if request.method == 'POST' and form.validate():
if not tools.bcrypt_check_password(
form.old_password.data, user.pw_hash):
form.old_password.errors.append(
_('Wrong password'))
return render_to_response(
request,
'mediagoblin/plugins/recaptcha/change_pass.html',
{'form': form,
'user': user})
# Password matches
user.pw_hash = tools.bcrypt_gen_password_hash(
form.new_password.data)
user.save()
messages.add_message(
request, messages.SUCCESS,
_('Your password was changed successfully'))
return redirect(request, 'mediagoblin.edit.account')
return render_to_response(
request,
'mediagoblin/plugins/recaptcha/change_pass.html',
{'form': form,
'user': user})
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
<comment-ja>
使用方法: add_disk.py [オプション]
オプション:
--version show program's version number and exit
-h, --help show this help message and exit
-t HOST, --target=HOST
ターゲットホスト名
-a AUTH, --auth=AUTH 認証タイプ
-u USER, --user=USER 認証ユーザー名
-p PASSWORD, --password=PASSWORD
認証パスワード
-w PASSWORD_FILE, --password-file=PASSWORD_FILE
認証パスワードファイル
-s, --autostart 自動起動
</comment-ja>
<comment-en>
Add a new iSCSI target to the host.
usage: add_iscsi.py [options]
options:
--version show program's version number and exit
-h, --help show this help message and exit
-t HOST, --target=HOST
Target host name
-a AUTH, --auth=AUTH Authentication type
-u USER, --user=USER Authentication user name
-p PASSWORD, --password=PASSWORD
Authentication password
-w PASSWORD_FILE, --password-file=PASSWORD_FILE
Authentication password file
-s, --autostart Autostart
</comment-en>
"""
import os
import sys
import re
import logging
import fcntl
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.utils import load_locale, execute_command, is_readable
from karesansui.lib.parser.iscsid import iscsidParser
from karesansui.lib.dict_op import DictOp
from karesansui.lib.iscsi import iscsi_parse_node, iscsi_print_format_node
from karesansui.lib.const import ISCSI_CONFIG_KEY_AUTH_METHOD, ISCSI_CONFIG_KEY_AUTH_USER, \
ISCSI_CONFIG_KEY_AUTH_PASSWORD, ISCSI_CONFIG_KEY_SATRTUP, ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP, \
ISCSI_CONFIG_VALUE_AUTH_METHOD_NONE, ISCSI_CONFIG_VALUE_SATRTUP_ON, ISCSI_CONFIG_VALUE_SATRTUP_OFF, \
ISCSI_CMD, ISCSI_CMD_OPTION_MODE, ISCSI_CMD_OPTION_MODE_DISCOVERY, ISCSI_CMD_OPTION_TYPE, \
ISCSI_CMD_OPTION_TYPE_SENDTARGETS, ISCSI_CMD_OPTION_PORTAL
except ImportError as e:
print("[Error] some packages not found. - %s" % e, file=sys.stderr)
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
optp = OptionParser(usage=usage, version=__version__)
optp.add_option('-t', '--target', dest='host', help=_('Target host name'), default=None)
optp.add_option('-a', '--auth', dest='auth', help=_('Authentication type'), default=None)
optp.add_option('-u', '--user', dest='user', help=_('Authentication user'), default=None)
optp.add_option('-p', '--password', dest='password', help=_('Authentication password'), default=None)
optp.add_option('-w', '--password-file', dest='password_file', help=_('Authentication password file'), default=None)
optp.add_option('-s', '--autostart', dest='autostart', action="store_true", help=_('Autostart'), default=False)
return optp.parse_args()
def chkopts(opts):
reg = re.compile(r"[^a-zA-Z0-9\./_:-]")
if opts.host:
if reg.search(opts.host):
raise KssCommandOptException('ERROR: Illegal option value. option=%s value=%s' % ('-t or --target', opts.host))
else:
raise KssCommandOptException('ERROR: %s option is required.' % '-t or --target')
if opts.auth:
if not opts.auth == ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP and not opts.auth == ISCSI_CONFIG_VALUE_AUTH_METHOD_NONE:
raise KssCommandOptException('ERROR: %s option requires %s or %s.' % ('-a or --auth', ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP, ISCSI_CONFIG_VALUE_AUTH_METHOD_NONE))
if opts.auth == ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP:
if opts.user is None:
raise KssCommandOptException('ERROR: %s option is required.' % '-u or --user')
if opts.password is None and opts.password_file is None:
raise KssCommandOptException('ERROR: %s option is required.' % '-p or --password or -w or --password-file')
if opts.password_file is not None and not is_readable(opts.password_file):
raise KssCommandOptException('ERROR: %s is not found.' % opts.password_file)
else:
raise KssCommandOptException('ERROR: %s option is required.' % '-a or --auth')
class AddIscsi(KssCommand):
def process(self):
(opts, args) = getopts()
chkopts(opts)
self.up_progress(10)
original_parser = iscsidParser()
new_parser = iscsidParser()
dop = DictOp()
dop.addconf("original", original_parser.read_conf())
dop.addconf("new", new_parser.read_conf())
self.up_progress(10)
dop.cdp_set("new", ISCSI_CONFIG_KEY_AUTH_METHOD, opts.auth)
if opts.auth == ISCSI_CONFIG_VALUE_AUTH_METHOD_CHAP:
password = ""
if opts.password is not None:
password = opts.password
elif opts.password_file is not None and is_readable(opts.password_file):
try:
fp = open(opts.password_file, "r")
try:
fcntl.lockf(fp.fileno(), fcntl.LOCK_SH)
try:
password = fp.readline().strip("\n")
finally:
fcntl.lockf(fp.fileno(), fcntl.LOCK_UN)
self.up_progress(10)
finally:
fp.close()
except:
raise KssCommandException('Failed to read file. - target host=%s password_file=%s' \
% (opts.host,opts.password_file))
try:
os.remove(opts.password_file)
except:
raise KssCommandException('Failed to remove file. - target host=%s password_file=%s' \
% (opts.host,opts.password_file))
dop.cdp_set("new", ISCSI_CONFIG_KEY_AUTH_METHOD, opts.auth)
dop.cdp_set("new", ISCSI_CONFIG_KEY_AUTH_USER, opts.user)
dop.cdp_set("new", ISCSI_CONFIG_KEY_AUTH_PASSWORD, password)
else:
dop.comment("new", ISCSI_CONFIG_KEY_AUTH_USER)
dop.comment("new", ISCSI_CONFIG_KEY_AUTH_PASSWORD)
self.up_progress(10)
if opts.autostart:
dop.cdp_set("new", ISCSI_CONFIG_KEY_SATRTUP, ISCSI_CONFIG_VALUE_SATRTUP_ON)
else:
dop.cdp_set("new", ISCSI_CONFIG_KEY_SATRTUP, ISCSI_CONFIG_VALUE_SATRTUP_OFF)
new_parser.write_conf(dop.getconf("new"))
self.up_progress(10)
discovery_command_args = (ISCSI_CMD,
ISCSI_CMD_OPTION_MODE,
ISCSI_CMD_OPTION_MODE_DISCOVERY,
ISCSI_CMD_OPTION_TYPE,
ISCSI_CMD_OPTION_TYPE_SENDTARGETS,
ISCSI_CMD_OPTION_PORTAL,
opts.host
)
(discovery_rc,discovery_res) = execute_command(discovery_command_args)
self.up_progress(10)
original_parser.write_conf(dop.getconf("original"))
self.up_progress(10)
if discovery_rc != 0:
raise KssCommandException('Failed to add iSCSI. - host=%s message=%s' % (opts.host, discovery_res))
if discovery_res == []:
raise KssCommandException('Failed to add iSCSI. - host=%s message=No exist permit iSCSI disk for target.' % (opts.host))
for node_line in discovery_res:
if not node_line:
continue
try:
node = iscsi_parse_node(node_line)
            except Exception:
self.logger.warn('Failed to parse iSCSI discovery command response. message="%s"' % (node_line))
continue
self.logger.info("%s" % (iscsi_print_format_node(node)))
print(_("%s") % (iscsi_print_format_node(node)), file=sys.stdout)
return True
if __name__ == "__main__":
target = AddIscsi()
sys.exit(target.run())
|
#
# @lc app=leetcode id=700 lang=python3
#
# [700] Search in a Binary Search Tree
#
# https://leetcode.com/problems/search-in-a-binary-search-tree/description/
#
# algorithms
# Easy (73.58%)
# Total Accepted: 290.2K
# Total Submissions: 394.1K
# Testcase Example: '[4,2,7,1,3]\n2'
#
# You are given the root of a binary search tree (BST) and an integer val.
#
# Find the node in the BST that the node's value equals val and return the
# subtree rooted with that node. If such a node does not exist, return null.
#
#
# Example 1:
#
#
# Input: root = [4,2,7,1,3], val = 2
# Output: [2,1,3]
#
#
# Example 2:
#
#
# Input: root = [4,2,7,1,3], val = 5
# Output: []
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [1, 5000].
# 1 <= Node.val <= 10^7
# root is a binary search tree.
# 1 <= val <= 10^7
#
#
#
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def searchBST(self, root: TreeNode, val: int) -> TreeNode:
if root is None:
return None
elif root.val == val:
return root
elif root.val < val:
return self.searchBST(root.right, val)
else:
return self.searchBST(root.left, val)
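# An equivalent iterative sketch (added for illustration, not part of the
# original submission): it avoids the recursion stack by walking down the tree.
class SolutionIterative:
    def searchBST(self, root: TreeNode, val: int) -> TreeNode:
        node = root
        while node is not None and node.val != val:
            # BST property: larger targets live in the right subtree.
            node = node.right if node.val < val else node.left
        return node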
|
from typing import Any
from convertible.Convert.ExceptionHandler.ConvertException import ConvertException
from .Convertible import Convertible
class Optional(Convertible):
"""
A Convertible that will either Convert the argument with the Convertible provided or return None.
"""
__slots__ = ("convertible",)
def __init__(self, convertible: Convertible):
"""
        Initialize an Optional Convertible.
Parameters
----------
convertible : Convertible
            The Convertible used to convert the argument; it may raise an exception on failure.
"""
self.convertible = convertible
def __repr__(self) -> str:
if self.convertible is self:
return f"{self.__class__.__name__}(...)"
else:
return f"{self.__class__.__name__}({self.convertible})"
def convert(self, argument: Any) -> Any:
"""
Converts the argument to the specified type of the Convertible provided or returns None.
Parameters
----------
argument : Any
The argument to be converted.
Returns
-------
Any
The converted argument or None.
"""
try:
return self.convertible.convert(argument)
except ConvertException:
return None
except StopIteration:
return None
|
import math
a,b,c = map(float,input().split())
d = (b*b) - (4*a*c)
if(a==0 or d<0):
print("Impossivel calcular")
else:
d = math.sqrt(d)
r1 = (-b + d) / (2*a)
r2 = (-b -d) / (2*a)
print(f'R1 = {r1:.5f}')
print(f'R2 = {r2:.5f}')
|
from graphviz import Source
s = Source.from_file('fboaventuradev_terraform.dot', format='png')
s.save(filename='fboaventuradev_tf')
s.view()
|
from colorama import Fore, Style
import logging
LEVEL_NAMES = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
LEVEL_COLORS = {
"DEBUG": Style.DIM,
"INFO": '',
"WARNING": Fore.YELLOW,
"ERROR": Fore.RED,
"CRITICAL": Fore.RED
}
LEVEL_SHOW_LABEL = {
"DEBUG": False,
"INFO": False,
"WARNING": True,
"ERROR": True,
"CRITICAL": True
}
RESET_SEQ = Fore.RESET + Style.RESET_ALL
class ColoredFormatter(logging.Formatter):
def __init__(self, supports_color: bool, format_string: str, verbose: bool):
self.is_verbose = verbose
# logging.Formatter.__init__(self, "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logging.Formatter.__init__(self, format_string)
self.use_color = supports_color
def format(self, record: logging.LogRecord):
levelname = record.levelname
if self.use_color and levelname in LEVEL_COLORS:
formatted_level_name = f"[{levelname}]: " if LEVEL_SHOW_LABEL[levelname] or self.is_verbose else ""
colorized_level_name = f"{LEVEL_COLORS[levelname]}{formatted_level_name}"
record.levelname = colorized_level_name
record.msg = f"{colorized_level_name}{record.msg}{RESET_SEQ}"
return logging.Formatter.format(self, record)
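# Illustrative wiring only (not part of the original module): attach the
# formatter to a stream handler; the format string and logger name are
# arbitrary choices for this example. On Windows, colorama.init() may also be
# needed so the ANSI codes are rendered.
if __name__ == "__main__":
    handler = logging.StreamHandler()
    handler.setFormatter(ColoredFormatter(supports_color=True,
                                          format_string="%(message)s",
                                          verbose=False))
    log = logging.getLogger("demo")
    log.addHandler(handler)
    log.setLevel(logging.DEBUG)
    log.warning("something looks off")  # shown with a yellow "[WARNING]: " label
    log.debug("dim diagnostic output")  # shown dimmed, without a label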
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
SPDX document formatting
"""
# document level strings
spdx_version = 'SPDX-2.2'
data_license = 'CC0-1.0'
spdx_id = 'SPDXRef-DOCUMENT'
document_name = 'Tern report for {image_name}'
document_comment = 'This document was generated by ' \
'the Tern Project: https://github.com/tern-tools/tern'
document_namespace = 'https://spdx.org/spdxdocs/tern-' \
'report-{version}-{image}-{uuid}'
license_list_version = '3.8'
creator = 'Tool: tern-{version}'
created = '{timestamp}'
# Dictionary Formatting
def get_relationship_dict(element_id, related_element_id, relationship_type):
'''Given two SPDX element IDs and their relationship type, return a
dictionary that represents the relationship. Assume that the element_id
inputs are provided as SPDXRefs.
{
"spdxElementId" : "SPDXRef-element_id",
"relatedSpdxElement" : "SPDXRef-related_element_id",
"relationshipType" : "relationship_type"
}'''
return {
"spdxElementId": element_id,
"relatedSpdxElement": related_element_id,
"relationshipType": relationship_type
}
def get_extracted_text_dict(extracted_text, license_ref):
'''Given a plain text license string and the corresponding license_ref,
return a dictionary that describes the key-value pair:
{
"extractedText" : "extracted_text"
"licenseId": "license_ref"
}'''
return {
"extractedText": extracted_text,
"licenseId": license_ref
}
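# Short illustration only (the element IDs and license text below are made up)
# of the JSON fragments the two helpers above produce:
# get_relationship_dict("SPDXRef-DOCUMENT", "SPDXRef-image-1", "DESCRIBES")
#   -> {"spdxElementId": "SPDXRef-DOCUMENT",
#       "relatedSpdxElement": "SPDXRef-image-1",
#       "relationshipType": "DESCRIBES"}
# get_extracted_text_dict("Permission is hereby granted ...", "LicenseRef-1")
#   -> {"extractedText": "Permission is hereby granted ...",
#       "licenseId": "LicenseRef-1"}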
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 15:36:42 2020
@author: nikbakht
"""
#---------------------------------
import tensorflow as tf
#import socket
GPU_mode = 0
if GPU_mode:
    num_GPU = 0  # GPU to use, can be 0, 2
mem_growth = True
print('Tensorflow version: ', tf.__version__)
gpus = tf.config.experimental.list_physical_devices("GPU")
print('Number of GPUs available :', len(gpus))
tf.config.experimental.set_visible_devices(gpus[num_GPU], 'GPU')
tf.config.experimental.set_memory_growth(gpus[num_GPU], mem_growth)
print('Used GPU: {}. Memory growth: {}'.format(num_GPU, mem_growth))
import numpy as np
import os
import time
# import matplotlib.pyplot as plt
import scipy.io as sio
#import h5py
#import pandas as pd
from datetime import datetime
# from Data_conv import Data
from Data0 import Data
from Plot_results_downlink import Plot
# from UNNdebug import UNN
from UNN_downlink import UNN
from Loss_downlink import Loss
import pickle
#------------------------------------------
# tf.keras.backend.set_floatx('float64')
#train_iterations = 100
batch_size =100
# train_per_database=100
# database_size=batch_size*train_per_database
EPOCHS =int(10e3)
Nuser = 30
Nap = 30
#Lambda=.001
#alpha=1
Id_save='2'
save_model=1
P_over_noise=120 # dB
cost_type='maxmin'
# cost_type = 'maxproduct'
# load = True # set it False for training
load = False
# -----------------------------------------
#
def train(obj,Dataobj,epochs,mode):
# TF board logs
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = './logs/' + current_time + '/train'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
best_test_rate = -float('inf')
best_W = None
LR=np.logspace(-3,-4.5, num=epochs)
G_batch,_=Dataobj(5*batch_size)
SNR = np.power(10,P_over_noise/10)*G_batch
#--------------Uncomment one of the following options
Xin = np.reshape(np.log(SNR),[SNR.shape[0],-1])
# Xin=tf.linalg.diag_part(SNR)
#-----------------------------------
obj.Xin_av=np.mean(Xin,axis=0)
obj.Xin_std=np.std(Xin,axis=0)
J_total =[]
min_SINR_total=[]
try:
for i in range(epochs):
LR_i=LR[i ]
optimizer = tf.keras.optimizers.Adam(LR_i)
G_batch,_=Dataobj(5*batch_size)
SNR=tf.pow(10.0,P_over_noise/10.0)*G_batch
# --------------Uncomment one of the following options
xin=tf.reshape(tf.math.log(SNR),[SNR.shape[0],-1])
# xin=np.log(np.diagonal(SNR,axis1=1,axis2=2))
xin=(xin-obj.Xin_av)/obj.Xin_std
J=[]
min_SINR_vec =[]
for j in range(5):
index = tf.random.uniform([batch_size],0,xin.shape[0],dtype=tf.dtypes.int32)
xin_j = tf.gather(xin,index,axis=0)
SNR_j = tf.gather(SNR,index,axis=0)
with tf.GradientTape() as tape:
# Forward pass.
cost, _, min_SINR = obj(xin_j, SNR_j)
# Get gradients of loss wrt the weights.
gradients = tape.gradient(cost, obj.trainable_weights)
# Gradient clipping
gradients, grad_norm = tf.clip_by_global_norm(gradients, 1.0)
# Update the weights of our linear layer.
# grad_check = [0]*len(c_gradients)
# for grad_i in range(len(c_gradients)):
# # try:
# grad_check = tf.debugging.check_numerics(c_gradients[grad_i],'UNN: Gradient error')
# # except:
# # pass
# with tf.control_dependencies([grad_check]):
optimizer.apply_gradients(zip(gradients, obj.trainable_weights))
J.append(cost.numpy())
min_SINR_vec.append(min_SINR.numpy())
J_total.append(cost.numpy())
min_SINR_total.append(min_SINR.numpy())
# print(i)
if i % 10 == 0:
# test_rate=cost.numpy()[0]
test_rate = np.mean(J)
# bit2r.LR=bit2r.LR*.85
# print('iter i=',i,'average cost is ', test_rate)
print('Iteration = ', i, 'Cost = ', np.mean(J), 'sir_min_av = ', np.mean(min_SINR_vec))
# if test_rate > best_test_rate:
best_test_rate = test_rate
best_W = obj.get_weights()
save_model(obj, 'models/' + mode + 'UNN_' + current_time + '.mod')
with train_summary_writer.as_default():
tf.summary.scalar('test rate', test_rate, step=i)
tf.summary.scalar('best test rate', best_test_rate, step=i)
except KeyboardInterrupt:
pass
obj.set_weights(best_W)
return J_total,min_SINR_total
def save_model(model, fn):
# W = model.get_weights()
W = [model.get_weights(), model.Xin_av, model.Xin_std]
with open(fn, 'wb') as f:
pickle.dump(W, f)
def load_model(model, fn):
with open(fn, 'rb') as f:
W = pickle.load(f)
model.set_weights(W[0])
model.Xin_av = W[1]
model.Xin_std = W[2]
# ---------------------------------------------
data = Data(Nuser)
G_batch, p_frac = data(2 * batch_size, .3)
# xin=np.reshape(G_batch,[batch_size,-1])
SNR = np.power(10, P_over_noise / 10) * G_batch
xin = np.reshape(np.log(SNR),[SNR.shape[0],-1])
# xin = np.log(np.diagonal(SNR, axis1=1, axis2=2))
# xin = tf.linalg.diag_part(SNR)
#
######
unn = UNN(Nap, Nuser, cost_type)
if load:
cost, SINR, _ = unn(xin, SNR)
current_dir = os.getcwd()
path = os.path.join(current_dir, '../Downlink/models_trained', 'maxminUNN.mod')
# load_model(unn, 'C:\\Users\\nikbakht\\OneDrive - Nokia\\UPF\\Codes\\UNN\\Cellular\\python\\lib\\models\\xUNN.mod')
load_model(unn, path)
# xin=(xin-unn.Xin_av)/unn.Xin_std
else:
J_train, min_SINR_train = train(unn, data, EPOCHS, cost_type)
# tensorboard --logdir ./logs --bind_all
xin = (xin - unn.Xin_av) / unn.Xin_std
cost, SINR, min_SINR = unn.Loss(SNR, unn.Network(xin))
print('Test cost is ', cost.numpy(), ' min_SINR is ', min_SINR.numpy())
RP = Plot()
SIR_NN_clip = RP.sinr_av(SNR, unn.Network(xin), Nap, Nuser)
SIR_NN = RP.sinr_av(SNR, unn.Network(xin), Nap, Nuser, 'Noclip')
SIR_frac = RP.sinr_av(SNR, p_frac, Nap, Nuser)
plot = Plot()
sir_vec = [SIR_NN.numpy(), SIR_frac.numpy()]
plot.cdfplot(sir_vec)
# ----------------------------------------
# unique_name=time.ctime(time.time())
# unique_name=unique_name[0:19]
if not load:
sio.savemat('SIR' + 'Uplink' + cost_type + '.mat',
{'SIR_NN': SIR_NN.numpy(), 'SIR_NN_clip': SIR_NN_clip.numpy(), 'SIR_frac': SIR_frac.numpy(),
'J_train': J_train, 'min_SINR_train': min_SINR_train, 'Nap': Nap, 'Nuser': Nuser}) |
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from hustlers.constants import REGULAR_HUSTLER_PERMISSIONS
from hustlers.utils.permission_utils import (
assign_hustler_admin_panel_access,
assign_hustler_permission_group,
)
class Hustler(models.Model):
"""
One to One with the Django User model
Each hustler can have interests in zero or more categories
"""
bio = models.TextField(max_length=500, blank=True)
    # todo: consider removing this primary key constraint for uniformity across model pks
django_user = models.OneToOneField(
to=User, primary_key=True, related_name="hustler", on_delete=models.PROTECT
)
interests = models.ManyToManyField(
to="knowledge.Category", blank=True, related_name="hustlers"
)
created_by = models.ForeignKey(
to="self", null=True, blank=True, on_delete=models.PROTECT
)
modified_by = models.ForeignKey(
to="self",
null=True,
blank=True,
on_delete=models.PROTECT,
related_name="hustlers_modified",
)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
db_table = "hustlers"
@classmethod
def from_db(cls, db, field_names, values):
new = super(Hustler, cls).from_db(db, field_names, values)
# cache existing value
new._updated_django_user_id = values[field_names.index("django_user_id")]
return new
def save(self, *args, **kwargs):
hustler_created = True if self._state.adding else False
if hasattr(self, "_updated_django_user_id"):
if (
not hustler_created
and self._updated_django_user_id is not None
and self._updated_django_user_id != self.django_user_id
):
raise ValidationError("You cannot reassign Hustler to different User!")
super(Hustler, self).save(*args, **kwargs)
if hustler_created:
assign_hustler_admin_panel_access(
self, **REGULAR_HUSTLER_PERMISSIONS.get("admin_panel")
)
assign_hustler_permission_group(
hustler_object=self,
permission_groups=REGULAR_HUSTLER_PERMISSIONS.get("groups"),
)
@property
def username(self):
return "{0}".format(self.django_user.username)
@property
def first_name(self):
return "{0}".format(self.django_user.first_name)
@property
def last_name(self):
return "{0}".format(self.django_user.last_name)
@property
def full_name(self):
return "{0} {1}".format(self.first_name, self.last_name)
@property
def email(self):
return "{0}".format(self.django_user.email)
@property
def active(self):
return "{0}".format(self.django_user.is_active)
@property
def superuser_access(self):
return self.django_user.is_superuser
def __str__(self):
return "{0}".format(self.username)
# signals
@receiver(post_save, sender=User)
def save_hustler(sender, instance, **kwargs):
"""
    Save the related Hustler every time the User object changes, keeping modified_at in sync.
"""
if hasattr(instance, "hustler"):
instance.hustler.save()
|
"""
机器学习是关于学习数据集的一些属性并将其应用于新数据。
这就是为什么在机器的普遍做法学习评价的算法是手头上的数据分成两组,
一个是我们所说的训练集上,
我们了解到,我们称之为数据属性和一个测试集 上,我们测试这些属性。
scikit-learn提供了一些标准数据集,例如:
用于分类的 虹膜和数字数据集和波士顿房价回归数据集。
数据集是一个类似字典的对象,它保存有关数据的所有数据和一些元数据。该数据存储在.data成员中,它是一个数组。
在监督问题的情况下,一个或多个响应变量存储在成员中。
有关不同数据集的更多详细信息,请参见 : 数据集加载工具一节。
""" |
import numpy
import theano
import theano.tensor as tensor
from neural_srl.theano.util import floatX
def adadelta(parameters, gradients, rho=0.95, eps=1e-6):
""" Reference: ADADELTA: An Adaptive Learning Rate Method,
Zeiler 2012. https://arxiv.org/abs/1212.5701
Adapted from the Adadelta implementation from Tensorflow.
"""
accum = [theano.shared(numpy.zeros(p.get_value().shape, floatX)) for p in parameters]
accum_updates = [theano.shared(numpy.zeros(p.get_value().shape, floatX)) for p in parameters]
new_accum = [rho * g0 + (1.0 - rho) * (g**2) for g0, g in zip(accum, gradients)]
updates = [tensor.sqrt(d0 + eps) / tensor.sqrt(g0 + eps) * g for d0, g0, g in zip(accum_updates,
new_accum,
gradients)]
new_accum_updates = [rho * d0 + (1.0 - rho) * (d**2) for d0, d in zip(accum_updates,
updates)]
accum_ = list(zip(accum, new_accum))
accum_updates_ = list(zip(accum_updates, new_accum_updates) )
parameters_ = [ (p, (p - d)) for p,d in zip(parameters, updates)]
return accum_ + accum_updates_ + parameters_
def gradient_clipping(gradients, max_norm=5.0):
global_grad_norm = tensor.sqrt(sum(map(lambda x: tensor.sqr(x).sum(), gradients)))
multiplier = tensor.switch(global_grad_norm < max_norm, 1.0, max_norm / global_grad_norm)
return [g * multiplier for g in gradients]
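# A small end-to-end sketch (the variable names and shapes below are
# illustrative, not taken from the original project) of how the update list
# returned by adadelta() and the clipped gradients plug into a compiled
# Theano training step.
def _example_training_step():
    x = tensor.matrix('x')
    y = tensor.vector('y')
    w = theano.shared(numpy.zeros(4, floatX), name='w')
    loss = tensor.mean((tensor.dot(x, w) - y) ** 2)
    grads = gradient_clipping(tensor.grad(loss, [w]))
    # theano.function applies each (shared_variable, new_value) pair per call.
    return theano.function([x, y], loss, updates=adadelta([w], grads))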
|
# -*- coding: utf-8 -*-
'''
Often-needed functions when using binet
Copyright © 2013-2015 Thomas Unterthiner.
Licensed under GPL, version 2 or a later (see LICENSE.rst)
'''
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import sys
if sys.version_info < (3,):
range = xrange
import cPickle as pickle
else:
import pickle
import numpy as np
import time
import os
import gc
import logging
import warnings
import copy
# Importing matplotlib might fail under special conditions
# e.g. when using ssh w/o X11 forwarding
try:
import matplotlib.pyplot as plt
except ImportError:
warnings.warn("matplotlib unavailable")
def generate_slices(n, size, ignore_last_minibatch_if_smaller=False):
"""Generates slices of given size up to n"""
start, end = 0, 0
for pack_num in range(int(n / size)):
end = start + size
yield slice(start, end, None)
start = end
# last slice might not be a full batch
if not ignore_last_minibatch_if_smaller:
if end < n:
yield slice(end, n, None)
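# Quick illustration (added for clarity, not in the original): batching 10
# items with size 4 yields slice(0, 4), slice(4, 8) and the smaller tail
# slice(8, 10) unless ignore_last_minibatch_if_smaller is True.
# >>> list(generate_slices(10, 4))
# [slice(0, 4, None), slice(4, 8, None), slice(8, 10, None)]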
def plot_images(data, nrows, ncols, is_color=False, axis=None,
local_norm="maxabs", **kwargs):
''' Plots several images stored in the rows of data.'''
nchannels = 3 if is_color else 1
ppi = int(np.sqrt(data.shape[-1]/nchannels) + 2) # pixel per image +2 for borders
imgshape = (nrows*ppi, ncols*ppi, nchannels)
# make sure border is black
img = {"maxabs": lambda s: (data.min() / np.abs(data).max()) * np.ones(imgshape, dtype=data.dtype),
"minmax": lambda s: np.zeros(imgshape, dtype=data.dtype),
"none": lambda s: np.ones(imgshape, dtype=data.dtype)*data.min()
}[local_norm.lower()](None)
if len(data.shape) < 3:
data = data.reshape(data.shape[0], nchannels, ppi-2, ppi-2)
n = min(nrows*ncols, data.shape[0])
normfunc = {"maxabs": lambda d: d / np.abs(d).max(),
"minmax": lambda d: (d - d.min()) / d.ptp(), # normalize to [0, 1]
"none": lambda d: d
}[local_norm.lower()]
idx = 0
for r in range(nrows):
for c in range(ncols):
if idx >= n:
break
d = np.rollaxis(data[idx, ], 0, 3)
d = normfunc(d)
img[r*ppi+1:(r+1)*ppi-1, c*ppi+1:(c+1)*ppi-1] = d
idx += 1
    if axis is None:
fig = plt.figure(facecolor="black", **kwargs)
fig.subplots_adjust(hspace=0, top=1, bottom=0, wspace=0, left=0, right=1)
axis = fig.gca()
else:
fig = None
if is_color:
axis.imshow(img, interpolation="none")
else:
axis.imshow(img.reshape(nrows*ppi, ncols*ppi), interpolation="none", cmap="Greys_r")
axis.axis("off")
return fig
def heuristic_svm_c(x):
    ''' Heuristic for setting the C for linear SVMs proposed by Thorsten Joachims.'''
c = 0
n = x.shape[0]
for i in range(n):
c += np.sqrt(x[i, ].dot(x[i, ]))
c /= n
return 1.0 / c
def plot_learning_curves(net, start_idx=5, end_idx=None,
min_error=np.log(np.finfo(np.float32).tiny),
*args, **kwargs):
if end_idx is None or end_idx > net.statistics.shape[0]:
end_idx = net.statistics.shape[0]
if end_idx - start_idx <= 0:
warnings.warn("Not enough data to plot learning curves")
return
data = net.statistics.ix[start_idx:end_idx]
fig = plt.figure(*args, **kwargs)
ax1 = plt.gca()
np.log10(data[["train_error", "val_error"]]).plot(ax=ax1, legend=False)
ax1.set_xlabel("epoch")
ax1.set_ylabel("Cross-Entropy Error (log10)")
ax2 = ax1.twinx()
colcyc = ax2._get_lines.color_cycle # we need to jump 2 colors
col = [next(colcyc), next(colcyc), next(colcyc)]
data[['val_score']].plot(ax=ax2, color=col[2], linestyle=":", legend=False)
ax2.set_ylabel("Validationset Accuracy", color=col[2])
# we need to draw the legend separately, otherwise each axis would create
# its own legend
handles, labels = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
handles += h2
labels += l2
fig.legend(handles, labels, loc="lower left", prop={'size':9})
    fig.tight_layout()
return fig
def train(net, dataset, fname=None, skip_output=25,
show_plots=False, use_gpu=True, **kwargs):
''' Trains a neural network on the given dataset.
If desired, the log-statements during training can be buffered into a
StringIO object. This has the drawback that the output is only visible
    once the net has been fully trained, but it allows printing only every
    n-th message.
Parameters
----------
net: the neural net.
dataset: tuple containing 'trainx', 'trainy', 'validx', 'validy'
fname: file-name in which to store the (pickled) network after training.
The file will be stored in the 'data' subfolder of the CWD.
skip_output: how many lines of output to skip between two lines that
will actually be printed.
show_plots: If True, plot the first 256 weights of the lowest layer.
use_gpu: if True, use gnumpy to run the code on the GPU.
    **kwargs: additional parameters for the `plot_images` call when
        `show_plots=True`.
'''
from binet import op
if use_gpu:
gc.collect()
if not op._IS_CUDA_INITIALIZED:
logger = logging.getLogger(__name__)
logger.warn("CUDA not initialized, initializing GPU 0")
op.init_gpu(0)
X, y, Xvalid, yvalid = [op.to_gpu(d) for d in dataset]
net = op.to_gpu(net)
else:
X, y, Xvalid, yvalid = dataset
try:
init_out = net.transform(X)
init_err = net._get_loss(y, init_out)
net.track_progress(time.time(), init_err, X, y, Xvalid, yvalid)
net.fit(X, y, Xvalid, yvalid, skip_output=skip_output)
#if net.verbose and net.current_epoch % skip_output != 0: # make sure we show the last line
# net.track_progress(time.time(), -1, X, y, Xvalid, yvalid)
except KeyboardInterrupt:
print("Intercepted KeyboardInterrupt, stopping... current status:")
net.track_progress(time.time(), -1, X, y, Xvalid, yvalid)
net.statistics = net.statistics[:-1] # we just added an invalid point
finally:
net = op.to_cpu(net)
if fname:
if not os.path.exists("data"):
warnings.warn("creating 'data' directory to store pickled net")
os.mkdir("data")
with open(os.path.join("data", fname), "wb") as f:
pickle.dump(net, f, -1)
if show_plots:
plot_images(net.weights[0], 16, 16, **kwargs)
plot_learning_curves(net, **kwargs)
return net
def train_ensemble(prototype_net, dataset, outfile=None, n_nets=10, use_gpu=True):
''' Trains a given number of networks on a given dataset.
All networks will be clones of the given prototoype, and they will all
be pickled into the given outfile.'''
from binet import op
if use_gpu:
gc.collect()
if not op._IS_CUDA_INITIALIZED:
logger = logging.getLogger(__name__)
logger.warn("CUDA not initialized, initializing GPU 0")
op.init_gpu(0)
X, y, Xvalid, yvalid = [op.to_gpu(d) for d in dataset]
prototype_net = op.to_gpu(prototype_net)
else:
X, y, Xvalid, yvalid = dataset
if outfile is not None:
f = open(outfile, "wb")
nets = []
try:
for i in range(n_nets):
prototype_net.reset()
if use_gpu:
prototype_net = op.to_gpu(prototype_net)
prototype_net.fit(X, y, Xvalid, yvalid)
prototype_net = op.to_cpu(prototype_net)
nets.append(copy.deepcopy(prototype_net))
if outfile is not None:
pickle.dump(prototype_net, f, -1)
finally:
if outfile is not None:
f.close()
return nets
def load_ensemble(fn):
nets = []
    with open(fn, "rb") as f:
try:
while f:
nets.append(pickle.load(f))
except EOFError:
return nets
def print_system_information(additional_modules=[]):
'''Prints general system information.
Prints host information as well as version information about some of the
more important packages. This is useful in IPython notebooks.'''
import sys, os, datetime, platform
host_info = (platform.node(), platform.platform())
print("Host: ", "%s: %s" % host_info)
print("Date: ", str(datetime.datetime.now()))
print("Python version: ", sys.version.replace("\n", "\n" + " "*21))
repo_version = str(os.popen("git log | head -1").readline().strip())
if not repo_version.startswith("fatal:"):
print("repository version: ", repo_version)
print("\nloaded modules:")
# make sure most important modules are here, even if we only imported
# some submodules
import binet, numpy, scipy
modlist = ['scipy', 'numpy', 'sklearn', 'IPython', 'matplotlib',
'binet', 'pandas', 'joblib']
modlist.extend(additional_modules)
    mod = [sys.modules[m] for m in modlist if m in sys.modules]
mod.sort(key = lambda x: x.__name__)
for m in mod:
try:
print("\t", m.__name__, m.__version__)
except AttributeError:
pass
def get_timestamp(fmt='%Y%m%d_%H%M%S'):
'''Returns a string that contains the current date and time.'''
import datetime
now = datetime.datetime.now()
return datetime.datetime.strftime(now, fmt)
|
coords = []
while True:
try:
pair = list(map(int, input().split(",")))
coords.append(pair)
except:
break
xmin = min([pair[0] for pair in coords])
xmax = max([pair[0] for pair in coords])
ymin = min([pair[1] for pair in coords])
ymax = max([pair[1] for pair in coords])
xd = xmax - xmin + 3
yd = ymax - ymin + 3
board = [[(-1, xmax + ymax) for _ in range(xd)] for _ in range(yd)]
def update(i, a, b):
for xRel in range(xd):
for yRel in range(yd):
x = xmin - 1 + xRel
y = ymin - 1 + yRel
d = abs(a-x) + abs(b-y)
if d < board[yRel][xRel][1]:
board[yRel][xRel] = (i, d)
elif d == board[yRel][xRel][1]:
board[yRel][xRel] = (-1, d)
for (j, (x, y)) in enumerate(coords):
update(j, x, y)
infinite = set([i[0] for i in board[0]])
infinite = infinite.union(set([i[0] for i in board[yd-1]]))
infinite = infinite.union(set([i[0][0] for i in board]))
infinite = infinite.union(set([i[xd-1][0] for i in board]))
biggest = -1
pos = 0
for i in range(len(coords)):
area = sum([[pair[0] for pair in board[j]].count(i) for j in range(yd)])
if (area > biggest) and (i not in infinite):
biggest = area
pos = i
print("a: " + str(biggest) + " (" + str(pos) + ")")
# b) ---
upperB = 10000
count = 0
for xRel in range(xd):
for yRel in range(yd):
x = xmin - 1 + xRel
y = ymin - 1 + yRel
sumDist = sum([abs(x - pair[0]) + abs(y - pair[1]) for pair in coords])
if sumDist < upperB:
count += 1
print("b: " + str(count))
|
from z3 import *
npcs = []
biomes = []
class npc(object):
def __init__(self, name):
self.name = name
self.sells = True
self.guide = False
self._loves = []
self._likes = []
self._dislikes = []
self._hates = []
self.near = {}
npcs.append(self)
def loves(self, *loves):
self._loves = loves
def likes(self, *likes):
self._likes = likes
def dislikes(self, *dislikes):
self._dislikes = dislikes
def hates(self, *hates):
self._hates = hates
guide = npc("Guide")
merchant = npc("Merchant")
zoologist = npc("Zoologist")
golfer = npc("Golfer")
nurse = npc("Nurse")
tavernkeep = npc("Tavernkeep")
party_girl = npc("Party girl")
wizard = npc("Wizard")
demolitionist = npc("Demolitionist")
goblin_tinkerer = npc("Goblin tinkerer")
clothier = npc("Clothier")
dye_trader = npc("Dye trader")
arms_dealer = npc("Arms dealer")
steampunker = npc("Steampunker")
dryad = npc("Dryad")
painter = npc("Painter")
witch_doctor = npc("Witch doctor")
stylist = npc("Stylist")
angler = npc("Angler")
pirate = npc("Pirate")
mechanic = npc("Mechanic")
tax_collector = npc("Tax collector")
cyborg = npc("Cyborg")
#santa = npc("Santa claus")
truffle = npc("Truffle")
class biome(object):
def __init__(self, name):
self.name = name
biomes.append(self)
forest = biome("Forest")
hallow = biome("Hallow")
underground = biome("Underground")
desert = biome("Desert")
jungle = biome("Jungle")
ocean = biome("Ocean")
snow = biome("Snow")
mushroom = biome("Mushroom")
guide.likes(forest, clothier, zoologist)
guide.dislikes(ocean, steampunker)
guide.hates(painter)
guide.sells = False
guide.guide = True
merchant.likes(forest, golfer, nurse)
merchant.dislikes(desert, tax_collector)
merchant.hates(angler)
zoologist.loves(witch_doctor)
zoologist.likes(forest, golfer)
zoologist.dislikes(desert, angler)
zoologist.hates(arms_dealer)
golfer.loves(angler)
golfer.likes(forest, painter, zoologist)
golfer.dislikes(underground, pirate)
golfer.hates(merchant)
nurse.loves(arms_dealer)
nurse.likes(hallow, wizard)
nurse.dislikes(snow, dryad, party_girl)
nurse.hates(zoologist)
nurse.sells = False
tavernkeep.loves(demolitionist)
tavernkeep.likes(hallow, goblin_tinkerer)
tavernkeep.dislikes(snow, guide)
tavernkeep.hates(dye_trader)
party_girl.loves(wizard, zoologist)
party_girl.likes(hallow, stylist)
party_girl.dislikes(underground, merchant)
party_girl.hates(tax_collector)
wizard.loves(golfer)
wizard.likes(hallow, merchant)
wizard.dislikes(ocean, witch_doctor)
wizard.hates(cyborg)
demolitionist.loves(tavernkeep)
demolitionist.likes(underground, mechanic)
demolitionist.dislikes(ocean, arms_dealer, goblin_tinkerer)
goblin_tinkerer.loves(mechanic)
goblin_tinkerer.likes(underground, dye_trader)
goblin_tinkerer.dislikes(jungle, clothier)
goblin_tinkerer.hates(stylist)
clothier.loves(truffle)
clothier.likes(underground, tax_collector)
clothier.dislikes(hallow, nurse)
clothier.hates(mechanic)
dye_trader.likes(desert, arms_dealer, painter)
dye_trader.dislikes(forest, steampunker)
dye_trader.hates(pirate)
arms_dealer.loves(nurse)
arms_dealer.likes(desert, steampunker)
arms_dealer.dislikes(snow, golfer)
arms_dealer.hates(demolitionist)
steampunker.loves(cyborg)
steampunker.likes(desert, painter)
steampunker.dislikes(jungle, dryad, wizard, party_girl)
dryad.likes(jungle, witch_doctor, truffle)
dryad.dislikes(desert, angler)
dryad.hates(golfer)
painter.loves(dryad)
painter.likes(jungle, party_girl)
painter.dislikes(forest, truffle, cyborg)
witch_doctor.likes(jungle, dryad, guide)
witch_doctor.dislikes(hallow, nurse)
witch_doctor.hates(truffle)
stylist.loves(dye_trader)
stylist.likes(ocean, pirate)
stylist.dislikes(snow, tavernkeep)
stylist.hates(goblin_tinkerer)
angler.likes(ocean, demolitionist, party_girl, tax_collector)
angler.hates(tavernkeep)
angler.sells = False
pirate.loves(angler)
pirate.likes(ocean, tavernkeep)
pirate.dislikes(underground, stylist)
pirate.hates(guide)
mechanic.loves(goblin_tinkerer)
mechanic.likes(snow, cyborg)
mechanic.dislikes(underground, arms_dealer)
mechanic.hates(clothier)
tax_collector.loves(merchant)
tax_collector.likes(snow, party_girl)
tax_collector.dislikes(hallow, demolitionist, mechanic)
#tax_collector.hates(santa)
tax_collector.sells = False
cyborg.likes(snow, steampunker, pirate, stylist)
cyborg.dislikes(jungle, zoologist)
cyborg.hates(wizard)
#santa.loves(snow)
#santa.hates(desert, tax_collector)
truffle.loves(guide)
truffle.likes(dryad)
truffle.dislikes(clothier)
truffle.hates(witch_doctor)
NPC = Datatype("NPC")
for n in npcs:
NPC.declare(n.name)
NPC = NPC.create()
for n in npcs:
n.ctr = getattr(NPC, n.name)
Biome = Datatype("Biome")
for b in biomes:
Biome.declare(b.name)
Biome = Biome.create()
for b in biomes:
b.ctr = getattr(Biome, b.name)
for n in npcs:
n.biome = Const(n.name + "_biome", Biome)
n.near = {}
for i in range(len(npcs)):
for j in range(i+1, len(npcs)):
near = Bool("near_" + npcs[i].name + "_" + npcs[j].name)
npcs[i].near[npcs[j].name] = near
npcs[j].near[npcs[i].name] = near
r = RealVal
def modifier(l, n, mod, result):
if isinstance(l, biome):
return result + If(l.ctr == n.biome, mod, 0)
elif isinstance(l, npc):
return result + If(n.near[l.name], mod, 0)
else:
        raise TypeError("modifier() expects a biome or an npc location")
def happiness(npc):
result = 95
for l in npc._loves:
result = modifier(l, npc, -12, result)
for l in npc._likes:
result = modifier(l, npc, -6, result)
for l in npc._dislikes:
result = modifier(l, npc, 6, result)
for l in npc._hates:
result = modifier(l, npc, 12, result)
return If(result >= 150, 75,
If(result <= 75, 0,
result - 75))
def sells_pylon(npc):
if not npc.sells:
return False
return happiness(npc) <= 10
def biome_sells_pylon(biome):
accum = False
for n in npcs:
accum = Or(accum, And(sells_pylon(n), n.biome == biome.ctr))
return accum
total = 0
for n in npcs:
if not n.guide:
n.happiness = happiness(n)
total += n.happiness
o = Optimize()
o.add(truffle.biome == mushroom.ctr)
o.add(goblin_tinkerer.happiness <= 0)
o.add(tax_collector.happiness <= 2)
o.add(angler.happiness <= 2)
for b in biomes:
#o.add(biome_sells_pylon(b))
nbiome = 0
for n in npcs:
nbiome += If(n.biome == b.ctr, 1, 0)
o.add(nbiome <= 4)
for n in npcs:
nnear = 0
for n2 in npcs:
if n.name != n2.name:
o.add(Implies(n.near[n2.name], n.biome == n2.biome))
for n3 in npcs:
if n3.name != n2.name and n3.name != n.name:
o.add(Implies(And(n.near[n2.name], n2.near[n3.name]), n.near[n3.name]))
nnear += If(n.near[n2.name], 1, 0)
o.add(nnear < 3)
o.minimize(total)
print(o.check())
m = o.model()
print(m.eval(total))
for n in npcs:
print(n.name, ":", m[n.biome], "=", str(m.eval(happiness(n)).as_long() + 75) + "%", end=' ')
near=False
for n2 in npcs:
if n.name != n2.name:
if m[n.near[n2.name]]:
near=True
if near:
print("near", end=' ')
for n2 in npcs:
if n.name != n2.name:
if m[n.near[n2.name]]:
print(n2.name, end=' ')
print()
|
from django.shortcuts import render
from .models import Post
# Create your views here.
|
import sys
sys.path.insert(0, "multidoc")
from multidoc.generate import generate_pybind_documented, generate_cpp_documented
if __name__ == "__main__":
generate_pybind_documented(api_prefix="docstrings", target_src="../tudatpy")
generate_cpp_documented(api_prefix="docstrings", target_src="../tudat")
|
#!/usr/bin/env python
# coding: utf-8
'''
Usage:
process_task.py <config_file>
process_task.py (--help|--version)
Arguments:
config_file the path of config_file
Options:
-h --help show this help message and exit
-v --version show version and exit
'''
import time
import sys
import os
import json
import shutil
import docopt
from schema import Schema, SchemaError
import traceback
import dbpc
import rating_global_vars as gv
from task_rating import Worker
from kombu import Connection
from rating_util import *
from logging.handlers import SysLogHandler
import MySQLdb
import statsd
import random
def get_conf_abspath(args):
os.chdir(gv.run_dir)
conf_path = args['<config_file>']
conf_abs_path = os.path.abspath(conf_path)
os.chdir(gv.bin_dir)
return conf_abs_path
def check_conf_validation(cf):
try:
Schema(lambda x: os.path.exists(x),
               error='config file should exist').validate(cf)
except SchemaError as e:
exit(e)
def parse_conf_file(cfg_file):
with open(cfg_file) as f:
return json.load(f)
def get_global_vars(cfg):
cas_cfg = cfg['casmq']
gv.cas_url = cas_cfg['url']
gv.cas_queue = cas_cfg['queue']
gv.cas_exchange = cas_cfg['exchange']
gv.cas_routing_key = cas_cfg['routing_key']
#gv.priority = cas_cfg['priority']
dbpc_cfg = cfg['dbpc']
gv.dbpc_host = dbpc_cfg['host']
gv.dbpc_port = dbpc_cfg['port']
gv.dppc_service = dbpc_cfg['service']
'''
gv.component = dbpc_cfg['component']
'''
gv.interval = dbpc_cfg['interval']
#gv.try_times_limit = dbpc_cfg['try_times_limit']
gv.dp = dbpc.dbpc(gv.dbpc_host,
int(gv.dbpc_port),
gv.dppc_service,
"query_broker.qb_rating",
int(gv.interval))
'''
swift_cfg = cfg['swift']
gv.st_auth = swift_cfg['ST_AUTH']
gv.st_user = swift_cfg['ST_USER']
gv.st_key = swift_cfg['ST_KEY']
'''
taskpriorit_cfg = cfg['taskprioritymq']
gv.taskpriorit_url = taskpriorit_cfg['url']
gv.taskpriorit_queue = taskpriorit_cfg['queue']
gv.taskpriorit_exchange = taskpriorit_cfg['exchange']
gv.taskpriorit_routing_key = taskpriorit_cfg['routing_key']
gv.databases = cfg['mysql']
gv.file_ext_list = cfg['filter']['file_ext']
gv.min_file_size = cfg['filter']['minfilesize']
gv.max_file_size = cfg['filter']['maxfilesize']
gv.suspicious_mime_types = cfg['filter']['suspicious_mime_types']
statsd_cfg = cfg['statsdserver']
gv.statsdhost = statsd_cfg['host']
gv.statsdport = statsd_cfg['port']
gv.score = cfg['filter']['score']
gv.video_rating_url = cfg['video_rating']
def init_statsd():
gv.statsd_conn = statsd.client.StatsClient(
host=gv.statsdhost, port=gv.statsdport)
def init_logger(cf):
#log_level_map = {'ERROR': 40, 'WARN': 30, 'INFO': 20, 'DEBUG': 10}
#module = cf['module']
log_level = cf['log']['level']
if cf['log'].has_key('logfile'):
gv.log_file = cf['log']['logfile']
g_logger.init_logger(
"query_broker#qb_rating", log_level, gv.log_file, SysLogHandler.LOG_LOCAL2)
g_logger_info.init_logger(
"query_broker#qb_rating", log_level, gv.log_file, SysLogHandler.LOG_LOCAL1)
else:
g_logger.init_logger(
"query_broker#qb_rating", log_level, 'syslog', SysLogHandler.LOG_LOCAL2)
g_logger_info.init_logger(
"query_broker#qb_rating", log_level, 'syslog', SysLogHandler.LOG_LOCAL1)
def main():
args = docopt.docopt(__doc__, version=gv.version)
cfg_file = get_conf_abspath(args)
check_conf_validation(cfg_file)
cfg = parse_conf_file(cfg_file)
init_logger(cfg)
get_global_vars(cfg)
init_statsd()
gv.dp.start()
while True:
with Connection(gv.taskpriorit_url) as conn:
try:
worker = Worker(
conn, gv.taskpriorit_exchange, gv.taskpriorit_queue, gv.taskpriorit_routing_key)
g_logger.info(trans2json('task priority escalator start'))
worker.run()
except Exception:
g_logger.error(
trans2json("task priority escalator %s happend!" % str(traceback.format_exc())))
gv.dp.join()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-13 00:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20171012_1411'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='phone',
field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Telefone'),
),
]
|
VERSION = (0, 10, 0, 'alpha', 1)
|
from setuptools import setup
import httpie_jwt_auth
setup(
name='httpie-jwt-auth',
description='JWTAuth plugin for HTTPie.',
long_description=open('README.rst').read().strip(),
version=httpie_jwt_auth.__version__,
author=httpie_jwt_auth.__author__,
author_email='[email protected]',
license=httpie_jwt_auth.__license__,
url='https://github.com/teracyhq/httpie-jwt-auth',
download_url='https://github.com/teracyhq/httpie-jwt-auth',
py_modules=['httpie_jwt_auth'],
zip_safe=False,
entry_points={
'httpie.plugins.auth.v1': [
'httpie_jwt_auth = httpie_jwt_auth:JWTAuthPlugin'
]
},
install_requires=[
'httpie>=1.0.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Developers',
'Environment :: Plugins',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Utilities'
],
)
|
import cv2
import numpy as np
''' We can use OpenCV functions to draw different shapes,
such as lines, rectangles, circles, etc.
'''
# Create an image
# Here we create a 400x400 image with 3 channels; OpenCV expects dtype uint8.
img = np.zeros((400, 400, 3), dtype = 'uint8')
a = img.copy()
line = cv2.line(a, (50,50), (350,350), (0, 0, 255), 3)
line_ = cv2.line(a, (50,350), (350,50), (0, 0, 255), 3)
''' The first argument is the image itself, followed by the start (x, y) and
end (x, y) points, then the color and finally the thickness.
'''
cv2.imshow('line', line)
# Drawing rectangle
b = img.copy()
rectangle = cv2.rectangle(b,(50, 50), (350, 350), (0, 0, 255), 3)
cv2.imshow('rectangle', rectangle)
# Drawing cirlce
c = img.copy()
# calculating the center of img
(x, y) = (int(img.shape[1]/2), int(img.shape[0]/2))
circle = cv2.circle(c, (x,y), (120), (0, 0, 255), 3)
cv2.imshow('circle', circle)
# a single image having line rectangle and circle
d = img.copy()
line_1 = cv2.line(d, (50,50), (350,350), (0, 0, 255), 3)
line_ = cv2.line(d, (50,350), (350,50), (0, 0, 255), 3)
rectangle_1 = cv2.rectangle(d,(50, 50), (350, 350), (0, 0, 255), 3)
circle_ = cv2.circle(d, (x,y), (60), (0, 0, 255), 3)
circle_1 = cv2.circle(d, (x,y), (120), (0, 0, 255), 3)
cv2.imshow('combined', circle_1)
cv2.waitKey(0)
cv2.destroyAllWindows() |
from django.conf.urls import url
from demo.views import (Home, QuestionDetail)
urlpatterns = [
url(r'^question/(?P<pk>[0-9]+)$', QuestionDetail.as_view(), name="questiondetail"),
url(r'^$', Home.as_view(), name="home"),
]
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = urlpatterns + [
# ... the rest of your URLconf goes here ...
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
from machine import Pin
from time import ticks_us
from time import ticks_diff
IR_PIN = 14
timeStart = -1
data = 0
dataBitIndex = 0
irQueue = [ ]
pin = None
pinIR = None
lastData = 0
def onIRPinChange(p):
global timeStart, data, dataBitIndex, irQueue, lastData
if p.value() == 1: # 1
timeStart = ticks_us()
else: # 0
t = ticks_diff(ticks_us(), timeStart)
# print(t)
if t > 4000: # start signal
data = 0
dataBitIndex = 0
elif t > 2000 and t < 4000 and dataBitIndex == 0:
# print("RE")
irQueue.append(lastData)
else:
if dataBitIndex < 32:
data |= (1 if t > 1000 else 0) << (dataBitIndex)
dataBitIndex = dataBitIndex + 1
if dataBitIndex == 32:
addr = data & 0xFF
iaddr = (data >> 8) & 0xFF
cmd = (data >> 16) & 0xFF
icmd = (data >> 24) & 0xFF
# print(hex(data))
if addr == (iaddr ^ 0xFF) and cmd == (icmd ^ 0xFF):
# print("OK")
irQueue.append(cmd)
lastData = cmd
else:
print("ERROR")
lastData = 0
dataBitIndex = -1
pinIR = Pin(IR_PIN, Pin.IN, Pin.PULL_UP)
pinIR.irq(onIRPinChange, Pin.IRQ_FALLING|Pin.IRQ_RISING)
def read():
global irQueue
if len(irQueue):
data = irQueue[0]
irQueue = irQueue[1:]
return data
else:
return 0
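# Hypothetical polling loop (not part of the original driver): read() returns 0
# when the queue is empty, and NEC repeat frames re-queue the last command.
# while True:
#     cmd = read()
#     if cmd:
#         print("IR command:", hex(cmd))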
|
import requests
from bs4 import BeautifulSoup
def fetch_reddit_posts():
news = []
community = []
media = []
art = []
funny = []
gaming = []
# def get_stories_from_url(url):
headers = { 'user-agent': 'the front page/0.0.1' }
url = 'https://www.reddit.com/r/news+worldnews+upliftingnews+funny+programmerhumor+jokes+blackpeopletwitter+gaming+leagueoflegends+hearthstone+askreddit+iama+tifu+videos+gifs+pics+movies+art+music+listentothis/.json?limit=75'
#
# if subreddit_name == None:
# r = requests.get(url, headers)
# else:
# r = requests.get('https://www.reddit.com/r/'+subreddit_name+'.json', headers)
r = requests.get(url, headers)
result = r.json()
stories = result['data']['children']
for story in stories:
story_data = story['data']
        story_subreddit = story_data.get('subreddit', '').lower()
if story_subreddit == 'news' or story_subreddit == 'upliftingnews' or story_subreddit == 'worldnews':
news.append(story_data)
if story_subreddit == 'askreddit' or story_subreddit == 'iama' or story_subreddit == 'tifu':
community.append(story_data)
if story_subreddit == 'videos' or story_subreddit == 'pics' or story_subreddit == 'gifs':
media.append(story_data)
if story_subreddit == 'movies' or story_subreddit == 'art' or story_subreddit == 'music' or story_subreddit == 'listentothis':
art.append(story_data)
if story_subreddit == 'funny' or story_subreddit == 'programmerhumor' or story_subreddit == 'jokes' or story_subreddit == 'blackpeopletwitter':
funny.append(story_data)
if story_subreddit == 'gaming' or story_subreddit == 'leagueoflegends' or story_subreddit == 'hearthstone':
gaming.append(story_data)
return {'news':news, 'community':community, 'media':media, 'art':art, 'funny':funny, 'gaming':gaming}
# count = 25
#
|
#!/usr/bin/env python
#
# portable serial port access with python
# this is a wrapper module for different platform implementations
#
# (C) 2017-2017 Kenneth Ceyer <[email protected]>
# this is distributed under
# Apache 2.0 <https://www.apache.org/licenses/LICENSE-2.0>
|
import argparse
import socket
import datetime
import yaml
import torch
import numpy as np
import time
import random
import math
import os
import getpass
import glob
from functools import reduce
import operator
def pad_with_last_col(matrix, cols):
out = [matrix]
pad = [matrix[:, [-1]]] * (cols - matrix.size(1))
out.extend(pad)
return torch.cat(out, dim=1)
def pad_with_last_val(vect, k):
device = "cuda" if vect.is_cuda else "cpu"
pad = torch.ones(k - vect.size(0), dtype=torch.long, device=device) * vect[-1]
vect = torch.cat([vect, pad])
return vect
def sparse_prepare_tensor(tensor, torch_size, ignore_batch_dim=True):
if ignore_batch_dim:
tensor = sp_ignore_batch_dim(tensor)
tensor = make_sparse_tensor(tensor, tensor_type="float", torch_size=torch_size)
return tensor
def sp_ignore_batch_dim(tensor_dict):
tensor_dict["idx"] = tensor_dict["idx"][0]
tensor_dict["vals"] = tensor_dict["vals"][0]
return tensor_dict
def sort_by_time(data, time_col):
_, sort = torch.sort(data[:, time_col])
data = data[sort]
return data
def print_sp_tensor(sp_tensor, size):
print(
torch.sparse.FloatTensor(
sp_tensor["idx"].t(), sp_tensor["vals"], torch.Size([size, size])
).to_dense()
)
def reset_param(t):
stdv = 2.0 / math.sqrt(t.size(0))
t.data.uniform_(-stdv, stdv)
# Takes an edge list and turns it into an adjacency matrix
def make_sparse_tensor(adj, tensor_type, torch_size):
if len(torch_size) == 2:
tensor_size = torch.Size(torch_size)
elif len(torch_size) == 1:
tensor_size = torch.Size(torch_size * 2)
if tensor_type == "float":
return torch.sparse.FloatTensor(
adj["idx"].t(), adj["vals"].type(torch.float), tensor_size
)
elif tensor_type == "long":
return torch.sparse.LongTensor(
adj["idx"].t(), adj["vals"].type(torch.long), tensor_size
)
else:
raise NotImplementedError("only make floats or long sparse tensors")
def sp_to_dict(sp_tensor):
return {"idx": sp_tensor._indices().t(), "vals": sp_tensor._values()}
class Namespace(object):
"""
helps referencing object in a dictionary as dict.key instead of dict['key']
"""
def __init__(self, adict):
self.__dict__.update(adict)
def random_param_value(param, param_min, param_max, type="int"):
    if param is None or str(param).lower() == "none":
if type == "int":
return random.randrange(param_min, param_max + 1)
elif type == "logscale":
interval = np.logspace(np.log10(param_min), np.log10(param_max), num=100)
return np.random.choice(interval, 1)[0]
else:
return random.uniform(param_min, param_max)
else:
return param
def load_data(file):
with open(file) as file:
file = file.read().splitlines()
data = torch.tensor([[float(r) for r in row.split(",")] for row in file[1:]])
return data
def load_data_from_tar(
file,
tar_archive,
replace_unknow=False,
starting_line=1,
sep=",",
type_fn=float,
tensor_const=torch.DoubleTensor,
):
f = tar_archive.extractfile(file)
lines = f.read()
lines = lines.decode("utf-8")
if replace_unknow:
lines = lines.replace("unknow", "-1")
lines = lines.replace("-1n", "-1")
lines = lines.splitlines()
data = [[type_fn(r) for r in row.split(sep)] for row in lines[starting_line:]]
data = tensor_const(data)
return data
def create_parser():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--config_file",
default="experiments/parameters_example.yaml",
type=argparse.FileType(mode="r"),
help="optional, yaml file containing parameters to be used, overrides command line parameters",
)
parser.add_argument(
"--one_cell",
action="store_true",
help="optional, indicate whether to search just one grid cell in the grid search or all",
)
# parser.add_argument('--ncores', type=int, help='optional, indicate how many threads pytorch should spawn, note that dataloader has a num workers and joblib also spawn parallel processes')
return parser
def parse_args(parser):
args = parser.parse_args()
if args.config_file:
data = yaml.load(args.config_file, Loader=yaml.FullLoader)
# delattr(args, 'config_file')
arg_dict = args.__dict__
for key, value in data.items():
arg_dict[key] = value
arg_dict["config_file"] = arg_dict["config_file"].name
return arg_dict
def read_master_args(yaml_file):
try:
with open(yaml_file) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
arg_dict = {}
for key, value in data.items():
arg_dict[key] = value
return arg_dict
except FileNotFoundError as e:
raise type(e)(str(e) + " Master file not found in config folder")
def get_log_folder(args):
if hasattr(args, "log_folder"):
root_log_folder = args.log_folder
else:
root_log_folder = "log"
log_folder = root_log_folder + "/" + args.data + "-" + args.model + "/"
os.makedirs(log_folder, exist_ok=True)
return log_folder
def get_log_name(args, classifier_name="decoder", train_encoder=True):
log_folder = get_log_folder(args)
hostname = socket.gethostname()
if args.temporal_granularity == "continuous" and train_encoder == True:
gridcell = str(
args.learning_rate
) # Only learning rate is significant for this log, also used to distinguish between encoder and decoder logs
else:
gridcell = get_gridcell(args)
currdate = str(datetime.datetime.today().strftime("%Y%m%d%H%M%S"))
log_name = (
log_folder
+ "log_"
+ args.data
+ "_"
+ args.task
+ "_"
+ currdate
+ "_"
+ args.model
+ "_"
+ classifier_name
+ "_r"
+ str(args.rank)
+ "_"
+ hostname
+ "__grid_"
+ gridcell
+ ".log"
)
return log_name
def get_experiment_notification(args):
hostname = socket.gethostname()
username = getpass.getuser()
def arg2str(arg): # If list to string
if type(arg) is type([]):
return "&".join(arg)
else:
return arg
return (
arg2str(args.data) + "_" + arg2str(args.model) + "_" + hostname + "_" + username
)
def get_gridcell(args):
grid = args.grid.items()
if len(grid) <= 0:
grid_str = "nogrid"
else:
grid_str = "_".join(
["{}:{}".format(key, value) for key, value in args.grid.items()]
)
return grid_str
# Returns bool whether to skip a grid cell or not
def skip_cell(args):
log_folder = get_log_folder(args)
gridcell = get_gridcell(args)
for filename in glob.glob(log_folder + "*"):
if gridcell in filename:
return args.skip_computed_grid_cells == True
def add_log_lock(args):
if args.use_logfile:
open(get_log_folder(args) + "/" + get_gridcell(args) + "_lock.log", "a").close()
def remove_log_lock(args):
gridcell = get_gridcell(args)
for filename in glob.glob(get_log_folder(args) + "*"):
if gridcell in filename and "_lock" in filename:
os.remove(filename)
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def get_initial_features_continuous(
args, gcn_args, dataset, force_random_edge_features
):
## TGAT requires that node and edge features are the same size.
# if args.random_feats == True:
# num_feats = gcn_args.layer_2_feats
# edge_features = np.random.rand(dataset.num_edges, num_feats)
# node_features = np.zeros((dataset.num_nodes, edge_features.shape[1]))
# elif type(dataset.edge_features) == type(None):
# # Edge features don't exist, make random edge features, size defined by node features
# start_idx = tasker.data.min_time + args.num_hist_steps
# # Get initial node features
# s = tasker.get_sample(start_idx, partition='TRAIN', test = False, snapshot_based=False, split_start=tasker.data.min_time)
# assert(len(s['hist_ndFeats']) == 1)
# node_features = s['hist_ndFeats'][0]
# node_features = make_sparse_tensor(node_features, tensor_type='float',
# torch_size=[dataset.num_nodes, tasker.feats_per_node]).to_dense().cpu().numpy()
# # Random init of node features. The same number as the output layer should have.. no change of features through the model.
# gcn_args.layer_2_feats = tasker.feats_per_node
# #features_per_node = gcn_args.layer_2_feats
# #node_features = np.random.rand(dataset.num_nodes, features_per_node)
# # Random initiation of (all) edge features
# # Use same dimensions as node features, if not, the attention model breaks...
# features_per_edge = node_features.shape[1]
# num_edges = len(dataset.edges['vals'])
# edge_features = np.random.rand(num_edges, features_per_edge)
# else:
# Edge features exist, make zero node features, size defined by edge features
if type(dataset.edge_features) == type(None) and not force_random_edge_features:
num_feats = gcn_args.layer_2_feats
features_per_edge = num_feats
num_edges = len(dataset.edges["vals"])
edge_features = np.random.rand(num_edges, features_per_edge)
else:
edge_features = dataset.edge_features
node_features = np.zeros((dataset.num_nodes, edge_features.shape[1]))
return edge_features, node_features
|
# encoding=utf-8
# Project: transfer_cws
# Author: xingjunjie
# Create Time: 07/11/2017 2:35 PM on PyCharm
import argparse
from data_utils import load_pre_train, load_vocab, get_processing, Dataset, EvaluateSet
import tensorflow as tf
import os
import pickle
import json
from utils import get_logger
def train_pos(args):
src_embedding = None
target_embedding = None
logger = get_logger(args.log)
logger.info('Model Type: {}'.format(args.type))
if os.path.exists(args.config) and (not args.config == 'debug.json'):
logger.info('Loading config from {}'.format(args.config))
config = json.load(open(args.config, 'r'))
try:
vocab_word = pickle.load(open(config['word'], 'rb'))
vocab_tag = pickle.load(open(config['tag'], 'rb'))
target_vocab_word = pickle.load(open(config['target_word'], 'rb'))
assert len(vocab_word) == config['nword']
assert len(vocab_tag) == config['ntag']
assert len(target_vocab_word) == config['ntarword']
if args.use_pretrain_src:
_, src_embedding = load_pre_train(args.src_embedding)
if args.use_pretrain_target:
_, target_embedding = load_pre_train(args.target_embedding)
except Exception as e:
logger.error(e)
exit(0)
else:
if args.use_pretrain_src:
pre_dictionary, src_embedding = load_pre_train(args.src_embedding)
vocab_word, vocab_tag = load_vocab(args.train_file, pre_dictionary)
else:
vocab_word, vocab_tag = load_vocab(args.train_file)
if args.use_pretrain_target:
pre_dictionary, target_embedding = load_pre_train(args.target_embedding)
target_vocab_word, _ = load_vocab(args.train_file, pre_dictionary)
else:
target_vocab_word, _ = load_vocab(args.target_train_file)
i = 0
while os.path.exists('./.cache/vocab_{}.pickle'.format(str(i))) or os.path.exists(
'./.cache/tag_{}.pickle'.format(str(i))):
i += 1
if not os.path.exists('./.cache'):
os.makedirs('./.cache')
with open('./.cache/vocab_{}.pickle'.format(str(i)), 'wb') as vocab, open(
'./.cache/tag_{}.pickle'.format(str(i)), 'wb') as tag, open(
'./.cache/target_vocab_{}.pickle'.format(str(i)), 'wb') as tar_vocab:
pickle.dump(vocab_word, vocab)
pickle.dump(vocab_tag, tag)
pickle.dump(target_vocab_word, tar_vocab)
with open(args.config, 'w+') as config:
json.dump({
'word': './.cache/vocab_{}.pickle'.format(str(i)),
'tag': './.cache/tag_{}.pickle'.format(str(i)),
'target_word': './.cache/target_vocab_{}.pickle'.format(str(i)),
'nword': len(vocab_word),
'ntag': len(vocab_tag),
'ntarword': len(target_vocab_word)
}, config, indent='\t')
nword = len(vocab_word)
ntag = len(vocab_tag)
ntarword = len(target_vocab_word)
logger.info("Src: {} {}".format(nword, ntag))
logger.info("Target: {}".format(ntarword))
logger.info("Flag: {}".format(args.flag))
logger.info("Src embed trainable: {}".format(not args.disable_src_embed_training))
logger.info("\ntrain:{}\ndev :{}\ntest :{}\n\n".format(args.train_file, args.dev_file, args.test_file))
logger.info("\nTarget: \ntrain:{}\ndev :{}\ntest :{}\n".format(args.target_train_file, args.target_dev_file,
args.target_test_file))
logger.info("MSG: {}\n".format(args.msg))
logger.info("lr_ratio: {}\n".format(str(args.lr_ratio)))
logger.info("penalty_ratio: {}\n".format(str(args.penalty_ratio)))
logger.info("penalty: {}\n".format(str(args.penalty)))
processing_word = get_processing(vocab_word)
processing_tag = get_processing(vocab_tag)
processing_target_word = get_processing(target_vocab_word)
src_train = Dataset(args.train_file, processing_word, processing_tag, None)
src_dev = Dataset(args.dev_file, processing_word, processing_tag, None)
src_test = Dataset(args.test_file, processing_word, processing_tag, None)
target_train = Dataset(args.target_train_file, processing_target_word, processing_tag)
target_dev = Dataset(args.target_dev_file, processing_target_word, processing_tag)
target_test = Dataset(args.target_test_file, processing_target_word, processing_tag)
src_len = len(src_train)
target_len = len(target_train)
ratio = target_len / (src_len + target_len)
logger.info("\nsrc: {}\ntarget: {}\n".format(src_len, target_len))
# ratio = 0.1 if ratio < 0.1 else ratio
target_batch_size = int(ratio * args.batch_size)
target_batch_size = 1 if target_batch_size < 1 else target_batch_size
src_batch_size = args.batch_size - target_batch_size
logger.info("\nsrc_batch_size: {}\ntarget_batch_size: {}".format(src_batch_size, target_batch_size))
assert target_batch_size >= 0
model = Model(args, ntag, nword, ntarwords=ntarword, src_embedding=src_embedding, target_embedding=target_embedding,
logger=logger, src_batch_size=src_batch_size)
model.build()
try:
print("========If !!! it's debugging!==========")
print(args.debug)
if args.debug:
print("========it's debugging!==========")
model.train(src_dev, src_dev, vocab_tag, target_dev, target_dev, target_test, src_batch_size, target_batch_size)
else:
# model.train(src_train, src_dev, vocab_tag, target_train, target_dev, src_batch_size, target_batch_size)
model.train(src_train, src_dev, vocab_tag, target_train, target_dev, target_test, src_batch_size, target_batch_size)
except KeyboardInterrupt:
model.evaluate(target_dev, vocab_tag, target='target')
def predict(args):
config = json.load(open(args.config, 'r'))
try:
vocab_word = pickle.load(open(config['word'], 'rb'))
vocab_tag = pickle.load(open(config['tag'], 'rb'))
target_vocab_word = pickle.load(open(config['target_word'], 'rb'))
assert len(vocab_word) == config['nword']
assert len(vocab_tag) == config['ntag']
assert len(target_vocab_word) == config['ntarword']
except Exception as e:
print(e)
        exit(1)
id_to_word = {value: key for key, value in vocab_word.items()}
id_to_tag = {value: key for key, value in vocab_tag.items()}
processing_word = get_processing(vocab_word)
predict = EvaluateSet(args.predict_file, processing_word)
model = Model(args, len(vocab_tag), len(vocab_word))
model.build()
saver = tf.train.Saver()
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.per_process_gpu_memory_fraction = model.args.gpu_frac
with tf.Session(config=tf_config) as sess:
saver.restore(sess, model.args.model_input)
model.predict(sess, predict, id_to_tag, id_to_word)
print('result saved in {}'.format(args.predict_out))
def main(args):
if args.func == 'train':
train_pos(args)
elif args.func == 'predict':
predict(args)
if __name__ == '__main__':
"""
Functions
"""
parser = argparse.ArgumentParser()
parser.add_argument('func', type=str, choices=['train', 'predict'], help='Function to run.')
"""
Several paths
"""
parser.add_argument('--log', type=str, default="./debug.log", help="path to log file")
parser.add_argument('--src_embedding', type=str, help="Path to pretrained embedding.")
parser.add_argument('--target_embedding', type=str, help="Path to pretrained embedding.")
"""
Model type
"""
parser.add_argument('-t', '--type', type=str, default='1', choices=['1', '2', '3'], help="Model type")
"""
Shared Hyper parameters
"""
# parser.add_argument('--batch_size', type=int, default=20, help="Training batch size")
parser.add_argument('--batch_size', type=int, default=50, help="Training batch size")
# parser.add_argument('--epoch', type=int, default=100, help="Training epoch")
parser.add_argument('--epoch', type=int, default=1, help="Training epoch")
parser.add_argument('--optim', type=str, default='Adam', help="optimizer, SGD or Adam")
parser.add_argument('--learning_rate', type=float, default=0.01, help="Learning rate")
parser.add_argument('--lr_decay', type=float, default=0.99, help="Learning rate decay rate")
parser.add_argument('--embedding_size', type=int, default=50,
help="Embedding size")
"""
training
"""
parser.add_argument('--lstm_hidden', type=int, default=50, help="Hidden dimension of lstm model.")
parser.add_argument('--dropout', type=float, default=0.8, help="Dropout rate of lstm.")
parser.add_argument('--model_output', type=str, default='./model/debug')
parser.add_argument('--model_input', type=str, default='./model/pku', help='path of model used for predict')
parser.add_argument('--train_file', type=str, default='./data/pku_train.txt')
parser.add_argument('--dev_file', type=str, default='./data/pku_dev.txt')
parser.add_argument('--test_file', type=str, default='./data/pku_dev.txt')
# parser.add_argument('--target_train_file', type=str, default='medical_data/forum_train_0.1.txt')
# parser.add_argument('--target_dev_file', type=str, default='medical_data/forum_dev.txt')
# parser.add_argument('--target_test_file', type=str, default='medical_data/forum_test.txt')
parser.add_argument('--target_train_file', type=str, default='./data/pku_train.txt')
parser.add_argument('--target_dev_file', type=str, default='./data/pku_dev.txt')
parser.add_argument('--target_test_file', type=str, default='medical_data/forum_test.txt')
parser.add_argument('--use_pretrain_src', action="store_true")
parser.add_argument('--use_pretrain_target', action="store_true")
parser.add_argument('--nepoch_no_imprv', type=int, default=5, help="Num of epoch with no improvement")
parser.add_argument('--gpu_frac', type=float, default=1.0)
parser.add_argument('-d', '--debug', action='store_true', help='Flag for debug.')
parser.add_argument('--config', type=str, default='debug.json', help='Path to saved config file')
parser.add_argument('--flag', type=int, default=0, help='training flag')
parser.add_argument('--disable_src_embed_training', action="store_true", default=False)
parser.add_argument('--msg', default='No msg.')
parser.add_argument('--matrix', default='matrix.p')
parser.add_argument('--use_adapt', action="store_true")
parser.add_argument('--lr_ratio', default=1.0, type=float)
parser.add_argument('--gpu_device', default=0, type=int)
parser.add_argument('--share_crf', action="store_true")
parser.add_argument('--share_embed', action="store_true")
parser.add_argument('--use_l2', action="store_true")
parser.add_argument('--l2_ratio', default=0.1, type=float)
parser.add_argument('--crf_l2_ratio', default=0.3, type=float)
parser.add_argument('-p', '--penalty', type=str, default='mmd', choices=['kl', 'mmd', 'cmd'])
# parser.add_argument('--penalty_ratio', default=0.05, type=float)
parser.add_argument('--penalty_ratio', default=0, type=float)
"""
Predict
"""
parser.add_argument('--predict_file', type=str, help='Path to file for prediction')
parser.add_argument('--predict_out', type=str, default='predict_out.txt', help='Path to save predict result.')
args = parser.parse_args()
global Model
Model = getattr(__import__('model_{}'.format(args.type)), 'Model')
main(args)
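# Illustrative invocations (a sketch only; the script file name and the data/embedding
# paths are hypothetical, the flags are the ones defined above):
#   python run_transfer.py train --train_file ./data/pku_train.txt \
#       --target_train_file ./data/forum_train.txt --type 1 \
#       --use_pretrain_src --src_embedding ./embeddings/src.vec
#   python run_transfer.py predict --config debug.json \
#       --predict_file ./data/raw_input.txt --model_input ./model/pku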
|
import datetime
import json
import os
import spotipy
from tzlocal import get_localzone
class StreamingHistory:
def __init__(self, path: str = '.') -> None:
self.current_year = datetime.date.today().year
self.end = None
self.minutes_listened = 0
self.hours_listened = 0
        files = [file for file in os.listdir(path)
                 if file.startswith('StreamingHistory') and file.endswith('.json')]
files.sort(key=lambda a: int(a[16:-5]))
if not files:
raise FileNotFoundError('The directory does not contain listening information')
self.data = ()
for file in files:
streaming_file = open(os.path.join(path, file), 'r', encoding='utf-8')
streaming_json = streaming_file.read()
streaming_file.close()
self.data += tuple(json.loads(streaming_json))
for song in self.data:
time_i = datetime.datetime.strptime(song['endTime'] + ' +00:00', '%Y-%m-%d %H:%M %z').astimezone(
get_localzone())
time = datetime.datetime(time_i.year, time_i.month, time_i.day, time_i.hour, time_i.minute)
song['endTime'] = time
self.data = tuple(song for song in self.data if
song['endTime'].year == self.current_year and song['msPlayed'] > 30000)
def activity_by_date(self) -> dict:
dates = {month: [0, 0] for month in range(1, 13)}
for song in self.data:
dates[song['endTime'].month][0] += 1
dates[song['endTime'].month][1] += song['msPlayed']
return dates
def activity_by_time(self) -> dict:
times = {hour: 0 for hour in range(24)}
for song in self.data:
times[song['endTime'].hour] += 1
return times
def retrieve_data(self) -> None:
self.end = self.data[-1]['endTime'].strftime('%b %-d')
self.minutes_listened = round(sum([s['msPlayed'] for s in self.data]) / (1000 * 60))
self.hours_listened = round(self.minutes_listened / 60)
class ListeningInformation:
def __init__(self, streaming_history: StreamingHistory) -> None:
self.data = {}
self.albums = 0
self.top_albums = ()
self.genres = 0
self.top_genres = ()
self.top_artists = ()
self.top_songs = ()
for song in streaming_history.data:
artist, time, duration, track = song['artistName'], song['endTime'], song['msPlayed'], song['trackName']
if artist not in self.data:
self.data[artist] = [0, {}]
if track not in self.data[artist][1]:
self.data[artist][1][track] = 0
self.data[artist][1][track] += 1
self.data[artist][0] += duration
def get_top_artists(self) -> None:
self.top_artists = tuple((artist, self.data[artist][0]) for artist in
sorted(self.data, key=lambda a: self.data[a][0], reverse=True)[:20])
def get_top_songs(self) -> None:
all_songs = {(artist, song): self.data[artist][1][song] for artist in self.data for song in
self.data[artist][1]}
        # each entry is ((artist, track), play count)
        self.top_songs = tuple((pair, all_songs[pair]) for pair in
                               sorted(all_songs, key=all_songs.get, reverse=True)[:100])
class SpotifyAPI:
# Add your own
client_id = ''
client_secret = ''
redirect_uri = 'http://localhost:7777/callback'
scope = 'user-read-recently-played'
def __init__(self, username):
self.username = username
self.authorization = spotipy.oauth2.SpotifyOAuth(username=username,
scope=SpotifyAPI.scope,
client_id=SpotifyAPI.client_id,
client_secret=SpotifyAPI.client_secret,
redirect_uri=SpotifyAPI.redirect_uri)
self.token = self.authorization.get_access_token()
def check_token(self):
if spotipy.SpotifyOAuth.is_token_expired(self.token):
self.token = self.authorization.refresh_access_token(self.token['refresh_token'])
def get_track(self, track_name: str, artist_name: str):
self.check_token()
sp = spotipy.Spotify(auth=self.token['access_token'])
while True:
try:
return sp.track(
sp.search(q=f'artist:{artist_name} track:{track_name}', type='track')['tracks']['items'][0]['id'])
except IndexError:
return None
except:
continue
def get_artist(self, artist_name: str):
self.check_token()
sp = spotipy.Spotify(auth=self.token['access_token'])
while True:
try:
return sp.artist(sp.search(q=f'artist:{artist_name}', type='artist')['artists']['items'][0]['id'])
except IndexError:
return None
except:
continue
def get_features(self, track_id: str):
self.check_token()
sp = spotipy.Spotify(auth=self.token['access_token'])
while True:
try:
features = sp.audio_features([track_id])[0]
return features
except:
continue
def top_albums(self, listening_information: ListeningInformation) -> None:
all_songs = {(artist, song) for artist in listening_information.data for song in
listening_information.data[artist][1]}
albums = {}
counter = 0
for artist, song in all_songs:
track = self.get_track(song, artist)
if not track:
counter += 1
print(counter, 'out of', len(all_songs), 'completed', end='\r')
continue
album = track['album']['name']
key = tuple(artist['name'] for artist in track['album']['artists']) + (album,)
if key not in albums:
albums[key] = 0
albums[key] += listening_information.data[artist][1][song]
counter += 1
print(counter, 'out of', len(all_songs), 'completed', end='\r')
print()
listening_information.albums = len(albums)
listening_information.top_albums = tuple((key[-1], key[:-1], albums[key]) for key in
sorted(albums, key=albums.get, reverse=True)[:10])
def top_genres(self, listening_information: ListeningInformation) -> None:
all_artists = {artist: sum(tuple(listening_information.data[artist][1][song] for song in
listening_information.data[artist][1])) for artist in
listening_information.data}
genres = {}
counter = 0
for artist in all_artists:
try:
a_genres = self.get_artist(artist)['genres']
except TypeError:
counter += 1
print(counter, 'out of', len(all_artists), 'completed', end='\r')
continue
for genre in a_genres:
if genre not in genres:
genres[genre] = 0
genres[genre] += all_artists[artist]
counter += 1
print(counter, 'out of', len(all_artists), 'completed', end='\r')
print()
listening_information.genres = len(genres)
listening_information.top_genres = tuple(genre for genre in sorted(genres, key=genres.get, reverse=True)[:10])
def analyse_listening(sAPI: SpotifyAPI, listening_information: ListeningInformation, sh: StreamingHistory) -> None:
listening_information.get_top_artists()
listening_information.get_top_songs()
sAPI.top_albums(listening_information)
sAPI.top_genres(listening_information)
sh.retrieve_data()
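# Illustrative usage sketch (not part of the original module; the data path and
# username below are hypothetical, and SpotifyAPI.client_id/client_secret above
# must be filled in for the API calls to succeed):
def example_yearly_summary():
    history = StreamingHistory(path='./MyData')
    info = ListeningInformation(history)
    api = SpotifyAPI('my_spotify_username')
    analyse_listening(api, info, history)
    print('Minutes listened:', history.minutes_listened)
    print('Top artists:', info.top_artists[:5])
    print('Top genres:', info.top_genres)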
|
from typing import Union
import scipy.stats as stats
from beartype import beartype
from UQpy.distributions.baseclass import DistributionContinuous1D
class GeneralizedExtreme(DistributionContinuous1D):
@beartype
def __init__(
self,
c: Union[None, float, int],
loc: Union[None, float, int] = 0.0,
scale: Union[None, float, int] = 1.0,
):
"""
:param c: shape parameter
:param loc: location parameter
:param scale: scale parameter
"""
super().__init__(
c=c, loc=loc, scale=scale, ordered_parameters=("c", "loc", "scale")
)
self._construct_from_scipy(scipy_name=stats.genextreme)
|
import os
import strax
import numba
import numpy as np
export, __all__ = strax.exporter()
# (5-10x) faster than np.sort(order=...), as np.sort looks at all fields
# TODO: maybe this should be a factory?
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def sort_by_time(x):
"""Sort pulses by time, then channel.
Assumes you have no more than 10k channels, and records don't span
more than 100 days. TODO: FIX this
"""
if len(x) == 0:
# Nothing to do, and .min() on empty array doesn't work, so:
return x
# I couldn't get fast argsort on multiple keys to work in numba
# So, let's make a single key...
sort_key = (x['time'] - x['time'].min()) * 10000 + x['channel']
sort_i = np.argsort(sort_key)
return x[sort_i]
# Getting endtime jitted is a bit awkward, especially since it has to
# keep working with NUMBA_DISABLE_JIT, which we use for coverage tests.
# See https://github.com/numba/numba/issues/4759
if os.environ.get("NUMBA_DISABLE_JIT"):
@export
def endtime(x):
"""Return endtime of intervals x"""
if 'endtime' in x.dtype.fields:
return x['endtime']
else:
return x['time'] + x['length'] * x['dt']
else:
@export
@numba.generated_jit(nopython=True, nogil=True)
def endtime(x):
"""Return endtime of intervals x"""
if 'endtime' in x.dtype.fields:
return lambda x: x['endtime']
else:
return lambda x: x['time'] + x['length'] * x['dt']
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def from_break(x, safe_break, not_before=0, left=True, tolerant=False):
"""Return records on side of a break at least safe_break long
If there is no such break, return the best break found.
"""
if tolerant:
raise NotImplementedError
if not len(x):
raise NotImplementedError("Cannot find breaks in empty data")
if len(x) == 1:
raise NoBreakFound()
break_i = _find_break_i(x, safe_break=safe_break, not_before=not_before)
break_time = x[break_i]['time']
if left:
return x[:break_i], break_time
else:
return x[break_i:], break_time
@export
class NoBreakFound(Exception):
pass
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def _find_break_i(data, safe_break, not_before):
"""Return first index of element right of the first gap
larger than safe_break in data.
Assumes all x have the same length and are sorted!
:param tolerant: if no break found, yield an as good as possible break
anyway.
"""
assert len(data) >= 2
latest_end_seen = max(not_before, strax.endtime(data[0]))
for i, d in enumerate(data):
if i == 0:
continue
if d['time'] >= latest_end_seen + safe_break:
return i
latest_end_seen = max(latest_end_seen,
strax.endtime(d))
raise NoBreakFound
@export
def fully_contained_in(things, containers):
"""Return array of len(things) with index of interval in containers
for which things are fully contained in a container, or -1 if no such
exists.
We assume all intervals are sorted by time, and b_intervals
nonoverlapping.
"""
result = np.ones(len(things), dtype=np.int32) * -1
a_starts = things['time']
b_starts = containers['time']
a_ends = strax.endtime(things)
b_ends = strax.endtime(containers)
_fc_in(a_starts, b_starts, a_ends, b_ends, result)
return result
@numba.jit(nopython=True, nogil=True, cache=True)
def _fc_in(a_starts, b_starts, a_ends, b_ends, result):
b_i = 0
for a_i in range(len(a_starts)):
# Skip ahead one or more b's if we're beyond them
# Note <= in second condition: end is an exclusive bound
while b_i < len(b_starts) and b_ends[b_i] <= a_starts[a_i]:
b_i += 1
if b_i == len(b_starts):
break
# Check for containment. We only need to check one b, since bs
# are nonoverlapping
if b_starts[b_i] <= a_starts[a_i] and a_ends[a_i] <= b_ends[b_i]:
result[a_i] = b_i
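# Illustrative sketch, not part of the original module: fully_contained_in on
# toy structured arrays carrying only 'time' and 'endtime'. Real strax records
# have more fields; this dtype is just enough for strax.endtime to work.
def _fully_contained_in_example():
    dtype = np.dtype([('time', np.int64), ('endtime', np.int64)])
    things = np.array([(0, 5), (6, 9), (20, 30)], dtype=dtype)
    containers = np.array([(0, 10), (15, 25)], dtype=dtype)
    # The first two things sit inside container 0; the last one sticks out of
    # container 1, so it maps to -1.
    assert list(fully_contained_in(things, containers)) == [0, 0, -1]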
@export
def split_by_containment(things, containers):
"""Return list of thing-arrays contained in each container
Assumes everything is sorted, and containers are nonoverlapping
"""
if not len(containers):
return []
# Index of which container each thing belongs to, or -1
which_container = fully_contained_in(things, containers)
# Restrict to things in containers
mask = which_container != -1
things = things[mask]
which_container = which_container[mask]
if not len(things):
# np.split has confusing behaviour for empty arrays
return [things[:0] for _ in range(len(containers))]
# Split things up by container
split_indices = np.where(np.diff(which_container))[0] + 1
things_split = np.split(things, split_indices)
# Insert empty arrays for empty containers
empty_containers = np.setdiff1d(np.arange(len(containers)),
np.unique(which_container))
for c_i in empty_containers:
things_split.insert(c_i, things[:0])
return things_split
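# Illustrative sketch, not part of the original module: reusing the toy arrays
# from the example above, split_by_containment groups things per container and
# inserts an empty array for containers that caught nothing.
def _split_by_containment_example():
    dtype = np.dtype([('time', np.int64), ('endtime', np.int64)])
    things = np.array([(0, 5), (6, 9), (20, 30)], dtype=dtype)
    containers = np.array([(0, 10), (15, 25)], dtype=dtype)
    split = split_by_containment(things, containers)
    assert len(split) == 2          # one entry per container
    assert len(split[0]) == 2       # both contained things fall in container 0
    assert len(split[1]) == 0       # container 1 fully contains nothing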
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def overlap_indices(a1, n_a, b1, n_b):
"""Given interval [a1, a1 + n_a), and [b1, b1 + n_b) of integers,
return indices [a_start, a_end), [b_start, b_end) of overlapping region.
"""
if n_a < 0 or n_b < 0:
raise ValueError("Negative interval length passed to overlap test")
if n_a == 0 or n_b == 0:
return (0, 0), (0, 0)
# a: p, b: r
s = a1 - b1
if s <= -n_a:
# B is completely right of a
return (0, 0), (0, 0)
# Range in b that overlaps with a
b_start = max(0, s)
b_end = min(n_b, s + n_a)
if b_start >= b_end:
# B is completely left of a
return (0, 0), (0, 0)
# Range of a that overlaps with b
a_start = max(0, -s)
a_end = min(n_a, -s + n_b)
return (a_start, a_end), (b_start, b_end)
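# Illustrative sketch, not part of the original module: the integer intervals
# [10, 15) and [12, 20) overlap on [12, 15), i.e. indices 2..5 of the first
# interval and 0..3 of the second.
def _overlap_indices_example():
    assert overlap_indices(10, 5, 12, 8) == ((2, 5), (0, 3))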
@export
def touching_windows(things, containers, window=0):
"""Return array of (start, exclusive end) indices into things which extend
to within window of the container, for each container in containers.
:param things: Sorted array of interval-like data
:param containers: Sorted array of interval-like data
:param window: threshold distance for touching check
For example:
- window = 0: things must overlap one sample
- window = -1: things can start right after container ends
(i.e. container endtime equals the thing starttime, since strax
endtimes are exclusive)
"""
return _touching_windows(
things['time'], strax.endtime(things),
containers['time'], strax.endtime(containers),
window=window)
@numba.njit(nogil=True, cache=True)
def _touching_windows(thing_start, thing_end,
container_start, container_end,
window=0):
result = np.zeros((len(container_start), 2), dtype=np.int32)
n = len(thing_start)
left_i = right_i = 0
for i, t0 in enumerate(container_start):
t1 = container_end[i]
while left_i <= n - 1 and thing_end[left_i] <= t0 - window:
# left_i ends before the window starts (so it's still outside)
left_i += 1
# Now left_i is the first index inside the window
# -- unless it is outside the array, in which case right_i
# will also be.
while right_i <= n - 1 and thing_start[right_i] < t1 + window:
# right_i starts before the window ends (so it could be inside)
right_i += 1
# Now right_i is the last index inside the window
# or outside the array.
result[i] = left_i, right_i
return result
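# Illustrative sketch, not part of the original module: with window=0,
# touching_windows returns, for each container, the [start, end) slice of
# things overlapping it (same toy arrays as in the examples above).
def _touching_windows_example():
    dtype = np.dtype([('time', np.int64), ('endtime', np.int64)])
    things = np.array([(0, 5), (6, 9), (20, 30)], dtype=dtype)
    containers = np.array([(0, 10), (15, 25)], dtype=dtype)
    assert touching_windows(things, containers).tolist() == [[0, 2], [2, 3]]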
|
from .TopDownCrawl import main
main() |
from collections import namedtuple
import logging
import os
import numpy as onp
from numpy.testing import assert_allclose
import pytest
from jax import device_put, disable_jit, grad, jit, random, tree_map
import jax.numpy as np
import numpyro.distributions as dist
from numpyro.infer.hmc_util import (
AdaptWindow,
_is_iterative_turning,
_leaf_idx_to_ckpt_idxs,
build_adaptation_schedule,
build_tree,
consensus,
dual_averaging,
find_reasonable_step_size,
parametric_draws,
velocity_verlet,
warmup_adapter,
welford_covariance
)
from numpyro.util import control_flow_prims_disabled, fori_loop, optional
logger = logging.getLogger(__name__)
@pytest.mark.parametrize('jitted', [True, False])
def test_dual_averaging(jitted):
def optimize(f):
da_init, da_update = dual_averaging(gamma=0.5)
da_state = da_init()
for i in range(10):
x = da_state[0]
g = grad(f)(x)
da_state = da_update(g, da_state)
x_avg = da_state[1]
return x_avg
f = lambda x: (x + 1) ** 2 # noqa: E731
fn = jit(optimize, static_argnums=(0,)) if jitted else optimize
x_opt = fn(f)
assert_allclose(x_opt, -1., atol=1e-3)
@pytest.mark.parametrize('jitted', [True, False])
@pytest.mark.parametrize('diagonal', [True, False])
@pytest.mark.parametrize('regularize', [True, False])
@pytest.mark.filterwarnings('ignore:numpy.linalg support is experimental:UserWarning')
def test_welford_covariance(jitted, diagonal, regularize):
with optional(jitted, disable_jit()), optional(jitted, control_flow_prims_disabled()):
onp.random.seed(0)
loc = onp.random.randn(3)
a = onp.random.randn(3, 3)
target_cov = onp.matmul(a, a.T)
x = onp.random.multivariate_normal(loc, target_cov, size=(2000,))
x = device_put(x)
@jit
def get_cov(x):
wc_init, wc_update, wc_final = welford_covariance(diagonal=diagonal)
wc_state = wc_init(3)
wc_state = fori_loop(0, 2000, lambda i, val: wc_update(x[i], val), wc_state)
cov, cov_inv_sqrt = wc_final(wc_state, regularize=regularize)
return cov, cov_inv_sqrt
cov, cov_inv_sqrt = get_cov(x)
if diagonal:
diag_cov = np.diagonal(target_cov)
assert_allclose(cov, diag_cov, rtol=0.06)
assert_allclose(cov_inv_sqrt, np.sqrt(np.reciprocal(diag_cov)), rtol=0.06)
else:
assert_allclose(cov, target_cov, rtol=0.06)
assert_allclose(cov_inv_sqrt, np.linalg.cholesky(np.linalg.inv(cov)), rtol=0.06)
########################################
# velocity_verlet Test
########################################
TEST_EXAMPLES = []
EXAMPLE_IDS = []
ModelArgs = namedtuple('model_args', ['step_size', 'num_steps', 'q_i', 'p_i', 'q_f', 'p_f', 'm_inv', 'prec'])
Example = namedtuple('test_case', ['model', 'args'])
def register_model(init_args):
"""
Register the model along with each of the model arguments
as test examples.
"""
def register_fn(model):
for args in init_args:
test_example = Example(model, args)
TEST_EXAMPLES.append(test_example)
EXAMPLE_IDS.append(model.__name__)
        return model
    return register_fn
@register_model([
ModelArgs(
step_size=0.01,
num_steps=100,
q_i={'x': 0.0},
p_i={'x': 1.0},
q_f={'x': np.sin(1.0)},
p_f={'x': np.cos(1.0)},
m_inv=np.array([1.]),
prec=1e-4
)
])
class HarmonicOscillator(object):
@staticmethod
def kinetic_fn(m_inv, p):
return 0.5 * np.sum(m_inv * p['x'] ** 2)
@staticmethod
def potential_fn(q):
return 0.5 * q['x'] ** 2
@register_model([
ModelArgs(
step_size=0.01,
num_steps=628,
q_i={'x': 1.0, 'y': 0.0},
p_i={'x': 0.0, 'y': 1.0},
q_f={'x': 1.0, 'y': 0.0},
p_f={'x': 0.0, 'y': 1.0},
m_inv=np.array([1., 1.]),
prec=5.0e-3
)
])
class CircularPlanetaryMotion(object):
@staticmethod
def kinetic_fn(m_inv, p):
z = np.stack([p['x'], p['y']], axis=-1)
return 0.5 * np.dot(m_inv, z**2)
@staticmethod
def potential_fn(q):
return - 1.0 / np.power(q['x'] ** 2 + q['y'] ** 2, 0.5)
@register_model([
ModelArgs(
step_size=0.1,
num_steps=1810,
q_i={'x': 0.02},
p_i={'x': 0.0},
q_f={'x': -0.02},
p_f={'x': 0.0},
m_inv=np.array([1.]),
prec=1.0e-4
)
])
class QuarticOscillator(object):
@staticmethod
def kinetic_fn(m_inv, p):
return 0.5 * np.sum(m_inv * p['x'] ** 2)
@staticmethod
def potential_fn(q):
return 0.25 * np.power(q['x'], 4.0)
@pytest.mark.parametrize('jitted', [True, False])
@pytest.mark.parametrize('example', TEST_EXAMPLES, ids=EXAMPLE_IDS)
def test_velocity_verlet(jitted, example):
def get_final_state(model, step_size, num_steps, q_i, p_i):
vv_init, vv_update = velocity_verlet(model.potential_fn, model.kinetic_fn)
vv_state = vv_init(q_i, p_i)
q_f, p_f, _, _ = fori_loop(0, num_steps,
lambda i, val: vv_update(step_size, args.m_inv, val),
vv_state)
return (q_f, p_f)
model, args = example
fn = jit(get_final_state, static_argnums=(0,)) if jitted else get_final_state
q_f, p_f = fn(model, args.step_size, args.num_steps, args.q_i, args.p_i)
logger.info('Test trajectory:')
logger.info('initial q: {}'.format(args.q_i))
logger.info('final q: {}'.format(q_f))
for node in args.q_f:
assert_allclose(q_f[node], args.q_f[node], atol=args.prec)
assert_allclose(p_f[node], args.p_f[node], atol=args.prec)
logger.info('Test energy conservation:')
energy_initial = model.kinetic_fn(args.m_inv, args.p_i) + model.potential_fn(args.q_i)
energy_final = model.kinetic_fn(args.m_inv, p_f) + model.potential_fn(q_f)
logger.info('initial energy: {}'.format(energy_initial))
logger.info('final energy: {}'.format(energy_final))
assert_allclose(energy_initial, energy_final, atol=1e-5)
logger.info('Test time reversibility:')
p_reverse = tree_map(lambda x: -x, p_f)
q_i, p_i = get_final_state(model, args.step_size, args.num_steps, q_f, p_reverse)
for node in args.q_i:
assert_allclose(q_i[node], args.q_i[node], atol=1e-4)
@pytest.mark.parametrize('jitted', [True, False])
@pytest.mark.parametrize('init_step_size', [0.1, 10.0])
def test_find_reasonable_step_size(jitted, init_step_size):
def kinetic_fn(m_inv, p):
return 0.5 * np.sum(m_inv * p ** 2)
def potential_fn(q):
return 0.5 * q ** 2
p_generator = lambda m_inv, rng_key: 1.0 # noqa: E731
q = 0.0
m_inv = np.array([1.])
fn = (jit(find_reasonable_step_size, static_argnums=(0, 1, 2))
if jitted else find_reasonable_step_size)
rng_key = random.PRNGKey(0)
step_size = fn(potential_fn, kinetic_fn, p_generator, m_inv, q, rng_key, init_step_size)
# Apply 1 velocity verlet step with step_size=eps, we have
# z_new = eps, r_new = 1 - eps^2 / 2, hence energy_new = 0.5 + eps^4 / 8,
# hence delta_energy = energy_new - energy_init = eps^4 / 8.
# We want to find a reasonable step_size such that delta_energy ~ -log(0.8),
# hence that step_size ~ the following threshold
threshold = np.power(-np.log(0.8) * 8, 0.25)
# Confirm that given init_step_size, we will doubly increase/decrease it
# until it passes threshold.
if init_step_size < threshold:
assert step_size / 2 < threshold
assert step_size > threshold
else:
assert step_size * 2 > threshold
assert step_size < threshold
@pytest.mark.parametrize('num_steps, expected', [
(18, [(0, 17)]),
(50, [(0, 6), (7, 44), (45, 49)]),
(100, [(0, 14), (15, 89), (90, 99)]),
(150, [(0, 74), (75, 99), (100, 149)]),
(200, [(0, 74), (75, 99), (100, 149), (150, 199)]),
(280, [(0, 74), (75, 99), (100, 229), (230, 279)]),
])
def test_build_adaptation_schedule(num_steps, expected):
adaptation_schedule = build_adaptation_schedule(num_steps)
expected_schedule = [AdaptWindow(i, j) for i, j in expected]
assert adaptation_schedule == expected_schedule
@pytest.mark.parametrize('jitted', [
True,
pytest.param(False, marks=pytest.mark.skipif("CI" in os.environ, reason="slow in Travis"))
])
def test_warmup_adapter(jitted):
def find_reasonable_step_size(m_inv, z, rng_key, step_size):
return np.where(step_size < 1, step_size * 4, step_size / 4)
num_steps = 150
adaptation_schedule = build_adaptation_schedule(num_steps)
init_step_size = 1.
mass_matrix_size = 3
wa_init, wa_update = warmup_adapter(num_steps, find_reasonable_step_size)
wa_update = jit(wa_update) if jitted else wa_update
rng_key = random.PRNGKey(0)
z = np.ones(3)
wa_state = wa_init(z, rng_key, init_step_size, mass_matrix_size=mass_matrix_size)
step_size, inverse_mass_matrix, _, _, _, window_idx, _ = wa_state
assert step_size == find_reasonable_step_size(inverse_mass_matrix, z, rng_key, init_step_size)
assert_allclose(inverse_mass_matrix, np.ones(mass_matrix_size))
assert window_idx == 0
window = adaptation_schedule[0]
for t in range(window.start, window.end + 1):
wa_state = wa_update(t, 0.7 + 0.1 * t / (window.end - window.start), z, wa_state)
last_step_size = step_size
step_size, inverse_mass_matrix, _, _, _, window_idx, _ = wa_state
assert window_idx == 1
# step_size is decreased because accept_prob < target_accept_prob
assert step_size < last_step_size
# inverse_mass_matrix does not change at the end of the first window
assert_allclose(inverse_mass_matrix, np.ones(mass_matrix_size))
window = adaptation_schedule[1]
window_len = window.end - window.start
for t in range(window.start, window.end + 1):
wa_state = wa_update(t, 0.8 + 0.1 * (t - window.start) / window_len, 2 * z, wa_state)
last_step_size = step_size
step_size, inverse_mass_matrix, _, _, _, window_idx, _ = wa_state
assert window_idx == 2
# step_size is increased because accept_prob > target_accept_prob
assert step_size > last_step_size
# Verifies that inverse_mass_matrix changes at the end of the second window.
# Because z_flat is constant during the second window, covariance will be 0
# and only regularize_term of welford scheme is involved.
# This also verifies that z_flat terms in the first window does not affect
# the second window.
welford_regularize_term = 1e-3 * (5 / (window.end + 1 - window.start + 5))
assert_allclose(inverse_mass_matrix,
np.full((mass_matrix_size,), welford_regularize_term),
atol=1e-7)
window = adaptation_schedule[2]
for t in range(window.start, window.end + 1):
wa_state = wa_update(t, 0.8, t * z, wa_state)
last_step_size = step_size
step_size, final_inverse_mass_matrix, _, _, _, window_idx, _ = wa_state
assert window_idx == 3
# during the last window, because target_accept_prob=0.8,
# log_step_size will be equal to the constant prox_center=log(10*last_step_size)
assert_allclose(step_size, last_step_size * 10)
# Verifies that inverse_mass_matrix does not change during the last window
# despite z_flat changes w.r.t time t,
assert_allclose(final_inverse_mass_matrix, inverse_mass_matrix)
@pytest.mark.parametrize('leaf_idx, ckpt_idxs', [
(6, (3, 2)),
(7, (0, 2)),
(13, (2, 2)),
(15, (0, 3)),
])
def test_leaf_idx_to_ckpt_idx(leaf_idx, ckpt_idxs):
assert _leaf_idx_to_ckpt_idxs(leaf_idx) == ckpt_idxs
@pytest.mark.parametrize('ckpt_idxs, expected_turning', [
((3, 2), False),
((3, 3), True),
((0, 0), False),
((0, 1), True),
((1, 3), True),
])
def test_is_iterative_turning(ckpt_idxs, expected_turning):
inverse_mass_matrix = np.ones(1)
r = 1.
r_sum = 3.
r_ckpts = np.array([1., 2., 3., -2.])
r_sum_ckpts = np.array([2., 4., 4., -1.])
actual_turning = _is_iterative_turning(inverse_mass_matrix, r, r_sum, r_ckpts, r_sum_ckpts,
*ckpt_idxs)
assert expected_turning == actual_turning
@pytest.mark.parametrize('step_size', [0.01, 1., 100.])
def test_build_tree(step_size):
def kinetic_fn(m_inv, p):
return 0.5 * np.sum(m_inv * p ** 2)
def potential_fn(q):
return 0.5 * q ** 2
vv_init, vv_update = velocity_verlet(potential_fn, kinetic_fn)
vv_state = vv_init(0.0, 1.0)
inverse_mass_matrix = np.array([1.])
rng_key = random.PRNGKey(0)
@jit
def fn(vv_state):
tree = build_tree(vv_update, kinetic_fn, vv_state, inverse_mass_matrix,
step_size, rng_key)
return tree
tree = fn(vv_state)
assert tree.num_proposals >= 2 ** (tree.depth - 1)
assert tree.sum_accept_probs <= tree.num_proposals
if tree.depth < 10:
assert tree.turning | tree.diverging
# for large step_size, assert that diverging will happen in 1 step
if step_size > 10:
assert tree.diverging
assert tree.num_proposals == 1
# for small step_size, assert that it should take a while to meet the terminate condition
if step_size < 0.1:
assert tree.num_proposals > 10
# TODO: raise this warning issue upstream, the issue is at this line
# https://github.com/google/jax/blob/master/jax/numpy/lax_numpy.py#L2732
@pytest.mark.filterwarnings('ignore:Explicitly requested dtype float64')
@pytest.mark.parametrize('method', [consensus, parametric_draws])
@pytest.mark.parametrize('diagonal', [True, False])
def test_gaussian_subposterior(method, diagonal):
D = 10
n_samples = 10000
n_draws = 9000
n_subs = 8
mean = np.arange(D)
cov = np.ones((D, D)) * 0.9 + np.identity(D) * 0.1
subcov = n_subs * cov # subposterior's covariance
subposteriors = list(dist.MultivariateNormal(mean, subcov).sample(
random.PRNGKey(1), (n_subs, n_samples)))
draws = method(subposteriors, n_draws, diagonal=diagonal)
assert draws.shape == (n_draws, D)
assert_allclose(np.mean(draws, axis=0), mean, atol=0.03)
if diagonal:
assert_allclose(np.var(draws, axis=0), np.diag(cov), atol=0.05)
else:
assert_allclose(np.cov(draws.T), cov, atol=0.05)
@pytest.mark.filterwarnings('ignore:Explicitly requested dtype float64')
@pytest.mark.parametrize('method', [consensus, parametric_draws])
def test_subposterior_structure(method):
subposteriors = [{'x': np.ones((100, 3)), 'y': np.zeros((100,))} for i in range(10)]
draws = method(subposteriors, num_draws=9)
assert draws['x'].shape == (9, 3)
assert draws['y'].shape == (9,)
|
from datetime import datetime, timedelta
import logging
import geopandas
import movingpandas
import pandas
import requests
from mesa.datacollection import DataCollector
from tqdm import tqdm
from pyproj import CRS
from dpd.modeling.agents.people import Pedestrian, Cyclist, Driver
from dpd.werkzeug import WerkzeugThread
from .people_flask_app import people_flask_app
from .agent_based_dict import AgentBasedDict
from .agent_based_intersections import AgentBasedIntersections
from .agent_based_links import AgentBasedLinks
from .mode_choice_model import ModeChoiceModel
class People(AgentBasedDict):
"""
A class to hold People.
"""
def __init__(self, map_, crs=None, *args, **kwargs):
super().__init__(crs=crs, *args, **kwargs)
self.intersections = AgentBasedIntersections(map_.intersections)
self.links = AgentBasedLinks(map_.links)
self.links.update_intersections(self.intersections)
self.data_collector = DataCollector(agent_reporters={"geometry": "geometry"})
def to_crs(self, crs):
""" """
raise NotImplementedError(
"I'm not able to change the crs on People. Maybe create a GeoDataFrame and then change the crs."
)
def add_person(self, person):
self[person.name] = person
self.model.schedule.add(person)
def create_people_from_od(self, od):
mode_choice_model = ModeChoiceModel()
mode_choice_model.add_mode(Driver, 0.8)
mode_choice_model.add_mode(Cyclist, 0.1)
mode_choice_model.add_mode(Pedestrian, 0.1)
for _, person in tqdm(od.iterrows(), total=len(od)):
route = self.intersections.nodes_to_links(
person.routes[0]["legs"][0]["annotation"]["nodes"]
)
mode = mode_choice_model.predict()
person = mode(self.model, person.home_geometry, route)
self.add_person(person)
def post_people(self, url):
people = self.to_geopandas()
        people = people.to_crs("EPSG:4326")
return requests.post(url, data={"people": people.to_json()})
def get_agent_vars_geodataframe(self, start_time=datetime(1970, 1, 1, 0, 0, 0)):
gdf = geopandas.GeoDataFrame(self.data_collector.get_agent_vars_dataframe())
gdf.crs = self.crs
one_day = timedelta(1)
index = pandas.date_range(start_time, start_time + one_day, freq="S")[
0 : len(gdf)
]
gdf.index = gdf.index.set_levels(index, level=0)
return gdf
def get_trajectories(self):
gdf = self.get_agent_vars_geodataframe()
gdf.reset_index(level="AgentID", inplace=True)
return movingpandas.TrajectoryCollection(gdf, "AgentID")
def simulate(
self,
number_of_rounds=10,
post_people_url=None,
):
aea = CRS.from_string("North America Albers Equal Area Conic")
self.intersections.to_crs(aea)
self.links.to_crs(aea)
self.crs = self.links.crs
self.data_collector.collect(self.model)
if post_people_url:
werkzeug_thread = WerkzeugThread(people_flask_app())
werkzeug_thread.start()
            self.post_people(post_people_url)
for round_number in range(number_of_rounds):
logging.info("Simulating round %s" % (round_number,))
self.model.step()
self.intersections.model.step()
self.data_collector.collect(self.model)
if post_people_url:
self.post_people(post_people_url)
if post_people_url:
werkzeug_thread.stop()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import os
import shutil
import kaptan
import tempfile
from .. import config, cli
from ..util import tmux
from .helpers import TestCase
import logging
logger = logging.getLogger(__name__)
TMUXP_DIR = os.path.join(os.path.dirname(__file__), '.tmuxp')
class StartupTest(TestCase):
"""test startup_cli()."""
def setUp(self):
if os.path.isdir(TMUXP_DIR):
shutil.rmtree(TMUXP_DIR)
def test_creates_config_dir_not_exists(self):
"""cli.startup() creates config dir if not exists."""
self.assertFalse(os.path.exists(TMUXP_DIR))
cli.startup(TMUXP_DIR)
self.assertTrue(os.path.exists(TMUXP_DIR))
@classmethod
def tearDownClass(cls):
if os.path.isdir(TMUXP_DIR):
shutil.rmtree(TMUXP_DIR)
logger.debug('wiped %s' % TMUXP_DIR)
class FindConfigsTest(TestCase):
"""test in_dir() test."""
def setUp(self):
if os.path.isdir(TMUXP_DIR):
shutil.rmtree(TMUXP_DIR)
def test_in_dir_from_config_dir(self):
"""config.in_dir() finds configs config dir."""
cli.startup(TMUXP_DIR)
config1 = tempfile.NamedTemporaryFile(
dir=TMUXP_DIR,
prefix='myconfig',
suffix='.yaml'
)
config2 = tempfile.NamedTemporaryFile(
dir=TMUXP_DIR,
prefix='myconfig',
suffix='.json'
)
configs_found = config.in_dir(TMUXP_DIR)
self.assertEqual(len(configs_found), 2)
def test_in_dir_from_current_dir(self):
"""config.in_dir() find configs config dir."""
cli.startup(TMUXP_DIR)
config1 = tempfile.NamedTemporaryFile(
dir=TMUXP_DIR,
prefix='myconfig',
suffix='.yaml'
)
config2 = tempfile.NamedTemporaryFile(
dir=TMUXP_DIR,
prefix='myconfig',
suffix='.json'
)
configs_found = config.in_dir(TMUXP_DIR)
self.assertEqual(len(configs_found), 2)
def test_ignore_non_configs_from_current_dir(self):
"""cli.in_dir() ignore non-config from config dir."""
cli.startup(TMUXP_DIR)
badconfig = tempfile.NamedTemporaryFile(
dir=TMUXP_DIR,
prefix='myconfig',
suffix='.psd'
)
config1 = tempfile.NamedTemporaryFile(
dir=TMUXP_DIR,
prefix='watmyconfig',
suffix='.json'
)
configs_found = config.in_dir(TMUXP_DIR)
self.assertEqual(len(configs_found), 1)
def test_get_configs_cwd(self):
"""config.in_cwd() find config in shell current working directory."""
current_dir = os.getcwd()
configs_found = config.in_cwd()
# create a temporary folder and change dir into it
tmp_dir = tempfile.mkdtemp(suffix='tmuxp')
os.chdir(tmp_dir)
try:
config1 = open('.tmuxp.json', 'w+b')
config1.close()
configs_found = config.in_cwd()
finally:
os.remove(config1.name)
self.assertEqual(len(configs_found), 1)
self.assertIn('.tmuxp.json', configs_found)
# clean up
os.chdir(current_dir)
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
@classmethod
def tearDownClass(cls):
if os.path.isdir(TMUXP_DIR):
shutil.rmtree(TMUXP_DIR)
logger.debug('wiped %s' % TMUXP_DIR)
sampleconfigdict = {
'session_name': 'sampleconfig',
'start_directory': '~',
'windows': [
{
'window_name': 'editor',
'panes': [
{
'start_directory': '~',
'shell_command': ['vim'],
},
{
'shell_command': ['cowsay "hey"']
},
],
'layout': 'main-verticle'
},
{
'window_name': 'logging', 'panes': [
{
'shell_command': ['tail -F /var/log/syslog'],
'start_directory':'/var/log'
}
]
}, {
'options': {'automatic_rename': True, },
'panes': [
{
'shell_command': ['htop']
}
]
}
]
}
|
import socket
from krgram.utils.bytes import Bytes
from krgram.utils.stream import QueueByteStream, IByteStream
_debug = True
class TCPByteStream(IByteStream):
"""
Represents a TCP connection as a bidirectional stream (readable/writable)
Example usage::
tcpstream = TCPByteStream(host, port)
tcpstream.write("Hi, this a message from client")
tcpstream.write("and asterisc close message*")
# now the message is in a buffer. We must send it...
tcpstream.send()
# ... and read server response. Here
msg_len = int(tcpstream.read(4))
tcp_stream.read(msg_len)
"""
_DEF_READ_TIMEOUT = 10
def __init__(self, host, port):
super(TCPByteStream, self).__init__()
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self._DEF_READ_TIMEOUT)
# server data buffer
self._in = QueueByteStream()
# client data buffer
self._out = QueueByteStream()
def open(self):
self.sock.connect( (self.host, self.port) )
def read(self, count=0):
"""
Read data from buffer (or/and tcp socket if necessary). If count is 0, reading will be only
from memory buffer (without reading from socket) and return all buffer content
:param count: bytes to read
:return: readed bytes
"""
if count < 0:
raise ValueError("count must be an integer >= 0")
if count == 0:
return self._in.read(0)
buff_data_count = len(self._in)
if buff_data_count < count:
            # read at least one full chunk; for larger requests, round the chunk
            # size up to the nearest power of two not smaller than count
            if count > 4096:
                size = 1 << (count - 1).bit_length()
            else:
                size = 4096
self._read_remote_chunk(size)
buff_data_count = len(self._in)
if buff_data_count < count:
self.close()
raise Exception("server not sended bytes count requested")
data = self._in.read(count)
return data
def read_all(self, reader):
# TODO: implement me
raise NotImplementedError()
def write(self, data):
#self.sock.send( data )
self._out.write(data)
        if _debug:
            print(repr(Bytes(data)))
def send(self):
data = self._out.read()
self.sock.sendall(data)
        if _debug:
            print(repr(data))
def _read_remote_chunk(self, chunk_size):
data = Bytes(self.sock.recv(chunk_size))
self._in.write(data)
return len(data)
'''def write_byte(self, b):
self.write(b)
def write_int(self, n, size=4, big_endian=False):
self.write(Bytes.from_int(n, size, big_endian))
def read_byte(self):
return self.read(1)
def read_int(self, size=4, big_endian=False, signed=False):
d= self.read(size)
return Bytes(d).to_int(big_endian, signed)'''
def close(self):
if self.sock is not None:
self.sock.close()
|
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import bpy
from ..utils import stage_cache
from ..utils import logging
log = logging.Log('properties')
from pxr import UsdImagingLite, Tf
def _createGatlingRenderSettingsClass():
renderer = UsdImagingLite.Engine()
renderer.SetRendererPlugin('HdGatlingRendererPlugin')
props = {}
for setting in renderer.GetRendererSettingsList():
name_str = str(setting.name)
key_str = str(setting.key)
type_str = str(setting.type)
value = renderer.GetRendererSetting(Tf.MakeValidIdentifier(name_str))
if value is None:
value = setting.defValue
if type_str == 'FLAG':
props[key_str] = bpy.props.BoolProperty(name=name_str, default=value)
elif type_str == 'INT':
props[key_str] = bpy.props.IntProperty(name=name_str, default=value)
elif type_str == 'FLOAT':
props[key_str] = bpy.props.FloatProperty(name=name_str, default=value)
elif type_str == 'STRING':
props[key_str] = bpy.props.StringProperty(name=name_str, default=value)
else:
log.warn("Render setting {} of type {} not displayed".format(name_str, type_str))
return type('GatlingRenderSettings', (bpy.types.PropertyGroup,), {'__annotations__': props})
GatlingRenderSettings = _createGatlingRenderSettingsClass()
class HdUSDProperties(bpy.types.PropertyGroup):
bl_type = None
@classmethod
def register(cls):
cls.bl_type.hdusd = bpy.props.PointerProperty(
name="HdUSD properties",
description="HdUSD properties",
type=cls,
)
@classmethod
def unregister(cls):
del cls.bl_type.hdusd
class CachedStageProp(bpy.types.PropertyGroup, stage_cache.CachedStage):
id: bpy.props.IntProperty(default=stage_cache.ID_NO_STAGE)
is_owner: bpy.props.BoolProperty(default=False)
def __del__(self):
pass
from . import (
scene,
object,
node,
usd_list,
material,
hdrpr_render,
matlib
)
register, unregister = bpy.utils.register_classes_factory((
CachedStageProp,
hdrpr_render.QualitySettings,
hdrpr_render.InteractiveQualitySettings,
hdrpr_render.ContourSettings,
hdrpr_render.DenoiseSettings,
hdrpr_render.RenderSettings,
GatlingRenderSettings,
usd_list.PrimPropertyItem,
usd_list.UsdListItem,
usd_list.UsdList,
node.NodeProperties,
scene.FinalRenderSettings,
scene.ViewportRenderSettings,
scene.SceneProperties,
object.ObjectProperties,
material.MaterialProperties,
matlib.MatlibProperties,
matlib.WindowManagerProperties,
))
|
from .site_settings import SiteSettingsSerializer
|
import pygame.mixer
from pygame.mixer import Sound
from gpiozero import Button
from signal import pause
pygame.mixer.init()
sound_pins = {
2: Sound("test soundeff.wav"),
}
buttons = [Button(pin) for pin in sound_pins]
for button in buttons:
    # Button.pin is a Pin object; use its .number to look up the integer key
    sound = sound_pins[button.pin.number]
button.when_pressed = sound.play
pause()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
The module provides a means to process Azure Event Hubs events at scale.
"""
try:
from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor
from azure.eventprocessorhost.azure_storage_checkpoint_manager import AzureStorageCheckpointLeaseManager
from azure.eventprocessorhost.azure_blob_lease import AzureBlobLease
from azure.eventprocessorhost.checkpoint import Checkpoint
from azure.eventprocessorhost.eh_config import EventHubConfig
from azure.eventprocessorhost.eh_partition_pump import EventHubPartitionPump, PartitionReceiver
from azure.eventprocessorhost.eph import EventProcessorHost, EPHOptions
from azure.eventprocessorhost.partition_manager import PartitionManager
from azure.eventprocessorhost.partition_context import PartitionContext
from azure.eventprocessorhost.partition_pump import PartitionPump
except (SyntaxError, ImportError):
raise ImportError("EventProcessHost is only compatible with Python 3.5 and above.")
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib.pyplot as pyplot
import myplot
import Pmf
def NormalPdf(x):
"""Computes the PDF of x in the standard normal distribution."""
return math.exp(-x**2/2) / math.sqrt(2 * math.pi)
def Linspace(start, stop, n):
"""Makes a list of n floats from start to stop.
Similar to numpy.linspace()
"""
return [start + (stop-start) * float(i)/(n-1) for i in range(n)]
def RenderPdf(mu, sigma, n=101):
"""Makes xs and ys for a normal PDF with (mu, sigma).
n: number of places to evaluate the PDF
"""
xs = Linspace(mu-4*sigma, mu+4*sigma, n)
ys = [NormalPdf((x-mu) / sigma) for x in xs]
return xs, ys
def main():
xs, ys = RenderPdf(100, 15)
n = 34
pyplot.fill_between(xs[-n:], ys[-n:], y2=0.0001, color='blue', alpha=0.2)
s = 'Congratulations!\nIf you got this far,\nyou must be here.'
d = dict(shrink=0.05)
pyplot.annotate(s, [127, 0.02], xytext=[80, 0.05], arrowprops=d)
myplot.Plot(xs, ys,
clf=False,
show=True,
title='Distribution of IQ',
xlabel='IQ',
ylabel='PDF',
legend=False
)
if __name__ == "__main__":
main()
|
#
# Copyright (c) 2019 Juniper Networks, Inc. All rights reserved.
#
from cfgm_common.exceptions import NoIdError
from vnc_api.gen.resource_client import RouteTarget
from schema_transformer.resources._resource_base import ResourceBaseST
class RouteTargetST(ResourceBaseST):
_dict = {}
obj_type = 'route_target'
@classmethod
def reinit(cls):
asn = ResourceBaseST.get_obj_type_map().get(
'global_system_config').get_autonomous_system()
for obj in cls.list_vnc_obj():
try:
if (obj.get_routing_instance_back_refs() or
obj.get_logical_router_back_refs()):
cls.locate(obj.get_fq_name_str(), obj)
else:
cls.delete_vnc_obj(obj.get_fq_name_str())
except Exception as e:
cls._logger.error("Error in reinit for %s %s: %s" % (
cls.obj_type, obj.get_fq_name_str(), str(e)))
for ri, val in cls._object_db._rt_cf.get_range():
rt = val['rtgt_num']
rt_key = "target:%s:%s" % (asn, rt)
if rt_key not in cls:
cls._object_db.free_route_target(ri, asn)
# When upgrade happens from earlier releases to a release that
# supports 4 byte ASN, we need to take care of changing the
# zookeeper path for route-targets
# it will now be in /id/bgp/route-targets/type0
old_path = '%s%s' % (cls._object_db._zk_path_pfx,
"/id/bgp/route-targets")
cls._object_db.populate_route_target_directory(old_path, asn)
# This is to handle upgrade scenarios.
# In case we upgrade to a release containing support to 4 Byte ASN
# Once all the RTs are recreated in ZK in their new path, delete
# the old path for RTs in ZK
cls._object_db.delete_route_target_directory(
'%s%s' % (cls._object_db._zk_path_pfx,
"/id/bgp/route-targets"))
# end reinit
def __init__(self, rt_key, obj=None):
self.name = rt_key
try:
self.obj = obj or self.read_vnc_obj(fq_name=[rt_key])
except NoIdError:
self.obj = RouteTarget(rt_key)
self._vnc_lib.route_target_create(self.obj)
# end __init__
def update(self, obj=None):
return False
@classmethod
def delete_vnc_obj(cls, key):
try:
cls._vnc_lib.route_target_delete(fq_name=[key])
except NoIdError:
pass
cls._dict.pop(key, None)
# end delete_vnc_obj
# end RoutTargetST
|
import os
import shutil
import tempfile
from pathlib import Path
from unittest.mock import patch, MagicMock
from pipeline.recon.web import WebanalyzeScan, GatherWebTargets
from pipeline.tools import tools
webanalyze_results = Path(__file__).parent.parent / "data" / "recon-results" / "webanalyze-results"
class TestWebanalyzeScan:
def setup_method(self):
self.tmp_path = Path(tempfile.mkdtemp())
self.scan = WebanalyzeScan(
target_file=__file__, results_dir=str(self.tmp_path), db_location=str(self.tmp_path / "testing.sqlite")
)
self.scan.exception = False
def teardown_method(self):
shutil.rmtree(self.tmp_path)
def test_scan_requires(self):
with patch("pipeline.recon.web.GatherWebTargets"):
with patch("pipeline.recon.web.webanalyze.meets_requirements"):
retval = self.scan.requires()
assert isinstance(retval, GatherWebTargets)
def test_scan_creates_results_dir(self):
assert self.scan.results_subfolder == self.tmp_path / "webanalyze-results"
def test_scan_creates_database(self):
assert self.scan.db_mgr.location.exists()
assert self.tmp_path / "testing.sqlite" == self.scan.db_mgr.location
def test_scan_creates_results(self):
self.scan.results_subfolder = webanalyze_results
self.scan.parse_results()
assert self.scan.output().exists()
def test_scan_run(self):
with patch("concurrent.futures.ThreadPoolExecutor.map") as mocked_map, patch(
"subprocess.run"
) as mocked_run, patch("pathlib.Path.cwd", return_value="/"):
self.scan.parse_results = MagicMock()
self.scan.db_mgr.get_all_web_targets = MagicMock()
self.scan.db_mgr.get_all_web_targets.return_value = [
"13.56.144.135",
"2606:4700:10::6814:3c33",
"google.com",
]
self.scan.run()
assert mocked_map.called
assert mocked_run.called
assert self.scan.parse_results.called
def test_scan_run_with_wrong_threads(self, caplog):
self.scan.threads = "a"
retval = self.scan.run()
assert retval is None
assert "The value supplied to --threads must be a non-negative integer" in caplog.text
def test_wrapped_subprocess(self):
with patch("subprocess.run") as mocked_run:
self.scan.results_subfolder.mkdir()
os.chdir(self.scan.results_subfolder)
assert len([x for x in self.scan.results_subfolder.iterdir()]) == 0
cmd = [tools.get("webanalyze").get("path"), "-host", "https://google.com", "-output", "csv"]
self.scan._wrapped_subprocess(cmd)
assert len([x for x in self.scan.results_subfolder.iterdir()]) == 1
assert next(self.scan.results_subfolder.iterdir()).name == "webanalyze-https_google.com.csv"
assert mocked_run.called
|
# Signatures
from DSGRN import *
import sqlite3
def SaveDatabase(filename, data, pg):
# print("Save Database")
conn = sqlite3.connect(filename)
conn.executescript("""
create table if not exists Signatures (ParameterIndex INTEGER PRIMARY KEY, MorseGraphIndex INTEGER);
create table if not exists MorseGraphViz (MorseGraphIndex INTEGER PRIMARY KEY, Graphviz TEXT);
create table if not exists MorseGraphVertices (MorseGraphIndex INTEGER, Vertex INTEGER);
create table if not exists MorseGraphEdges (MorseGraphIndex INTEGER, Source INTEGER, Target INTEGER);
create table if not exists MorseGraphAnnotations (MorseGraphIndex INTEGER, Vertex INTEGER, Label TEXT);
create table if not exists Network ( Name TEXT, Dimension INTEGER, Specification TEXT, Graphviz TEXT);
""")
# Postprocessing to give Morse Graphs indices
morsegraphs = []
def signatures_table(data):
morsegraphindices = {}
for (pi, mg) in data:
if mg in morsegraphindices: # ideally I'd have a graph isomorphism check
mgi = morsegraphindices[mg]
else:
mgi = len(morsegraphindices)
morsegraphindices[mg] = mgi
morsegraphs.append(mg)
yield (pi,mgi)
def MG(mgi):
return MorseGraph().parse(morsegraphs[mgi])
name = filename
if filename[-3:] == '.db':
name = filename[:-3]
# print("Inserting Network table into Database", flush=True)
conn.execute("insert into Network ( Name, Dimension, Specification, Graphviz) values (?, ?, ?, ?);", (name, pg.network().size(), pg.network().specification(), pg.network().graphviz()))
# print("Inserting Signatures table into Database", flush=True)
conn.executemany("insert into Signatures (ParameterIndex, MorseGraphIndex) values (?, ?);", signatures_table(data))
# print("Inserting MorseGraphViz table into Database", flush=True)
conn.executemany("insert into MorseGraphViz (MorseGraphIndex, Graphviz) values (?, ?);",
( (mgi, MG(mgi).graphviz()) for mgi in range(0, len(morsegraphs))) )
# print("Inserting MorseGraphVertices table into Database", flush=True)
conn.executemany("insert into MorseGraphVertices (MorseGraphIndex, Vertex) values (?, ?);",
( (mgi, v) for mgi in range(0, len(morsegraphs)) for v in range(0,MG(mgi).poset().size()) ))
# print("Inserting MorseGraphEdges table into Database", flush=True)
conn.executemany("insert into MorseGraphEdges (MorseGraphIndex, Source, Target) values (?, ?, ?);",
( (mgi, s, t) for mgi in range(0, len(morsegraphs)) for s in range(0,MG(mgi).poset().size()) for t in MG(mgi).poset().children(s) ))
# print("Inserting MorseGraphAnnotations table into Database", flush=True)
conn.executemany("insert into MorseGraphAnnotations (MorseGraphIndex, Vertex, Label) values (?, ?, ?);",
( (mgi, v, label) for mgi in range(0, len(morsegraphs)) for v in range(0,MG(mgi).poset().size()) for label in MG(mgi).annotation(v) ))
# print("Indexing Database.", flush=True)
conn.executescript("""
create index if not exists Signatures2 on Signatures (MorseGraphIndex, ParameterIndex);
create index if not exists MorseGraphAnnotations3 on MorseGraphAnnotations (Label, MorseGraphIndex);
create index if not exists MorseGraphViz2 on MorseGraphViz (Graphviz, MorseGraphIndex);
create index if not exists MorseGraphVertices1 on MorseGraphVertices (MorseGraphIndex, Vertex);
create index if not exists MorseGraphVertices2 on MorseGraphVertices (Vertex, MorseGraphIndex);
create index if not exists MorseGraphEdges1 on MorseGraphEdges (MorseGraphIndex);
create index if not exists MorseGraphAnnotations1 on MorseGraphAnnotations (MorseGraphIndex);
""")
conn.commit()
conn.close()
def make_db(specfile, outfile):
gpg = ParameterGraph(Network(specfile))
results = []
# print("Computing Morse Graphs")
for pi in range(gpg.size()):
results.append((pi, MorseGraph(DomainGraph(gpg.parameter(pi))).stringify()))
SaveDatabase(outfile, results, gpg)
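# Illustrative usage sketch (not part of the original script; the file names
# below are hypothetical): compute the Morse graph signature for every
# parameter of a network and store the results in an SQLite database.
def example_usage():
    make_db("network_spec.txt", "network_signatures.db")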
|
#!/usr/bin/env python
# Use Netmiko to change the logging buffer size and to disable console logging
# from a file on both pynet-rtr1 and pynet-rtr2.
from netmiko import ConnectHandler
def main():
    # Definitions of rtr1 and rtr2.
rtr1 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': '88newclass',
}
rtr2 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': '88newclass',
'port': 8022,
}
# Create a list of all the routers.
all_routers = [rtr1, rtr2]
    # Loop through all the routers and update the logging configuration.
for a_router in all_routers:
net_connect = ConnectHandler(**a_router)
# Check current logging buffer size.
print "\n>>>>>>>>> Device {0} <<<<<<<<<".format(a_router['device_type'])
output = net_connect.send_command("show run | inc logging")
print "Initial logging config: "
print output
print
# Enter config mode, change logging buffer and console logging from file,
# exit config mode.
output = net_connect.config_mode()
output = net_connect.send_config_from_file(config_file='config_file.txt')
output = net_connect.exit_config_mode()
# Check logging buffer size again.
output = net_connect.send_command("show run | inc logging")
print "Final logging config: "
print output
print ">>>>>>>>> End <<<<<<<<<\n"
if __name__ == "__main__":
main()
|
from core import Locust, TaskSet, WebLocust, SubLocust, task
from exception import InterruptTaskSet, ResponseError, RescheduleTaskImmediately
version = "0.6.2"
|
import sys
import tkinter
from PIL import Image, ImageTk
import threading
import time
import urllib.request
import io
import requests
import json
def show_image():
global item, canvas
root = tkinter.Tk()
root.attributes('-fullscreen', True)
# root.bind('', lambda e: root.destroy())
root.title('Status')
root.geometry("1920x1080")
img = Image.open('image/display_locked_qr.jpeg')
img = ImageTk.PhotoImage(img)
canvas = tkinter.Canvas(bg = "black", width=1920, height=1080)
canvas.place(x=0, y=0)
item = canvas.create_image(0, 0, image=img, anchor=tkinter.NW)
root.mainloop()
thread1 = threading.Thread(target=show_image)
thread1.start()
while(True):
url = "http://192.168.10.15:8080/api/products"
products_get = requests.get(url)
    product_dict = products_get.json()[-1]  # get the latest record as a dict
    image_url = product_dict['image']  # get the image URL
img_read = urllib.request.urlopen(image_url).read()
img_bin = io.BytesIO(img_read)
    img2 = Image.open(img_bin)  # open with PIL
# img2 = Image.open('image/display_unlocked_qr.jpeg')
img2 = ImageTk.PhotoImage(img2)
time.sleep(3)
canvas.itemconfig(item,image=img2)
time.sleep(3)
img = Image.open('image/display_locked_qr.jpeg')
img = ImageTk.PhotoImage(img)
canvas.itemconfig(item,image=img) |
import pyxel
pyxel.init(200, 200)
pyxel.cls(7)
for i in range(0, 110, 10):
pyxel.line(i, 0, 100 + i, 200, 0)
pyxel.show()
|
#!/usr/bin/env python
from _menu import *
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego ([email protected])
# @Date: 2018-01-16
# @Filename: command.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego ([email protected])
# @Last modified time: 2019-04-27 12:35:22
import asyncio
import collections
import logging
import pathlib
import ruamel.yaml
from asyncioActor.command import Command
from asyncioActor.core import exceptions
from asyncioActor.misc import logger
from asyncioActor.protocol import TCPStreamPeriodicServer, TCPStreamServer
#: The default status delay.
DEFAULT_STATUS_DELAY = 1
class Actor(object):
"""An actor based in asyncio.
This class defines a new actor. Normally a new instance is created by
passing a configuration file path which defines how the actor must
be started.
The TCP servers need to be started by awaiting the coroutine `.run`. The
following is an example of a basic actor instantiation: ::
loop = asyncio.get_event_loop()
my_actor = Actor('my_actor', '127.0.0.1', 9999)
loop.run_until_complete(my_actor.run())
Parameters
----------
name : str
The name of the actor.
host : str
The host where the TCP server will run.
port : int
The port of the TCP server.
version : str
The version of the actor.
loop
The event loop. If `None`, the current event loop will be used.
config : dict or str
A configuration dictionary or the path to a YAML configuration
file that must contain a section ``'actor'`` (if the section is
not present, the whole file is assumed to be the actor
configuration).
status_port : int
If defined, the port on which the status server will run.
status_callback : function
The function to be called by the status server.
status_delay : float
The delay, in seconds, between successive calls to ``status_callback``.
Defaults to `.DEFAULT_STATUS_DELAY`.
log_dir : str
The directory where to store the logs. Defaults to ``$HOME/.<name>``
where ``<name>`` is the name of the actor.
"""
def __init__(self, name=None, host=None, port=None, version=None,
loop=None, config=None, status_port=None, status_callback=None,
status_delay=None, log_dir=None):
self.config = self._parse_config(config)
self.name = name or self.config['name']
assert self.name, 'name cannot be empty.'
self.log = self._setup_logger(log_dir)
self.loop = loop or asyncio.get_event_loop()
self.user_dict = dict()
self.version = version or self.config['version'] or '?'
host = host or self.config['host']
port = port or self.config['port']
self.server = TCPStreamServer(host, port, loop=self.loop,
connection_callback=self.new_user,
data_received_callback=self.new_command)
self.status_server = None
status_port = status_port or self.config['status_port']
sleep_time = status_delay or self.config['status_delay'] or DEFAULT_STATUS_DELAY
if status_port:
self.status_server = TCPStreamPeriodicServer(
host, status_port, loop=self.loop,
periodic_callback=status_callback,
sleep_time=sleep_time)
def __repr__(self):
if self.server and self.server.server:
host, port = self.server.server.sockets[0].getsockname()
else:
host = port = None
return f'<{str(self)} (name={self.name}, host={host!r}, port={port})>'
def __str__(self):
return self.__class__.__name__
async def run(self):
"""Starts the servers."""
await self.server.start_server()
socket = self.server.server.sockets[0]
host, port = socket.getsockname()
self.log.info(f'starting TCP server on {host}:{port}')
if self.status_server:
await self.status_server.start_server()
socket_status = self.status_server.server.sockets[0]
host, port = socket_status.getsockname()
self.log.info(f'starting status server on {host}:{port}')
await self.server.server.serve_forever()
async def shutdown(self):
"""Shuts down all the remaining tasks."""
self.log.info('cancelling all pending tasks and shutting down.')
tasks = [task for task in asyncio.Task.all_tasks(loop=self.loop)
if task is not asyncio.tasks.Task.current_task(loop=self.loop)]
list(map(lambda task: task.cancel(), tasks))
await asyncio.gather(*tasks, return_exceptions=True)
self.loop.stop()
def _parse_config(self, config):
"""Parses the configuration file."""
if config is None:
# Returns a defaultdict that returns None if the key is not present.
return collections.defaultdict(lambda: None)
        if not isinstance(config, dict):
            config = pathlib.Path(config)
            assert config.exists(), 'configuration path does not exist.'
            yaml = ruamel.yaml.YAML(typ='safe')
config = yaml.load(open(str(config)))
if 'actor' in config:
config = config['actor']
return config
def _setup_logger(self, log_dir, file_level=10, shell_level=20):
"""Starts the file logger."""
orig_logger = logging.getLoggerClass()
logging.setLoggerClass(logger.MyLogger)
log = logging.getLogger(self.name + '_actor')
log._set_defaults() # Inits sh handler
logging.setLoggerClass(orig_logger)
if log_dir is None:
if 'logging' in self.config:
log_dir = self.config['logging'].get('log_dir', None)
if log_dir is None:
log_dir = pathlib.Path(f'~/.{self.name}/').expanduser()
else:
log_dir = pathlib.Path(log_dir)
if not log_dir.exists():
log_dir.mkdir(parents=True)
log.start_file_logger(log_dir / f'{self.name}.log')
if 'logging' in self.config:
file_level = self.config['logging'].get('file_level', None) or file_level
shell_level = self.config['logging'].get('shell_level', None) or shell_level
log.sh.setLevel(shell_level)
log.fh.setLevel(file_level)
log.info('logging system initiated.')
return log
def new_user(self, transport):
"""Assigns userID to new client connection."""
curr_ids = set(self.user_dict.keys())
user_id = 1 if len(curr_ids) == 0 else max(curr_ids) + 1
transport.user_id = user_id
self.user_dict[user_id] = transport
# report user information and additional info
self.show_new_user_info(user_id)
return
def new_command(self, transport, command_str):
"""Handles a new command received by the actor."""
command_str = command_str.decode().strip()
if not command_str:
return
user_id = transport.user_id
print(user_id)
try:
command = Command(command_str, user_id=user_id, actor=self, loop=self.loop)
except exceptions.CommandError as ee:
self.write('f', f'Could not parse the following as a command: {ee!r}')
return
# try:
# self.dispatch(command)
# except exceptions.CommandError as ee:
# command.set_status(command.status.Failed,
# message=f'Command {command.command_body!r} failed: {ee}')
return command
def show_new_user_info(self, user_id):
"""Shows information for new users. Called when a new user connects."""
self.show_user_info(user_id)
self.show_version(user_id=user_id)
def show_user_info(self, user_id):
"""Shows user information including your user_id."""
num_users = len(self.user_dict)
if num_users == 0:
return
msg_data = [f'yourUserID={user_id}', f'num_users={num_users}']
msg_str = '; '.join(msg_data)
self.write('i', msg_str, user_id=user_id)
self.show_user_list()
def show_user_list(self):
"""Shows a list of connected users. Broadcast to all users."""
user_id_list = sorted(self.user_dict.keys())
for user_id in user_id_list:
transport = self.user_dict[user_id]
peername = transport.get_extra_info('peername')[0]
msg_str = f'UserInfo={user_id}, {peername}'
self.write('i', msg_str)
def show_version(self, user_id=None):
"""Shows actor version."""
msg_str = f'version={self.version!r}'
self.write('i', msg_str, user_id=user_id)
@staticmethod
def get_user_command_id(command=None, user_id=None, command_id=None):
"""Returns user_id, command_id based on user-supplied information.
Parameters
----------
command : Command
User command; used as a default for ``user_id`` and ``command_id``.
If the command is done, it is ignored.
user_id : int
If `None` then use ``command.user_id``.
command_id : int
If `None` then use ``command.command_id``.
"""
if command is not None and command.is_done:
command = None
user_id = user_id or (command.user_id if command else 0)
command_id = command_id or (command.command_id if command else 0)
return (user_id, command_id)
@staticmethod
def format_user_output(msg_code, msg_str=None, user_id=None, command_id=None):
"""Formats a string to send to users."""
msg_str = '' if msg_str is None else ' ' + msg_str
return f'{command_id:d} {user_id:d} {msg_code:s}{msg_str:s}'
def write(self, msg_code, msg_str, command=None, user_id=None, command_id=None):
"""Writes a message to user(s).
Parameters
----------
msg_code : str
The message code (e.g., ``'i'`` or ``':'``).
msg_str : str
The text to be output. If `None`, only the code will be written.
command : Command
User command; used as a default for ``user_id`` and ``command_id``.
If the command is done, it is ignored.
user_id : int
If `None` then use ``command.user_id``.
command_id : int
If `None` then use ``command.command_id``.
"""
user_id, command_id = self.get_user_command_id(command=command,
user_id=user_id,
command_id=command_id)
full_msg_str = self.format_user_output(msg_code, msg_str,
user_id=user_id,
command_id=command_id)
msg = (full_msg_str + '\n').encode()
if user_id is None or user_id == 0:
for transport in self.user_dict.values():
transport.write(msg)
else:
transport = self.user_dict[user_id]
transport.write(msg)
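
# Minimal illustrative sketch (not part of the original module): shows the
# message framing produced by Actor.format_user_output. The values are
# arbitrary examples.
if __name__ == '__main__':
    # Prints "0 1 i hello": command_id, user_id, message code, then the text.
    print(Actor.format_user_output('i', 'hello', user_id=1, command_id=0))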
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# magic id to use before we know a peer's id
NULL_ID = 20 * '\0'
# Kademlia "K" constant, this should be an even number
K = 8
# SHA1 is 160 bits long
HASH_LENGTH = 160
# checkpoint every this many seconds
CHECKPOINT_INTERVAL = 60 * 5 # five minutes
# how often to find our own nodes
FIND_CLOSE_INTERVAL = 60 * 15 # fifteen minutes
### SEARCHING/STORING
# concurrent krpc calls per find node/value request!
CONCURRENT_REQS = K
# how many hosts to post to
STORE_REDUNDANCY = 3
### ROUTING TABLE STUFF
# how many times in a row a node can fail to respond before it's booted from the routing table
MAX_FAILURES = 3
# never ping a node more often than this
MIN_PING_INTERVAL = 60 * 15 # fifteen minutes
# refresh buckets that haven't been touched in this long
BUCKET_STALENESS = 60 * 15 # fifteen minutes
### KEY EXPIRER
# time before expirer starts running
KEINITIAL_DELAY = 15 # 15 seconds - to clean out old stuff in persistent db
# time between expirer runs
KE_DELAY = 60 * 5 # 5 minutes
# expire entries older than this
KE_AGE = 60 * 30 # 30 minutes
## krpc errback codes
KRPC_TIMEOUT = 20
KRPC_ERROR = 1
KRPC_ERROR_METHOD_UNKNOWN = 2
KRPC_ERROR_RECEIVED_UNKNOWN = 3
KRPC_ERROR_TIMEOUT = 4
KRPC_SOCKET_ERROR = 5
KRPC_CONNECTION_CACHE_TIME = KRPC_TIMEOUT * 2
## krpc error response codes
KERR_ERROR = (201, "Generic Error")
KERR_SERVER_ERROR = (202, "Server Error")
KERR_PROTOCOL_ERROR = (203, "Protocol Error")
KERR_METHOD_UNKNOWN = (204, "Method Unknown")
KERR_INVALID_ARGS = (205, "Invalid Arguments")
KERR_INVALID_TOKEN = (206, "Invalid Token")
|
import math
from decimal import *
class DecimalStack(object):
def __init__(self, *d):
self.data = list(d)
def __getitem__(self, id):
return self.data[id]
def add(self):
self.data.append(self.data.pop() + self.data.pop())
def sub(self):
self.data.append(0 - (self.data.pop() - self.data.pop()))
def mul(self):
self.data.append(self.data.pop() * self.data.pop())
def div(self):
a, b = self.data.pop(), self.data.pop()
self.data.append(b / a)
def ceil(self):
self.data.append(math.ceil(self.data.pop()))
def floor(self):
self.data.append(math.floor(self.data.pop()))
def eq(self):
return 0 - (self.data.pop() == self.data.pop())
def neq(self):
return 0 - (self.data.pop() != self.data.pop())
def gt(self):
a, b = self.data.pop(), self.data.pop()
return 0 - (b > a)
def lt(self):
a, b = self.data.pop(), self.data.pop()
return 0 - (b < a)
def depth(self):
return len(self.data)
def drop(self):
self.data.pop()
def pop(self):
return self.data.pop()
def swap(self):
a, b = self.data.pop(), self.data.pop()
self.data += [a, b]
def push(self, n):
self.data.append(n)
def log(self):
a, b = self.data.pop(), self.data.pop()
self.data.append(math.log(b, a))
def power(self):
a, b = self.data.pop(), self.data.pop()
self.data.append(math.pow(a, b))
def sin(self):
self.data.append(math.sin(self.data.pop()))
def cos(self):
self.data.append(math.cos(self.data.pop()))
def tan(self):
self.data.append(math.tan(self.data.pop()))
def asin(self):
self.data.append(math.asin(self.data.pop()))
def acos(self):
self.data.append(math.acos(self.data.pop()))
def atan(self):
self.data.append(math.atan(self.data.pop()))
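
# Minimal usage sketch (not part of the original module): evaluates the RPN
# expression "10 4 - 3 *" with Decimal operands.
if __name__ == "__main__":
    stack = DecimalStack(Decimal(10), Decimal(4))
    stack.sub()             # 10 - 4 -> 6
    stack.push(Decimal(3))
    stack.mul()             # 6 * 3 -> 18
    print(stack.pop())      # prints 18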
|
import pytest
from ctrlibrary.threatresponse.enrich import enrich_observe_observables
from ctrlibrary.core.utils import get_observables
from tests.functional.tests.constants import (
MODULE_NAME,
CTR_ENTITIES_LIMIT
)
@pytest.mark.parametrize(
'observable, observable_type',
(('a23-38-112-137.deploy.static.akamaitechnologies.com', 'domain'),
('23.38.112.137', 'ip'),
('701fb8ed9d1f72c901e207dd01b481266be8458f6e03750c1a139c901f2995fa',
'sha256'),
('415e5cc23e106483711abe70ad78c8e2', 'md5'),
('MSFTHISTORY!', 'mutex'),
(r'C:\Users\User01\Downloads\Malware', 'file_path'),
('buzus.exe', 'file_name'))
)
def test_positive_enrich_observe_observables_relationships(
module_headers, observable, observable_type):
""" Perform testing for enrich observe observables endpoint to get
    relationships for an observable from the Qualys module
ID: CCTRI-798-bcb33509-c153-4436-93c3-7345e7704b9d
Steps:
1. Send request to enrich observe observable endpoint
    Expected results:
1. Check that data in response body contains expected information
in relationships from Qualys module
Importance: Critical
"""
observables = [{"value": observable, "type": observable_type}]
response_from_all_modules = enrich_observe_observables(
payload=observables,
**{'headers': module_headers}
)
response_from_qualys_ioc = get_observables(
response_from_all_modules, MODULE_NAME)
assert response_from_qualys_ioc['module'] == MODULE_NAME
assert response_from_qualys_ioc['module_instance_id']
assert response_from_qualys_ioc['module_type_id']
relationships = response_from_qualys_ioc['data']['relationships']
sightings = response_from_qualys_ioc['data']['sightings']
indicators = response_from_qualys_ioc['data']['indicators']
judgements = response_from_qualys_ioc['data']['judgements']
indicators_ids = frozenset(
indicator['id'] for indicator in indicators['docs'])
judgements_ids = frozenset(
judgement['id'] for judgement in judgements['docs'])
sightings_ids = frozenset(
sighting['id'] for sighting in sightings['docs'])
assert len(relationships['docs']) > 0
for relationship in relationships['docs']:
assert relationship['schema_version']
assert relationship['type'] == 'relationship'
assert relationship['source'] == MODULE_NAME
assert relationship['id'].startswith('transient:relationship-')
assert 'external_ids' in relationship
assert 'source_uri' in relationship
if relationship['relationship_type'] == 'based-on':
if relationship['target_ref'].startswith('transient:indicator-'):
assert relationship['target_ref'] in indicators_ids
assert relationship['source_ref'] in judgements_ids
elif relationship['target_ref'].startswith('transient:judgement-'):
assert relationship['target_ref'] in judgements_ids
assert relationship['source_ref'] in sightings_ids
elif relationship['relationship_type'] == 'sighting-of':
assert relationship['target_ref'] in indicators_ids
assert relationship['source_ref'] in sightings_ids
else:
raise AssertionError('Unsupported relationship type')
assert relationships['count'] == len(relationships['docs']) <= (
CTR_ENTITIES_LIMIT)
|
from quart import Quart
from db import Session
from blueprints import auth_blueprint, websockets_blueprint
app = Quart(__name__)
app.register_blueprint(auth_blueprint)
app.register_blueprint(websockets_blueprint)
@app.teardown_appcontext
async def teardown_db(resp_or_exc):
Session.remove()
app.run() |
def remove_all_occurences(list, remove_value):
return None
def is_leap(list, remove_value):
return None
def add(a, b):
return None
def g(list, remove_value):
return None
def t(list, remove_value):
return None
print(2 in [1,2])
def if_funtion():
if 2 in [1,2]:
return True
print(if_funtion())
if 2 in [1,2]:
print(True)
|
# -*- coding: utf-8 -*-
# @Time : 2017/12/17 12:47
# @Author : Xiaofeifei
# @File : evaluation.py
from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, confusion_matrix, precision_recall_curve
import numpy as np
import matplotlib.pyplot as plt
import itertools
def auc(y_true, y_pred):
y_pred = np.squeeze(np.reshape(y_pred, [-1, 1]))
y_true = np.squeeze(np.reshape(y_true, [-1, 1]))
return roc_auc_score(y_true, y_pred)
def plot_roc(y_true, y_pred, title):
y_pred = np.squeeze(np.reshape(y_pred, [-1, 1]))
y_true = np.squeeze(np.reshape(y_true, [-1, 1]))
fpr, tpr, _ = roc_curve(y_true, y_pred)
auc = roc_auc_score(y_true, y_pred)
plt.figure()
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.01, 1.0])
plt.ylim([-0.01, 1.01])
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.title(title)
plt.savefig('../pic/' + title + '.png')
plt.show()
"""
def average_precision(y_true, y_pred):
y_pred = np.squeeze(np.reshape(y_pred, [-1, 1]))
y_true = np.squeeze(np.reshape(y_true, [-1, 1]))
    average_precision_ = average_precision_score(y_true, y_pred)  # compute the average precision
return average_precision_
"""
def precision_recall(y_true, y_pred):
y_pred = np.squeeze(np.reshape(y_pred, [-1, 1]))
y_true = np.squeeze(np.reshape(y_true, [-1, 1]))
cm = confusion_matrix(y_true, y_pred)
recall = cm[1, 1] / (cm[1, 0] + cm[1, 1])
precision = cm[1, 1] / (cm[0, 1] + cm[1, 1])
return precision, recall
def plot_prc(y_true, y_pred, title):
y_pred = np.squeeze(np.reshape(y_pred, [-1, 1]))
y_true = np.squeeze(np.reshape(y_true, [-1, 1]))
precision, recall, _ = precision_recall_curve(y_true, y_pred)
average_precision = average_precision_score(y_true, y_pred)
plt.step(recall, precision, color='b', alpha=0.2,
where='post')
plt.fill_between(recall, precision, step='post', alpha=0.2,
color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(title + ': AP={0:0.2f}'.format(
average_precision))
plt.savefig('../pic/' + title + '.png')
plt.show()
def plot_confusion_matric(y_true, y_pred, classes, normalize=False, title='Confusion matrix'):
y_pred = np.squeeze(np.reshape(y_pred, [-1, 1]))
y_true = np.squeeze(np.reshape(y_true, [-1, 1]))
    cm = confusion_matrix(y_true, y_pred)  # compute the confusion matrix
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title(title)
plt.colorbar()
    tick_marks = np.arange(len(classes))  # tick positions for the class labels
plt.xticks(tick_marks, classes, rotation=0)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('../pic/' + title + '.png')
plt.show()
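
# Minimal usage sketch (not part of the original file): the labels and scores
# below are made-up toy values, only meant to show the expected array shapes.
if __name__ == '__main__':
    y_true = np.array([0, 0, 1, 1])
    y_pred = np.array([0.1, 0.4, 0.35, 0.8])
    print('AUC:', auc(y_true, y_pred))  # 0.75 for this toy example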
|
def args(method):
pass
def fred(method):
breakpoint()
args_ = method() # noqa
|
#!/usr/bin/env python2
"""
This is a template file of abt command
"""
import argparse
import abt.cli as cli
import abt.rpc_client as client
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=cli.progname, description=__doc__.strip())
parser.add_argument('arg1', action='store', help="balabalah")
parser.add_argument('arg2', action='store', help="balabalah")
parser.add_argument('--opt1', action='store', help="balabalah")
parser.add_argument('--opt2', action='store', help="balabalah")
args = parser.parse_args()
|
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from django.contrib import admin
from .models import AppTemplate
@admin.register(AppTemplate)
class AppTemplateAdmin(PlaceholderAdminMixin, admin.ModelAdmin):
list_display = ('date', 'title', 'published', )
fieldsets = (
('', {
'fields': (
'published',
(
'title',
'seo_title',
'date'
),
'meta_description',
# 'preview_text',
'preview_image',
)
}),
)
|
# Write a program that shows the multiplication table for several numbers,
# one at a time, for each value entered by the user.
# The program stops when the requested number is negative.
cont = 1
c = 1
print('\033[37mCaso digite um número negativo o programa será encerrado.\033[m')
while True:
num = int(input('\033[1mDigite um número para saber sua tabuada: \033[m'))
if num < 0:
break
print('-' * 12)
for c in range(1, 11):
multi = num * c
print(f'\033[34m{num} x {c} = {multi}\033[m')
print('-' * 12)
print('\n\033[31mPrograma encerrado.')
|
###############################################################################
# Core Python Wrapper for RP1210 dlls
###############################################################################
from struct import pack, unpack
from ctypes import windll, byref, c_char_p, c_int, c_short, c_long, c_void_p, create_string_buffer
import ConfigParser
"""
Class: RP1210
"""
class RP1210:
def __init__(self):
"""
RP1210 Constructor
"""
config = ConfigParser.RawConfigParser()
# Todo: 1) Determine os 2) Load our own config, 3) determine windows root, 4) open ini
config.read('c:\windows\RP121032.ini')
# Todo: Select the correct dll and dll config name by matching config or if there is only one
dllname = config.get('RP1210Support', 'APIImplementations').split(',')[0]
# Todo: Open dll ini file for details of allowed configurations
# Load the correct dll
self.dll = windll.LoadLibrary(dllname)
# Define Function Prototypes for python type checking - otherwise python depends on before and after stack pointer comparisons
self.dll.RP1210_ClientConnect.argtypes = [c_long, c_short, c_char_p, c_long, c_long, c_short]
self.dll.RP1210_ClientDisconnect.argtypes = [c_short]
self.dll.RP1210_SendMessage.argtypes = [c_short, c_char_p, c_short, c_short, c_short]
self.dll.RP1210_ReadMessage.argtypes = [c_short, c_char_p, c_short, c_short]
self.dll.RP1210_ReadVersion.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p]
self.dll.RP1210_ReadDetailedVersion.argtypes = [c_short, c_char_p, c_char_p, c_char_p]
self.dll.RP1210_GetErrorMsg.argtypes = [c_short, c_char_p]
self.dll.RP1210_GetLastErrorMsg.argtypes = [c_short, c_void_p, c_char_p]
self.dll.RP1210_GetHardwareStatus.argtypes = [c_short, c_char_p, c_short, c_short]
self.dll.RP1210_SendCommand.argtypes = [c_short, c_short, c_char_p, c_short]
def ClientConnect(self, Device, Protocol, TxBufSize, RxBufSize):
return self.dll.RP1210_ClientConnect(c_long(0), Device, Protocol, TxBufSize, RxBufSize, 0)
def ClientDisconnect(self, ClientId):
return self.dll.RP1210_ClientDisconnect(ClientId)
def SendMessage(self, ClientId, Message, MessageSize, Block):
return self.dll.RP1210_SendMessage(ClientId, Message, MessageSize, 0, Block)
def ReadMessage(self, ClientId, Block):
p1 = create_string_buffer(100)
self.dll.RP1210_ReadMessage(ClientId, p1, len(p1), Block)
return p1.value
def ReadVersion(self):
p1 = create_string_buffer(5)
p2 = create_string_buffer(5)
p3 = create_string_buffer(5)
p4 = create_string_buffer(5)
self.dll.RP1210_ReadVersion(p1, p2, p3, p4)
return (p1.value, p2.value, p3.value, p4.value)
def ReadDetailedVersion(self, ClientId):
p1 = create_string_buffer(17)
p2 = create_string_buffer(17)
p3 = create_string_buffer(17)
self.dll.RP1210_ReadDetailedVersion(ClientId, p1, p2, p3)
return (p1.value, p2.value, p3.value)
def GetErrorMsg(self, ErrorCode):
p1 = create_string_buffer(80)
self.dll.RP1210_GetErrorMsg(ErrorCode, p1)
return p1.value
def GetLastErrorMsg(self, ErrorCode):
p1 = c_int(0)
p2 = create_string_buffer(80)
self.dll.RP1210_GetLastErrorMsg(ErrorCode, byref(p1), p2)
return (p1, p2.value)
def GetHardwareStatus(self, ClientId, Block):
p1 = create_string_buffer(18)
self.dll.RP1210_GetHardwareStatus(ClientId, p1, 18, Block)
return p1.value
def SendCommand(self, ClientId, CommandNumber, CommandString, CommandSize):
return self.dll.RP1210_SendCommand(CommandNumber, ClientId, CommandString, CommandSize)
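
# Hedged usage sketch (not part of the original file): a typical RP1210 call
# sequence on Windows. DEVICE_ID and the protocol string depend on the
# vendor's RP121032.ini / driver ini files and are placeholders here.
#
#     rp = RP1210()
#     client_id = rp.ClientConnect(DEVICE_ID, "J1939", 2048, 2048)
#     rp.SendCommand(client_id, 3, "", 0)   # e.g. set all filters to pass
#     raw = rp.ReadMessage(client_id, 0)
#     rp.ClientDisconnect(client_id)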
class j1587Message:
def __init__(self, Timestamp, Priority, Mid, Data):
"""Standard Constructor"""
self.Timestamp = Timestamp
self.Priority = Priority
self.Mid = Mid
self.Data = Data
def fromString(self, StringForm):
fmt = '<IB%ds' % (len(StringForm) - 5)
(self.Timestamp, self.Mid, self.Data) = unpack(fmt, StringForm)
return self
    def __str__(self):
        """Convert the Message to a string RP1210 functions will understand"""
        fmt = '<BB%ds' % len(self.Data)
        return pack(fmt, self.Priority, self.Mid, self.Data)
class j1939Message:
def __init__(self, Timestamp, Pgn, Priority, Source, Destination, Data):
"""Standard Constructor"""
self.TimeStamp = Timestamp
self.Pgn = Pgn
self.Priority = Priority
self.Source= Source
self.Destination = Destination
self.Data = Data
def fromString(self, StringForm):
fmt = '<IHBBBB%ds' % (len(StringForm) - 10)
(self.TimeStamp, PgnLo, PgnHi, self.Priority, self.Source, self.Destination, self.Data) = unpack(fmt, StringForm)
self.Pgn = ((PgnHi << 16) & 0x0F0000) | (PgnLo & 0xFFFF)
return self
def __str__(self):
"""Convert the Message to a string RP1210 functions will understand"""
PgnLo = self.Pgn & 0xFFFF
PgnHi = ((self.Pgn & 0x0F0000) >> 16)
fmt = '<HBBBB%ds' % len(self.Data)
return pack(fmt, PgnLo, PgnHi, self.Priority, self.Source, self.Destination, self.Data)
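
# Illustrative sketch (not part of the original file): the byte layout used by
# j1939Message. __str__ packs <PGN-lo(2), PGN-hi(1), priority(1), source(1),
# destination(1), data>, while fromString additionally expects a leading
# 4-byte timestamp, matching what RP1210_ReadMessage returns. For example
# (values are made up):
#
#     msg = j1939Message(0, 0x00FECA, 6, 0x00, 0xFF, '\x01\x02')
#     raw = str(msg)              # 6-byte header + 2 data bytes
#     parsed = j1939Message(0, 0, 0, 0, 0, '').fromString('\x00' * 4 + raw)
#     assert parsed.Pgn == 0x00FECA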
|
# Fetch the "技評ねこ部通信" (Gihyo cat club news) page with Beautiful Soup 4
import requests
from bs4 import BeautifulSoup
r = requests.get('http://gihyo.jp/lifestyle/clip/01/everyday-cat')
soup = BeautifulSoup(r.content, 'html.parser')
title = soup.title  # get the title tag
print(type(title))  # the object type is Tag
# <class 'bs4.element.Tag'>
print(title)  # check the contents of the title
# <title>技評ねこ部通信|gihyo.jp … 技術評論社</title>
print(title.text)  # get the text inside the title
# 技評ねこ部通信|gihyo.jp … 技術評論社
div = soup.find('div', class_='readingContent01')
for li in div.find_all('li'):  # get every li tag inside the div
url = li.a['href']
date, text = li.a.text.split()
print('{},{},{}'.format(date, text, url))
|
import FWCore.ParameterSet.Config as cms
# Single muon for Wjets
isomuons = cms.EDFilter(
"MuonSelector",
src = cms.InputTag('muons'),
cut = cms.string("(isTrackerMuon) && std::abs(eta) < 2.5 && pt > 9.5"+#17. "+
"&& isPFMuon"+
"&& globalTrack.isNonnull"+
"&& innerTrack.hitPattern.numberOfValidPixelHits > 0"+
"&& innerTrack.normalizedChi2 < 10"+
"&& numberOfMatches > 0"+
"&& innerTrack.hitPattern.numberOfValidTrackerHits>5"+
"&& globalTrack.hitPattern.numberOfValidHits>0"+
"&& (pfIsolationR03.sumChargedHadronPt+pfIsolationR03.sumNeutralHadronEt+pfIsolationR03.sumPhotonEt)/pt < 0.3"+
"&& std::abs(innerTrack().dxy)<2.0"
),
filter = cms.bool(False)
)
isoelectrons = cms.EDFilter(
"GsfElectronSelector",
src = cms.InputTag('gsfElectrons'),
cut = cms.string("std::abs(eta) < 2.5 && pt > 9.5" +
"&& gsfTrack.trackerExpectedHitsInner.numberOfHits == 0" +
# "&& (pfIsolationVariables.chargedHadronIso+pfIsolationVariables.neutralHadronIso)/et < 0.3" +
"&& (isolationVariables03.tkSumPt)/et < 0.2" +
"&& ((std::abs(eta) < 1.4442 " +
"&& std::abs(deltaEtaSuperClusterTrackAtVtx) < 0.007"+
"&& std::abs(deltaPhiSuperClusterTrackAtVtx) < 0.8" +
"&& sigmaIetaIeta < 0.01" +
"&& hcalOverEcal < 0.15" +
"&& std::abs(1./superCluster.energy - 1./p) < 0.05)"+
"|| (std::abs(eta) > 1.566 "+
"&& std::abs(deltaEtaSuperClusterTrackAtVtx) < 0.009"+
"&& std::abs(deltaPhiSuperClusterTrackAtVtx) < 0.10" +
"&& sigmaIetaIeta < 0.03" +
"&& hcalOverEcal < 0.10" +
"&& std::abs(1./superCluster.energy - 1./p) < 0.05))"
),
filter = cms.bool(False)
)
from RecoJets.Configuration.RecoPFJets_cff import kt6PFJets as dummy
kt6PFJetsForRhoComputationVoronoiMet = dummy.clone(
doRhoFastjet = True,
voronoiRfact = 0.9
)
from RecoTauTag.RecoTau.PFRecoTauDiscriminationByHPSSelection_cfi import hpsSelectionDiscriminator
hpsPFTauDiscriminationByDecayModeFinding = hpsSelectionDiscriminator.clone(
PFTauProducer = 'hpsPFTauProducer'
)
from RecoTauTag.RecoTau.TauDiscriminatorTools import requireLeadTrack
# Define decay mode prediscriminant
requireDecayMode = cms.PSet(
BooleanOperator = cms.string("and"),
decayMode = cms.PSet(
Producer = cms.InputTag('hpsPFTauDiscriminationByDecayModeFinding'),
cut = cms.double(0.5)
)
)
from RecoTauTag.Configuration.HPSPFTaus_cff import hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits
import RecoTauTag.RecoTau.pfRecoTauDiscriminationAgainstMuon2_cfi as _mod
hpsPFTauDiscriminationAgainstMuon2 = _mod.pfRecoTauDiscriminationAgainstMuon2.clone(
PFTauProducer = 'hpsPFTauProducer',
Prediscriminants = requireDecayMode.clone(),
discriminatorOption = 'loose', # available options are: 'loose', 'medium', 'tight'
)
hpsPFTauDiscriminationByMVAIsolation = cms.EDProducer(
"PFRecoTauDiscriminationByMVAIsolation",
PFTauProducer = cms.InputTag('hpsPFTauProducer'),
rhoProducer = cms.InputTag('kt6PFJetsForRhoComputationVoronoiMet','rho'),
Prediscriminants = requireDecayMode.clone(),
gbrfFilePath = cms.FileInPath('RecoTauTag/RecoTau/data/gbrfTauIso_v2.root'),
returnMVA = cms.bool(False),
mvaMin = cms.double(0.8),
)
isotaus = cms.EDFilter(
"PFTauSelector",
src = cms.InputTag('hpsPFTauProducer'),
BooleanOperator = cms.string("and"),
discriminators = cms.VPSet(
cms.PSet( discriminator=cms.InputTag("hpsPFTauDiscriminationByDecayModeFinding"), selectionCut=cms.double(0.5)),
#cms.PSet( discriminator=cms.InputTag("hpsPFTauDiscriminationByMVAIsolation"), selectionCut=cms.double(0.5)),
cms.PSet( discriminator=cms.InputTag("hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits"), selectionCut=cms.double(0.5)),
cms.PSet( discriminator=cms.InputTag("hpsPFTauDiscriminationByLooseElectronRejection"), selectionCut=cms.double(0.5)),
cms.PSet( discriminator=cms.InputTag("hpsPFTauDiscriminationAgainstMuon2"), selectionCut=cms.double(0.5))
),
cut = cms.string("std::abs(eta) < 2.3 && pt > 19.0 "),
filter = cms.bool(False)
)
isomuonTask = cms.Task(isomuons)
isomuonseq = cms.Sequence(isomuonTask)
isoelectronTask = cms.Task(isoelectrons)
isoelectronseq = cms.Sequence(isoelectronTask)
isotauTask = cms.Task(
hpsPFTauDiscriminationByLooseCombinedIsolationDBSumPtCorr3Hits,
#kt6PFJetsForRhoComputationVoronoiMet,
#hpsPFTauDiscriminationByMVAIsolation,
hpsPFTauDiscriminationAgainstMuon2,
isotaus
)
isotauseq = cms.Sequence(isotauTask)
leptonSelection = cms.PSet(
SelectEvents = cms.PSet(
SelectEvents = cms.vstring(
'isomuonseq',
'isoelectronseq',
'isotauseq')
)
)
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.disttrial}.
"""
import os
import sys
from twisted.internet.protocol import Protocol, ProcessProtocol
from twisted.internet.defer import fail, gatherResults, maybeDeferred, succeed
from twisted.internet.task import Cooperator, deferLater
from twisted.internet.main import CONNECTION_DONE
from twisted.internet import reactor, interfaces, error
from twisted.python.compat import NativeStringIO as StringIO
from twisted.python.failure import Failure
from twisted.python.lockfile import FilesystemLock
from twisted.test.test_cooperator import FakeScheduler
from twisted.test.proto_helpers import MemoryReactorClock
from twisted.trial.unittest import SynchronousTestCase, TestCase
from twisted.trial.reporter import Reporter, TreeReporter
from twisted.trial.reporter import UncleanWarningsReporterWrapper
from twisted.trial.runner import TrialSuite, ErrorHolder
from twisted.trial._dist.disttrial import DistTrialRunner
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial._dist.worker import LocalWorker
from zope.interface import implementer, verify
class FakeTransport(object):
"""
A simple fake process transport.
"""
def writeToChild(self, fd, data):
"""
Ignore write calls.
"""
@implementer(interfaces.IReactorProcess)
class CountingReactor(MemoryReactorClock):
"""
A fake reactor that counts the calls to L{IReactorCore.run},
L{IReactorCore.stop}, and L{IReactorProcess.spawnProcess}.
"""
spawnCount = 0
stopCount = 0
runCount = 0
def __init__(self, workers):
MemoryReactorClock.__init__(self)
self._workers = workers
def spawnProcess(self, worker, *args, **kwargs):
"""
See L{IReactorProcess.spawnProcess}.
@param worker: See L{IReactorProcess.spawnProcess}.
@param args: See L{IReactorProcess.spawnProcess}.
@param kwargs: See L{IReactorProcess.spawnProcess}.
"""
self._workers.append(worker)
worker.makeConnection(FakeTransport())
self.spawnCount += 1
def stop(self):
"""
See L{IReactorCore.stop}.
"""
MemoryReactorClock.stop(self)
self.stopCount += 1
def run(self):
"""
See L{IReactorCore.run}.
"""
self.runCount += 1
# The same as IReactorCore.run, except no stop.
self.running = True
self.hasRun = True
for f, args, kwargs in self.whenRunningHooks:
f(*args, **kwargs)
class CountingReactorTests(SynchronousTestCase):
"""
Tests for L{CountingReactor}.
"""
def setUp(self):
self.workers = []
self.reactor = CountingReactor(self.workers)
def test_providesIReactorProcess(self):
"""
L{CountingReactor} instances provide L{IReactorProcess}.
"""
verify.verifyObject(interfaces.IReactorProcess, self.reactor)
def test_spawnProcess(self):
"""
The process protocol for a spawned process is connected to a
transport and appended onto the provided C{workers} list, and
the reactor's C{spawnCount} increased.
"""
self.assertFalse(self.reactor.spawnCount)
proto = Protocol()
for count in [1, 2]:
self.reactor.spawnProcess(proto, sys.executable,
arg=[sys.executable])
self.assertTrue(proto.transport)
self.assertEqual(self.workers, [proto] * count)
self.assertEqual(self.reactor.spawnCount, count)
def test_stop(self):
"""
Stopping the reactor increments its C{stopCount}
"""
self.assertFalse(self.reactor.stopCount)
for count in [1, 2]:
self.reactor.stop()
self.assertEqual(self.reactor.stopCount, count)
def test_run(self):
"""
Running the reactor increments its C{runCount}, does not imply
C{stop}, and calls L{IReactorCore.callWhenRunning} hooks.
"""
self.assertFalse(self.reactor.runCount)
whenRunningCalls = []
self.reactor.callWhenRunning(whenRunningCalls.append, None)
for count in [1, 2]:
self.reactor.run()
self.assertEqual(self.reactor.runCount, count)
self.assertEqual(self.reactor.stopCount, 0)
self.assertEqual(len(whenRunningCalls), count)
class EternalTerminationPredicateFactory(object):
"""
    A rigged terminationPredicateFactory for which time never passes.
"""
def __call__(self):
"""
See: L{task._Timer}
"""
return False
class DistTrialRunnerTests(TestCase):
"""
Tests for L{DistTrialRunner}.
"""
def setUp(self):
"""
Create a runner for testing.
"""
self.runner = DistTrialRunner(TreeReporter, 4, [],
workingDirectory=self.mktemp())
self.runner._stream = StringIO()
def reap(self, workers):
"""
Reap the workers and trap L{ConnectionDone} failures on their
C{endDeferred}s.
@param workers: The workers to reap.
@type workers: An iterable of L{LocalWorker}
"""
for worker in workers:
worker.endDeferred.addErrback(Failure.trap, error.ConnectionDone)
worker.processEnded(Failure(CONNECTION_DONE))
def getFakeSchedulerAndEternalCooperator(self):
"""
Helper to create fake scheduler and cooperator in tests.
The cooperator has a termination timer which will never inform
the scheduler that the task needs to be terminated.
@return: L{tuple} of (scheduler, cooperator)
"""
scheduler = FakeScheduler()
cooperator = Cooperator(
scheduler=scheduler,
terminationPredicateFactory=EternalTerminationPredicateFactory,
)
return scheduler, cooperator
def test_writeResults(self):
"""
L{DistTrialRunner.writeResults} writes to the stream specified in the
init.
"""
stringIO = StringIO()
result = DistReporter(Reporter(stringIO))
self.runner.writeResults(result)
self.assertTrue(stringIO.tell() > 0)
def test_createLocalWorkers(self):
"""
        C{createLocalWorkers} iterates the list of protocols and creates one
L{LocalWorker} for each.
"""
protocols = [object() for x in range(4)]
workers = self.runner.createLocalWorkers(protocols, "path")
for s in workers:
self.assertIsInstance(s, LocalWorker)
self.assertEqual(4, len(workers))
def test_launchWorkerProcesses(self):
"""
Given a C{spawnProcess} function, C{launchWorkerProcess} launches a
python process with an existing path as its argument.
"""
protocols = [ProcessProtocol() for i in range(4)]
arguments = []
environment = {}
def fakeSpawnProcess(processProtocol, executable, args=(), env={},
path=None, uid=None, gid=None, usePTY=0,
childFDs=None):
arguments.append(executable)
arguments.extend(args)
environment.update(env)
self.runner.launchWorkerProcesses(
fakeSpawnProcess, protocols, ["foo"])
self.assertEqual(arguments[0], arguments[1])
self.assertTrue(os.path.exists(arguments[2]))
self.assertEqual("foo", arguments[3])
self.assertEqual(os.pathsep.join(sys.path),
environment["TRIAL_PYTHONPATH"])
def test_run(self):
"""
C{run} starts the reactor exactly once and spawns each of the workers
exactly once.
"""
workers = []
fakeReactor = CountingReactor(workers)
self.addCleanup(self.reap, workers)
suite = TrialSuite()
for i in range(10):
suite.addTest(TestCase())
self.runner.run(suite, fakeReactor)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, self.runner._workerNumber)
def test_runUsedDirectory(self):
"""
L{DistTrialRunner} checks if the test directory is already locked, and
if it is generates a name based on it.
"""
class CountingReactorWithLock(CountingReactor):
def spawnProcess(oself, worker, *args, **kwargs):
oself._workers.append(worker)
self.assertEqual(os.path.abspath(worker._logDirectory),
os.path.abspath(
os.path.join(workingDirectory + "-1",
str(oself.spawnCount))))
localLock = FilesystemLock(workingDirectory + "-1.lock")
self.assertFalse(localLock.lock())
oself.spawnCount += 1
worker.makeConnection(FakeTransport())
worker._ampProtocol.run = lambda *args: succeed(None)
newDirectory = self.mktemp()
os.mkdir(newDirectory)
workingDirectory = os.path.join(newDirectory, "_trial_temp")
lock = FilesystemLock(workingDirectory + ".lock")
lock.lock()
self.addCleanup(lock.unlock)
self.runner._workingDirectory = workingDirectory
workers = []
fakeReactor = CountingReactorWithLock(workers)
self.addCleanup(self.reap, workers)
suite = TrialSuite()
for i in range(10):
suite.addTest(TestCase())
self.runner.run(suite, fakeReactor)
def test_minimalWorker(self):
"""
L{DistTrialRunner} doesn't try to start more workers than the number of
tests.
"""
workers = []
fakeReactor = CountingReactor(workers)
self.addCleanup(self.reap, workers)
self.runner.run(TestCase(), fakeReactor)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, 1)
def test_runUncleanWarnings(self):
"""
Running with the C{unclean-warnings} option makes L{DistTrialRunner}
        use the L{UncleanWarningsReporterWrapper}.
"""
workers = []
fakeReactor = CountingReactor(workers)
self.addCleanup(self.reap, workers)
self.runner._uncleanWarnings = True
result = self.runner.run(TestCase(), fakeReactor)
self.assertIsInstance(result, DistReporter)
self.assertIsInstance(result.original,
UncleanWarningsReporterWrapper)
def test_runWithoutTest(self):
"""
When the suite contains no test, L{DistTrialRunner} takes a shortcut
path without launching any process or starting the reactor.
"""
fakeReactor = object()
suite = TrialSuite()
result = self.runner.run(suite, fakeReactor)
self.assertIsInstance(result, DistReporter)
output = self.runner._stream.getvalue()
self.assertIn("Running 0 test", output)
self.assertIn("PASSED", output)
def test_runWithoutTestButWithAnError(self):
"""
Even if there is no test, the suite can contain an error (most likely,
an import error): this should make the run fail, and the error should
be printed.
"""
fakeReactor = object()
error = ErrorHolder("an error", Failure(RuntimeError("foo bar")))
result = self.runner.run(error, fakeReactor)
self.assertIsInstance(result, DistReporter)
output = self.runner._stream.getvalue()
self.assertIn("Running 0 test", output)
self.assertIn("foo bar", output)
self.assertIn("an error", output)
self.assertIn("errors=1", output)
self.assertIn("FAILED", output)
def test_runUnexpectedError(self):
"""
        If for some reason we can't connect to the worker process, the test
        suite catches the error and fails.
"""
class CountingReactorWithFail(CountingReactor):
def spawnProcess(self, worker, *args, **kwargs):
self._workers.append(worker)
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.failingRun
def failingRun(self, case, result):
return fail(RuntimeError("oops"))
scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()
workers = []
fakeReactor = CountingReactorWithFail(workers)
self.addCleanup(self.reap, workers)
result = self.runner.run(TestCase(), fakeReactor,
cooperator.cooperate)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, 1)
scheduler.pump()
self.assertEqual(1, len(result.original.failures))
def test_runStopAfterTests(self):
"""
L{DistTrialRunner} calls C{reactor.stop} and unlocks the test directory
once the tests have run.
"""
class CountingReactorWithSuccess(CountingReactor):
def spawnProcess(self, worker, *args, **kwargs):
self._workers.append(worker)
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.succeedingRun
def succeedingRun(self, case, result):
return succeed(None)
workingDirectory = self.runner._workingDirectory
workers = []
fakeReactor = CountingReactorWithSuccess(workers)
self.runner.run(TestCase(), fakeReactor)
def check():
localLock = FilesystemLock(workingDirectory + ".lock")
self.assertTrue(localLock.lock())
self.assertEqual(1, fakeReactor.stopCount)
self.assertEqual(list(fakeReactor.triggers.keys()), ["before"])
self.assertEqual(list(fakeReactor.triggers["before"]), ["shutdown"])
self.reap(workers)
return deferLater(reactor, 0, check)
def test_runWaitForProcessesDeferreds(self):
"""
L{DistTrialRunner} waits for the worker processes to stop when the
reactor is stopping, and then unlocks the test directory, not trying to
stop the reactor again.
"""
workers = []
workingDirectory = self.runner._workingDirectory
fakeReactor = CountingReactor(workers)
self.runner.run(TestCase(), fakeReactor)
def check(ign):
# Let the AMP deferreds fire
return deferLater(reactor, 0, realCheck)
def realCheck():
localLock = FilesystemLock(workingDirectory + ".lock")
self.assertTrue(localLock.lock())
# Stop is not called, as it ought to have been called before
self.assertEqual(0, fakeReactor.stopCount)
self.assertEqual(list(fakeReactor.triggers.keys()), ["before"])
self.assertEqual(list(fakeReactor.triggers["before"]), ["shutdown"])
self.reap(workers)
return gatherResults([
maybeDeferred(f, *a, **kw)
for f, a, kw in fakeReactor.triggers["before"]["shutdown"]
]).addCallback(check)
def test_runUntilFailure(self):
"""
L{DistTrialRunner} can run in C{untilFailure} mode where it will run
the given tests until they fail.
"""
called = []
class CountingReactorWithSuccess(CountingReactor):
def spawnProcess(self, worker, *args, **kwargs):
self._workers.append(worker)
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.succeedingRun
def succeedingRun(self, case, result):
called.append(None)
if len(called) == 5:
return fail(RuntimeError("oops"))
return succeed(None)
workers = []
fakeReactor = CountingReactorWithSuccess(workers)
self.addCleanup(self.reap, workers)
scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()
result = self.runner.run(
TestCase(), fakeReactor, cooperate=cooperator.cooperate,
untilFailure=True)
scheduler.pump()
self.assertEqual(5, len(called))
self.assertFalse(result.wasSuccessful())
output = self.runner._stream.getvalue()
self.assertIn("PASSED", output)
self.assertIn("FAIL", output)
|
import numpy as np
import tensorflow as tf
import keras
from keras.datasets import mnist
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
import matplotlib.pylab as plt
import load_data as ld
from keras.callbacks import TensorBoard
tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,
write_graph=True, write_images=False)
batch_size = 128
epochs = 10
width = 56
height = 56
classes = ['square', 'circle']
num_classes = len(classes)
print('Generating training data')
(x_train, y_train) = ld.load_data(10000, width, height)
(x_test, y_test) = ld.load_data(3000, width, height)
input_shape = (width, height, 3)
print('Completed generation of training data')
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices - this is for use in the
# categorical_crossentropy loss below
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(8, kernel_size=(5, 5), strides=(2, 2),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(16, (5, 5), strides=(2, 2), activation='relu'))
model.add(Flatten())
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
history = AccuracyHistory()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=[history, tensorboard])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
plt.plot(range(1, epochs + 1), history.acc)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show() |
from hashlib import sha256
from chinilla.types.blockchain_format.sized_bytes import bytes32
def std_hash(b, skip_bytes_conversion: bool = False) -> bytes32:
"""
The standard hash used in many places.
"""
if skip_bytes_conversion:
return bytes32(sha256(b).digest())
else:
return bytes32(sha256(bytes(b)).digest())
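
# Minimal usage sketch (not part of the original module): with the default
# skip_bytes_conversion=False the input is passed through bytes() first, so for
# a plain bytes object the result equals a direct sha256 digest.
if __name__ == "__main__":
    assert std_hash(b"chinilla") == bytes32(sha256(b"chinilla").digest())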
|
# -*- coding: utf-8 -*-
from gluon.contrib.appconfig import AppConfig
myconf = AppConfig(reload=True)
db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all'])
dboee = DAL('sqlite://oee.db', pool_size=0, migrate=False)
response.generic_patterns = ['*'] if request.is_local else []
## choose a style for forms
response.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.take('forms.separator')
from gluon.tools import Auth, Service, PluginManager
service = Service()
plugins = PluginManager()
## configure email
#mail = auth.settings.mailer
#mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')
#mail.settings.sender = myconf.take('smtp.sender')
#mail.settings.login = myconf.take('smtp.login')
## Add colorwidget
import uuid
colorpicker_js = URL(r=request,c='static/mColorPicker', f='mColorPicker.min.js')
class ColorPickerWidget(object):
"""
Colorpicker widget based on http://code.google.com/p/mcolorpicker/
"""
def __init__ (self, js = colorpicker_js, button=True, style="", transparency=False):
uid = str(uuid.uuid4())[:8]
self._class = "_%s" % uid
self.style = style
if transparency == False:
self.transparency = 'false'
else:
self.transparency = 'true'
if button == True:
self.data = 'hidden'
if self.style == "":
self.style = "height:28px;width:28px;"
else:
self.data = 'display'
if not js in response.files:
response.files.append(js)
def widget(self, f, v):
wrapper = DIV()
inp = SQLFORM.widgets.string.widget(f,v, _value=v, _type='color',\
_data_text='hidden', _style=self.style, _hex='true', _class=self._class)
scr = SCRIPT("$.fn.mColorPicker.init.replace = false; \
$.fn.mColorPicker.init.allowTransparency=%s; \
$('input.%s').mColorPicker({'imageFolder': '/%s/static/mColorPicker/'});"\
% (self.transparency, self._class, request.application))
wrapper.components.append(inp)
wrapper.components.append(scr)
return wrapper
color_widget = ColorPickerWidget()
##Defined OEE tables
dboee.define_table('tblOee_Country', \
Field('fldOeeCountryTableKeyID', 'id', readable=False), \
Field('fldOeeCountryNr', 'integer', label='Country nr', readable=True, writable=False), \
Field('fldOeeCountryDescription', 'string', label='Country description'), \
Field('fldOeeCountryInformation', 'string', label='Country information'), \
Field('fldOeeCountryLanguageID', 'integer', label='Language ID', readable=False, writable=False), \
Field('fldOeeCountryHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.define_table('tblOee_Plant', \
Field('fldOeePlantTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantNr', 'integer', label='Plant nr', readable=True, writable=False), \
Field('fldOeePlantDescription', 'string', label='Plant description'), \
Field('fldOeePlantInformation', 'string', label='Plant information'), \
Field('fldOeePlantHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Plant.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.define_table('tblOee_SubPlant', \
Field('fldOeeSubPlantTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantNr', 'integer', label='Sub-Plant nr', readable=True, writable=False), \
Field('fldOeeSubPlantDescription', 'string', label='Sub-Plant description'), \
Field('fldOeeSubPlantInformation', 'string', label='Sub-Plant information'), \
Field('fldOeeSubPlantHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_SubPlant.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_SubPlant.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.define_table('tblOee_Department', \
Field('fldOeeDepartmentTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentNr', 'integer', label='Department nr', readable=True, writable=False), \
    Field('fldOeeDepartmentDescription', 'string', label='Department description'), \
Field('fldOeeDepartmentInformation', 'string', label='Department information'), \
Field('fldOeeDepartmentHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Department.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_Department.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Department.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.define_table('tblOee_ActivityGroup', \
Field('fldOeeActivityGroupTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeActivityGroupNr', 'integer', label='Activitygroup', readable=True, writable=False), \
Field('fldOeeActivityGroupDescription', 'text', label='Activitygroup description'), \
Field('fldOeeActivityGroupInformation', 'text', label='Activitygroup information'), \
Field('fldOeeActivityGroupColorNr', 'integer', label='Activitygroup color'), \
Field('fldOeeActivityGroupCalcForOee', 'integer', label='Calculate OEE'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_ActivityGroup.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_ActivityGroup.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_ActivityGroup.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_ActivityGroup.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_Activity', \
Field('fldOeeActivityTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeActivityNr', 'integer', label='Activity nr', readable=True, writable=False), \
Field('fldOeeActivityGroupID', 'integer', label='Activitygroup'), \
Field('fldOeeActivityDescription', 'string', label='Activity description'), \
Field('fldOeeActivityInformation', 'string', label='Activity information'), \
Field('fldOeeActivityHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Activity.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_Activity.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Activity.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Activity.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_Activity.fldOeeActivityGroupID.requires = IS_IN_DB(dboee(), dboee.tblOee_ActivityGroup.fldOeeActivityGroupNr, '%(fldOeeActivityGroupDescription)s')
dboee.define_table('tblOee_ModuleSensorStyle', \
Field('fldOeeModuleSensorStyleTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeeModuleSensorStyleNr', 'integer', label='Sensor-style nr', readable=True, writable=False), \
Field('fldOeeModuleSensorStyleDescription', 'string', label='Sensor-style'), \
Field('fldOeeModuleSensorStyleInformation', 'string', label='Sensor-style information'), \
Field('fldOeeModuleSensorStyleHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.define_table('tblOee_ModuleType', \
Field('fldOeeModuleTypeTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeModuleTypeNr', 'integer', label='Module-type nr'), \
Field('fldOeeModuleTypeConnection', 'string', label='Connection-type'), \
Field('fldOeeModuleTypeDescription', 'string', label='Module-type description'), \
    Field('fldOeeModuleTypeInformation', 'string', label='Module-type information'), \
Field('fldOeeModuleTypeHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.define_table('tblOee_Module', \
Field('fldOeeModuleTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeModuleNr', 'integer', label='Module nr', readable=True, writable=False), \
Field('fldOeeModuleTypeID', 'integer', label='Module-type'), \
Field('fldOeeModuleSensorStyleID', 'integer', label='Sensor-style'), \
Field('fldOeeModuleDescription', 'string', label='Module description'), \
Field('fldOeeModuleInformation', 'string', label='Module information'), \
Field('fldOeeModuleSensorAddress', 'integer', label='Sensor address'), \
Field('fldOeeModuleSensorResetAddress', 'integer', label='Sensor reset address'), \
Field('fldOeeModuleIpAddress', 'string', label='IP address'), \
Field('fldOeeModuleIpAddressPort', 'integer', label='IP Port'), \
Field('fldOeeModuleComPort', 'string', label='Com port'), \
Field('fldOeeModuleBitsPerSecond', 'integer', label='Bits per Second'), \
Field('fldOeeModuleDatabits', 'integer', label='Databits'), \
Field('fldOeeModuleStopBits', 'integer', label='StopBits'), \
Field('fldOeeModuleFlowControl', 'string', label='Flowcontrol'), \
Field('fldOeeModuleParity', 'string', label='Parity'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeModuleHistory', 'boolean', label='History'), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Module.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_Module.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Module.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Module.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_Module.fldOeeModuleSensorStyleID.requires = IS_IN_DB(dboee(), dboee.tblOee_ModuleSensorStyle.fldOeeModuleSensorStyleNr, '%(fldOeeModuleSensorStyleDescription)s')
dboee.tblOee_Module.fldOeeModuleTypeID.requires = IS_IN_DB(dboee(), dboee.tblOee_ModuleType.fldOeeModuleTypeNr, '%(fldOeeModuleTypeDescription)s')
dboee.define_table('tblOee_MachineIOFailure', \
Field('fldOeeMachineIOFailureTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineIOFailureNr', 'integer', label='I/O failure nr'), \
Field('fldOeeMachineIOFailureDescription', 'string', label='I/O failure'), \
Field('fldOeeMachineIOFailureInformation', 'string', label='I/O failure information'), \
Field('fldOeeMachineIOFailureHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineIOFailure.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineIOFailure.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineIOFailure.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineIOFailure.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_MachineShortbreak', \
Field('fldOeeMachineShortBreakTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineShortBreakNr', 'integer', label='Shortbreak nr', readable=True, writable=False), \
Field('fldOeeMachineShortBreakDescription', 'string', label='Shortbreak description'), \
Field('fldOeeMachineShortBreakInformation', 'string', label='Shortbreak information'), \
Field('fldOeeMachineShortBreakHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineShortbreak.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineShortbreak.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineShortbreak.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineShortbreak.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_MachineStatus', \
Field('fldOeeMachineStatusTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineStatusNr', 'integer', label='Machine status nr'), \
Field('fldOeeMachineStatusDescription', 'string', label='Machine status description'), \
Field('fldOeeMachineStatusInformation', 'string', label='Machine status information'), \
Field('fldOeeMachineStatusHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineStatus.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineStatus.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineStatus.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineStatus.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_MachineUndefinedProduction', \
Field('fldOeeMachineUndefinedProductionTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineUndefinedProductionNr', 'integer', label='Undefined Production nr', readable=True, writable=False), \
Field('fldOeeMachineUndefinedProductionDescription', 'string', label='Undefined Production description'), \
Field('fldOeeMachineUndefinedProductionInformation', 'string', label='Undefined Production information'), \
Field('fldOeeMachineUndefinedProductionHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineUndefinedProduction.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineUndefinedProduction.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineUndefinedProduction.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineUndefinedProduction.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_MachineUndefinedStandstill', \
Field('fldOeeMachineUndefinedStandstillTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineUndefinedStandstillNr', 'integer', label='Undefined standstill nr', readable=True, writable=False), \
Field('fldOeeMachineUndefinedStandstillDescription', 'string', label='Undefined standstill description'), \
Field('fldOeeMachineUndefinedStandstillInformation', 'string', label='Undefined standstill information'), \
Field('fldOeeMachineUndefinedStandstillHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineUndefinedStandstill.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineUndefinedStandstill.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineUndefinedStandstill.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineUndefinedStandstill.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_MachineUnscheduled', \
Field('fldOeeMachineUnscheduledTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineUnscheduledNr', 'integer', label='Unscheduled nr', readable=True, writable=False), \
Field('fldOeeMachineUnscheduledDescription', 'string', label='Unscheduled description'), \
Field('fldOeeMachineUnscheduledInformation', 'string', label='Unscheduled information'), \
Field('fldDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeMachineUnscheduledHistory', 'boolean', label='History'), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineUnscheduled.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineUnscheduled.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineUnscheduled.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineUnscheduled.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_MachineUnit', \
Field('fldOeeMachineUnitTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineUnitNr', 'integer', label='Machine-unit nr'), \
Field('fldOeeMachineUnitDescription', 'string', label='Machine-unit description'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeMachineUnitHistory', 'boolean', label='History'), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineUnit.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineUnit.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineUnit.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineUnit.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_Machine', \
Field('fldOeeMachineTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineNr', 'integer', label='Machine nr', readable=True, writable=False), \
Field('fldOeeMachineCode', 'integer', label='Machine code'), \
Field('fldOeeMachineDescription', 'string', label='Machine description'), \
Field('fldOeeMachineInformation', 'string', label='Machine information'), \
Field('fldOeeModuleID', 'integer', label='Module'), \
Field('fldOeeMachineShortBreakID', 'integer', label='Shortbreak'), \
Field('fldOeeMachineUndefinedProdID', 'integer', label='Undefined Production'), \
Field('fldOeeMachineUndefinedStandStillID', 'integer', label='Undefined Standstill'), \
Field('fldOeeMachineUnscheduledID', 'integer', label='Unscheduled'), \
Field('fldOeeMachineIOFailureID', 'integer', label='I/O failure'), \
Field('fldOeeMachineUnitID', 'integer', label='Machine-unit'), \
Field('fldOeeMachineSortOrder', 'integer', label='Machine sort order'), \
Field('fldOeeMachineProductionBoundaryTimer', 'integer', label='Production timer (sec)'), \
Field('fldOeeMachineProductionShortbreakTimer', 'integer', label='Shortbreak timer (sec)'), \
Field('fldOeeMachineStopCodeTimer', 'integer', label='Stopcode timer (sec)'), \
Field('fldOeeMachineSpeed', 'integer', label='Default speed (per min)'), \
Field('fldOeeMachineDevider', 'decimal(2,2)', label='Pulse factor', default=1), \
Field('fldOeeMachineOperatorFactor', 'decimal(2,2)', label='Operator factor', default=1), \
Field('fldOeeMachineTarget1OEE', 'integer', label='OEE target 1'), \
Field('fldOeeMachineTarget2OEE', 'integer', label='OEE target 2'), \
Field('fldOeeMachineWorkstationDescription', 'string', label='Workstation description'), \
Field('fldOeeMachineHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Machine.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_Machine.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Machine.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Machine.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_Machine.fldOeeModuleID.requires = IS_IN_DB(dboee(), dboee.tblOee_Module.fldOeeModuleNr, '%(fldOeeModuleDescription)s')
dboee.tblOee_Machine.fldOeeMachineShortBreakID.requires = IS_IN_DB(dboee(), dboee.tblOee_MachineShortbreak.fldOeeMachineShortBreakNr, '%(fldOeeMachineShortBreakDescription)s')
dboee.tblOee_Machine.fldOeeMachineUndefinedProdID.requires = IS_IN_DB(dboee(), dboee.tblOee_MachineUndefinedProduction.fldOeeMachineUndefinedProductionNr, '%(fldOeeMachineUndefinedProductionDescription)s')
dboee.tblOee_Machine.fldOeeMachineUndefinedStandStillID.requires = IS_IN_DB(dboee(), dboee.tblOee_MachineUndefinedStandstill.fldOeeMachineUndefinedStandstillNr, '%(fldOeeMachineUndefinedStandstillDescription)s')
dboee.tblOee_Machine.fldOeeMachineUnscheduledID.requires = IS_IN_DB(dboee(), dboee.tblOee_MachineUnscheduled.fldOeeMachineUnscheduledNr, '%(fldOeeMachineUnscheduledDescription)s')
dboee.tblOee_Machine.fldOeeMachineIOFailureID.requires = IS_IN_DB(dboee(), dboee.tblOee_MachineIOFailure.fldOeeMachineIOFailureNr, '%(fldOeeMachineIOFailureDescription)s')
dboee.tblOee_Machine.fldOeeMachineUnitID.requires = IS_IN_DB(dboee(), dboee.tblOee_MachineUnit.fldOeeMachineUnitNr, '%(fldOeeMachineUnitDescription)s')
dboee.define_table('tblOee_MachineActivity', \
Field('fldOeeMachineActivityTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeMachineID', 'integer', label='Machine'), \
Field('fldOeeMachineActivityID', 'integer', label='Activity'), \
Field('fldOeeMachineActivitySortOrder', 'integer', label='Sort order'), \
Field('fldOeeMachineActivityHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_MachineActivity.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_MachineActivity.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_MachineActivity.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_MachineActivity.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_MachineActivity.fldOeeMachineActivityID.requires = IS_IN_DB(dboee(), dboee.tblOee_Activity.fldOeeActivityNr, '%(fldOeeActivityDescription)s')
dboee.tblOee_MachineActivity.fldOeeMachineID.requires = IS_IN_DB(dboee(), dboee.tblOee_Machine.fldOeeMachineNr, '%(fldOeeMachineDescription)s')
dboee.define_table('tblOee_DailySchedule', \
Field('fldOeeDailyScheduleTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeDailyScheduleNr', 'integer', label='Daily schedule nr', readable=True, writable=False), \
Field('fldOeeTeamID', 'integer', label='Team'), \
Field('fldOeeShiftTimeID', 'integer', label='Shifttime'), \
Field('fldOeeDailyScheduleDescription', 'string', label='Daily schedule description'), \
Field('fldOeeDailyScheduleInformation', 'string', label='Daily schedule information'), \
Field('fldOeeDailyScheduleStartDate', 'datetime', label='Starttime'), \
Field('fldOeeDailyScheduleEndDate', 'datetime', label='Endtime'), \
Field('fldOeeDailyScheduleHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_DailySchedule.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.define_table('tblOee_Article', \
Field('fldOeeArticleTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeArticleNr', 'string', label='Article nr'), \
Field('fldOeeArticleDescription', 'string', label='Article description'), \
Field('fldOeeArticleInformation', 'string', label='Article information'), \
Field('fldOeeArticleNormSpeed', 'integer', label='Norm speed'), \
Field('fldOeeArticleHistory', 'boolean', label='History', default = False), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Article.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_Article.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Article.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Article.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_Order', \
Field('fldOeeOrderTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeArticleID', 'string', label='Article'), \
Field('fldOeeOrderNr', 'string', label='Order nr'), \
Field('fldOeeOrderDescription', 'string', label='Order description'), \
Field('fldOeeOrderInformation', 'string', label='Order information'), \
Field('fldOeeOrderHistory', 'boolean', label='History', default = False), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Order.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_Order.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Order.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Order.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.tblOee_Order.fldOeeArticleID.requires = IS_IN_DB(dboee(), dboee.tblOee_Article.fldOeeArticleNr)
dboee.define_table('tblOee_ShiftTime', \
Field('fldOeeShiftTimeTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeShiftTimeNr', 'integer', label='Shifttime nr', readable=True, writable=False), \
Field('fldOeeShiftTimeDescription', 'string', label='Shifttime description'), \
Field('fldOeeShiftTimeInformation', 'string', label='Shifttime information'), \
Field('fldOeeShiftTimeStart', 'datetime', label='Starttime'), \
Field('fldOeeShiftTimeEnd', 'datetime', label='Endtime'), \
Field('fldOeeShiftTimeHistory', 'boolean', label='History'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_ShiftTime.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_ShiftTime.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_ShiftTime.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_ShiftTime.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_Team', \
Field('fldOeeTeamTableKeyID', 'id', readable=False), \
Field('fldOeeCountryID', 'integer', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department'), \
Field('fldOeeTeamNr', 'integer', label='Team nr', readable=True, writable=False), \
Field('fldOeeTeamDescription', 'string', label='Team description'), \
Field('fldOeeTeamInformation', 'string', label='Team information'), \
Field('fldOeeTeamColorNr', 'integer', label='Team Color'), \
Field('fldOeeDateModified', 'datetime', label='Date modified', default = request.now), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.tblOee_Team.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
dboee.tblOee_Team.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
dboee.tblOee_Team.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
dboee.tblOee_Team.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
dboee.define_table('tblOee_Reg', \
Field('fldOeeRegTableKeyID', 'id', readable=False), \
Field('fldOeeRegNr', 'integer', label='Reg nr'), \
Field('fldOeeMachineCode', 'integer', label='Machine code'), \
Field('fldOeeMachineID', 'integer', label='Machine ID'), \
Field('fldOeeMachineDescription', 'string', label='Machine'), \
Field('fldOeeMachineStatusID', 'integer', label='Machine status ID'), \
Field('fldOeeMachineStatusDescription', 'string', label='Machine status description'), \
Field('fldOeeCountryID', 'integer', label='Country ID'), \
Field('fldOeeCountryDescription', 'string', label='Country'), \
Field('fldOeePlantID', 'integer', label='Plant ID'), \
Field('fldOeePlantDescription', 'string', label='Plant'), \
Field('fldOeeSubPlantID', 'integer', label='Sub-Plant ID'), \
Field('fldOeeSubPlantDescription', 'string', label='Sub-Plant'), \
Field('fldOeeDepartmentID', 'integer', label='Department ID'), \
Field('fldOeeDepartmentDescription', 'string', label='Department'), \
Field('fldOeeStartDateTime', 'datetime', label='Start date'), \
Field('fldOeeEndDateTime', 'datetime', label='End date'), \
Field('fldOeeActivityDuration', 'integer', label='Duration in sec.'), \
Field('fldOeeTeamID', 'integer', label='Team ID'), \
Field('fldOeeTeamDescription', 'string', label='Team'), \
Field('fldOeeTeamColorID', 'integer', label='Team color ID'), \
Field('fldOeeTeamColorDescription', 'string', label='Team color'), \
Field('fldOeeShiftTimeID', 'integer', label='Shift ID'), \
Field('fldOeeShiftTimeDescription', 'string', label='Shift'), \
Field('fldOeeShiftStartDateTime', 'datetime', label='Shift starttime'), \
Field('fldOeeShiftEndDateTime', 'datetime', label='Shift endtime'), \
Field('fldOeeShiftDuration', 'integer', label='Shift duration'), \
Field('fldOeeAverageSpeed', 'integer', label='Average speed'), \
Field('fldOeeNormSpeed', 'integer', label='Norm speed'), \
Field('fldOeeCounter', 'integer', label='Counter'), \
Field('fldOeeCounterUnitID', 'integer', label='Counter-unit ID'), \
Field('fldOeeCounterUnitDescription', 'string', label='Counter-unit'), \
Field('fldOeeActivityGroupID', 'integer', label='Activitygroup ID'), \
Field('fldOeeActivityGroupDescription', 'string', label='Activitygroup'), \
Field('fldOeeActivityID', 'integer', label='Activity ID'), \
Field('fldOeeActivityDescription', 'string', label='Activity'), \
Field('fldOeeArticleNr', 'string', label='Article nr'), \
Field('fldOeeArticleDescription', 'string', label='Article description'), \
Field('fldOeeOrderNr', 'string', label='Order nr'), \
Field('fldOeeOrderDescription', 'string', label='Order description'), \
Field('fldOeeUserLogInformation', 'string', label='Activity log'), \
Field('fldOeeUserShiftLogInformation', 'string', label='Shift log'), \
Field('fldOeeCurrentPerformance', 'integer', label='Performance'), \
Field('fldOeeCurrentAvailability', 'integer', label='Availability'), \
Field('fldOeeCurrentQuality', 'integer', label='Quality'), \
Field('fldOeeCurrentOee', 'integer', label='OEE'), \
Field('fldOeeActivityGroupCalcForOee', 'integer', label='Calculate for OEE'), \
Field('fldOeeDateModified', 'datetime', label='Date modified'), \
Field('fldOeeSyncDate', 'datetime', label='Sync date'), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.define_table('tblOee_Progress', \
Field('fldOeeProgressTableKeyID', 'id'), \
Field('fldOeeRegID', 'integer'), \
Field('fldOeeStartDateTime', 'datetime'), \
Field('fldOeeActivityDuration', 'integer'), \
Field('fldOeeCounter', 'integer'), \
Field('fldOeeNormSpeed', 'integer'), \
Field('fldOeeCountryID', 'integer'), \
Field('fldOeePlantID', 'integer'), \
Field('fldOeeSubPlantID', 'integer'), \
Field('fldOeeDepartmentID', 'integer'), \
Field('fldOeeCurrentOee', 'integer'), \
Field('fldOeeCurrentAvailability', 'integer'), \
Field('fldOeeCurrentPerformance', 'integer'), \
Field('fldOeeCurrentQuality', 'integer'), \
Field('fldOeeRegHistory', 'boolean'), \
Field('fldOeeDateModified', 'datetime'), \
Field('fldOeeMachineID', 'integer'), \
Field('fldOeeSyncDate', 'datetime'), \
Field('fldOeeSync', 'boolean', label='Sync', default = True))
dboee.define_table('tblOee_UserRight', \
Field('fldOeeUserRightTableKeyID', 'id'), \
Field('fldOeeUserRightNr', 'integer'), \
Field('fldOeeUserRightDescription', 'string'), \
Field('fldDateModified', 'datetime'), \
Field('fldOeeUserRightInformation', 'string'), \
Field('fldOeeUserRightHistory', 'boolean'))
## configure auth policy
auth = Auth(dboee)
auth.settings.table_user_name = 'tblOee_User'
auth.settings.extra_fields['tblOee_User']= [
Field('fldOeeCountryID', 'integer', label='CountryID'), \
Field('fldOeePlantID', 'integer', label='PlantID'), \
Field('fldOeeSubPlantID', 'integer', label='SubPlantID'), \
Field('fldOeeDepartmentID', 'integer', label='DepartmentID'), \
Field('fldOeeUserRightID', 'integer', label='UserRightID'), \
Field('fldOeeUserDescription', 'string', label='User description'), \
Field('fldOeeUserLogin', 'string', label='UserLogin'), \
Field('fldOeeUserDomain', 'string', label='Domain'), \
Field('fldOeeDateModified', 'datetime'), \
Field('fldOeeUserHistory', 'boolean')]
auth.define_tables(username=False, signature=False)
#auth.settings.actions_disabled.append('register')
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
auth.settings.allow_basic_login = True
custom_auth_table = dboee[auth.settings.table_user_name]
custom_auth_table.fldOeeCountryID.requires = IS_IN_DB(dboee(), dboee.tblOee_Country.fldOeeCountryNr, '%(fldOeeCountryDescription)s')
custom_auth_table.fldOeePlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_Plant.fldOeePlantNr, '%(fldOeePlantDescription)s')
custom_auth_table.fldOeeSubPlantID.requires = IS_IN_DB(dboee(), dboee.tblOee_SubPlant.fldOeeSubPlantNr, '%(fldOeeSubPlantDescription)s')
custom_auth_table.fldOeeDepartmentID.requires = IS_IN_DB(dboee(), dboee.tblOee_Department.fldOeeDepartmentNr, '%(fldOeeDepartmentDescription)s')
custom_auth_table.fldOeeUserRightID.requires = IS_IN_DB(dboee(), dboee.tblOee_UserRight.fldOeeUserRightNr, '%(fldOeeUserRightDescription)s')
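## Illustrative DAL query (a sketch, not part of the original model; assumes this
## model file has been loaded by web2py so that `dboee` is available):
##   active_machines = dboee(dboee.tblOee_Machine.fldOeeMachineHistory == False).select(
##       dboee.tblOee_Machine.fldOeeMachineNr, dboee.tblOee_Machine.fldOeeMachineDescription)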
|
from unet_stylegan2.unet_stylegan2 import Trainer, StyleGAN2, NanException |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import sys, os
from . import common
from . import engine
from proton import *
from .common import pump, Skipped
from proton._compat import str2bin
def _sslCertpath(file):
""" Return the full path to the certificate,keyfile, etc.
"""
if os.name=="nt":
if file.find("private-key")!=-1:
# The private key is not in a separate store
return None
# Substitute pkcs#12 equivalent for the CA/key store
if file.endswith(".pem"):
file = file[:-4] + ".p12"
return os.path.join(os.path.dirname(__file__),
"ssl_db/%s" % file)
def _testSaslMech(self, mech, clientUser='user@proton', authUser='user@proton', encrypted=False, authenticated=True):
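    # Shared test helper: runs the client/server SASL handshake for the given mechanism
    # and asserts the expected encryption/authentication outcome on both transports.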
self.s1.allowed_mechs(mech)
self.c1.open()
self.c2.open()
pump(self.t1, self.t2, 1024)
if encrypted is not None:
assert self.t2.encrypted == encrypted, encrypted
assert self.t1.encrypted == encrypted, encrypted
assert self.t2.authenticated == authenticated, authenticated
assert self.t1.authenticated == authenticated, authenticated
if authenticated:
# Server
assert self.t2.user == authUser
assert self.s2.user == authUser
assert self.s2.mech == mech.strip()
assert self.s2.outcome == SASL.OK, self.s2.outcome
assert self.c2.state & Endpoint.LOCAL_ACTIVE and self.c2.state & Endpoint.REMOTE_ACTIVE,\
"local_active=%s, remote_active=%s" % (self.c1.state & Endpoint.LOCAL_ACTIVE, self.c1.state & Endpoint.REMOTE_ACTIVE)
# Client
assert self.t1.user == clientUser
assert self.s1.user == clientUser
assert self.s1.mech == mech.strip()
assert self.s1.outcome == SASL.OK, self.s1.outcome
assert self.c1.state & Endpoint.LOCAL_ACTIVE and self.c1.state & Endpoint.REMOTE_ACTIVE,\
"local_active=%s, remote_active=%s" % (self.c1.state & Endpoint.LOCAL_ACTIVE, self.c1.state & Endpoint.REMOTE_ACTIVE)
else:
# Server
assert self.t2.user == None
assert self.s2.user == None
assert self.s2.outcome != SASL.OK, self.s2.outcome
# Client
assert self.t1.user == clientUser
assert self.s1.user == clientUser
assert self.s1.outcome != SASL.OK, self.s1.outcome
class Test(common.Test):
pass
def consumeAllOuput(t):
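    # Drain all pending output from the transport, stopping after the first empty read.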
stops = 0
while stops<1:
out = t.peek(1024)
l = len(out) if out else 0
t.pop(l)
if l <= 0:
stops += 1
class SaslTest(Test):
def setUp(self):
self.t1 = Transport()
self.s1 = SASL(self.t1)
self.t2 = Transport(Transport.SERVER)
self.t2.max_frame_size = 65536
self.s2 = SASL(self.t2)
def pump(self):
pump(self.t1, self.t2, 1024)
def testPipelinedClient(self):
        # TODO: when PROTON-1136 is fixed, remove this test
if "java" in sys.platform:
raise Skipped("Proton-J does not support pipelined client input")
# Server
self.s2.allowed_mechs('ANONYMOUS')
c2 = Connection()
self.t2.bind(c2)
assert self.s2.outcome is None
# Push client bytes into server
self.t2.push(str2bin(
# SASL
'AMQP\x03\x01\x00\x00'
# @sasl-init(65) [mechanism=:ANONYMOUS, initial-response=b"anonymous@fuschia"]
'\x00\x00\x002\x02\x01\x00\x00\x00SA\xd0\x00\x00\x00"\x00\x00\x00\x02\xa3\x09ANONYMOUS\xa0\x11anonymous@fuschia'
# AMQP
'AMQP\x00\x01\x00\x00'
# @open(16) [container-id="", channel-max=1234]
'\x00\x00\x00!\x02\x00\x00\x00\x00S\x10\xd0\x00\x00\x00\x11\x00\x00\x00\x0a\xa1\x00@@`\x04\xd2@@@@@@'
))
consumeAllOuput(self.t2)
assert not self.t2.condition
assert self.s2.outcome == SASL.OK
assert c2.state & Endpoint.REMOTE_ACTIVE
def testPipelinedServer(self):
# Client
self.s1.allowed_mechs('ANONYMOUS')
c1 = Connection()
self.t1.bind(c1)
assert self.s1.outcome is None
# Push server bytes into client
# Commented out lines in this test are where the client input processing doesn't
# run after output processing even though there is input waiting
self.t1.push(str2bin(
# SASL
'AMQP\x03\x01\x00\x00'
# @sasl-mechanisms(64) [sasl-server-mechanisms=@PN_SYMBOL[:ANONYMOUS]]
'\x00\x00\x00\x1c\x02\x01\x00\x00\x00S@\xc0\x0f\x01\xe0\x0c\x01\xa3\tANONYMOUS'
# @sasl-outcome(68) [code=0]
'\x00\x00\x00\x10\x02\x01\x00\x00\x00SD\xc0\x03\x01P\x00'
# AMQP
'AMQP\x00\x01\x00\x00'
# @open(16) [container-id="", channel-max=1234]
'\x00\x00\x00!\x02\x00\x00\x00\x00S\x10\xd0\x00\x00\x00\x11\x00\x00\x00\x0a\xa1\x00@@`\x04\xd2@@@@@@'
))
consumeAllOuput(self.t1)
assert self.s1.outcome == SASL.OK
assert c1.state & Endpoint.REMOTE_ACTIVE
def testPipelined2(self):
if "java" in sys.platform:
raise Skipped("Proton-J does not support client pipelining")
out1 = self.t1.peek(1024)
self.t1.pop(len(out1))
self.t2.push(out1)
self.s2.allowed_mechs('ANONYMOUS')
c2 = Connection()
c2.open()
self.t2.bind(c2)
out2 = self.t2.peek(1024)
self.t2.pop(len(out2))
self.t1.push(out2)
out1 = self.t1.peek(1024)
assert len(out1) > 0
def testFracturedSASL(self):
""" PROTON-235
"""
assert self.s1.outcome is None
# self.t1.trace(Transport.TRACE_FRM)
out = self.t1.peek(1024)
self.t1.pop(len(out))
self.t1.push(str2bin("AMQP\x03\x01\x00\x00"))
out = self.t1.peek(1024)
self.t1.pop(len(out))
self.t1.push(str2bin("\x00\x00\x00"))
out = self.t1.peek(1024)
self.t1.pop(len(out))
self.t1.push(str2bin("6\x02\x01\x00\x00\x00S@\xc04\x01\xe01\x04\xa3\x05PLAIN\x0aDIGEST-MD5\x09ANONYMOUS\x08CRAM-MD5"))
out = self.t1.peek(1024)
self.t1.pop(len(out))
self.t1.push(str2bin("\x00\x00\x00\x10\x02\x01\x00\x00\x00SD\xc0\x03\x01P\x00"))
out = self.t1.peek(1024)
self.t1.pop(len(out))
while out:
out = self.t1.peek(1024)
self.t1.pop(len(out))
assert self.s1.outcome == SASL.OK, self.s1.outcome
def test_singleton(self):
"""Verify that only a single instance of SASL can exist per Transport"""
transport = Transport()
attr = object()
sasl1 = SASL(transport)
sasl1.my_attribute = attr
sasl2 = transport.sasl()
sasl3 = SASL(transport)
assert sasl1 == sasl2
assert sasl1 == sasl3
assert sasl1.my_attribute == attr
assert sasl2.my_attribute == attr
assert sasl3.my_attribute == attr
transport = Transport()
sasl1 = transport.sasl()
sasl1.my_attribute = attr
sasl2 = SASL(transport)
assert sasl1 == sasl2
assert sasl1.my_attribute == attr
assert sasl2.my_attribute == attr
|
import numpy as np
from sklearn.metrics import (
precision_score,
recall_score,
accuracy_score,
f1_score,
roc_auc_score,
)
from typing import Dict, List
import tensorflow.keras.metrics as metrics
def get_metrics(y_pred: np.ndarray, y_actual: np.ndarray) -> Dict[str, float]:
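    # Compute standard binary-classification metrics from hard label predictions.
    # Note that roc_auc_score receives the predicted labels here, not probabilities.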
metrics = dict()
metrics["precision"] = precision_score(y_actual, y_pred)
metrics["recall"] = recall_score(y_actual, y_pred)
metrics["accuracy"] = accuracy_score(y_actual, y_pred)
metrics["f1"] = f1_score(y_actual, y_pred)
metrics["auc_roc"] = roc_auc_score(y_actual, y_pred)
return metrics
def print_metrics(metrics: Dict[str, float]) -> None:
print(
f"Metrics:\n"
f"\tPrecision: {metrics['precision']}\n"
f"\tRecall: {metrics['recall']}\n"
f"\tAccuracy: {metrics['accuracy']}\n"
f"\tF1: {metrics['f1']}\n"
f"\tAuc roc: {metrics['auc_roc']}\n"
)
def make_keras_model_metrics() -> List[metrics.Metric]:
return [
metrics.TruePositives(name="tp"),
metrics.FalsePositives(name="fp"),
metrics.TrueNegatives(name="tn"),
metrics.FalseNegatives(name="fn"),
metrics.BinaryAccuracy(name="accuracy"),
metrics.Precision(name="precision"),
metrics.Recall(name="recall"),
metrics.AUC(name="aucpr", curve="PR"),
metrics.AUC(name="aucroc", curve="ROC"),
]
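# Illustrative usage (a sketch, not part of the original module; assumes a tf.keras
# binary classifier named `model`):
#   model.compile(optimizer="adam", loss="binary_crossentropy",
#                 metrics=make_keras_model_metrics())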
|
from collections import defaultdict
from datetime import datetime, timedelta
import operator
from pathlib import Path
from pprint import pprint
import sys
from types import MappingProxyType
from zipfile import ZipFile, ZIP_LZMA
import django
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
import pytz
import time
from core import __version__
from mission_report.statuses import LifeStatus, SortieStatus
from mission_report.report import MissionReport
from stats.logger import logger
from stats.models import (Object, Mission, Sortie, Profile, Player, PlayerAircraft,
PlayerMission, KillboardPvP, Tour, LogEntry, Score, Squad)
from stats.online import update_online, cleanup_online
from stats.rewards import reward_sortie, reward_tour, reward_mission
from users.utils import cleanup_registration
User = get_user_model()
MISSION_REPORT_BACKUP_PATH = settings.MISSION_REPORT_BACKUP_PATH
MISSION_REPORT_BACKUP_DAYS = settings.MISSION_REPORT_BACKUP_DAYS
MISSION_REPORT_DELETE = settings.MISSION_REPORT_DELETE
MISSION_REPORT_PATH = settings.MISSION_REPORT_PATH
NEW_TOUR_BY_MONTH = settings.NEW_TOUR_BY_MONTH
TIME_ZONE = pytz.timezone(settings.MISSION_REPORT_TZ)
WIN_BY_SCORE = settings.WIN_BY_SCORE
WIN_SCORE_MIN = settings.WIN_SCORE_MIN
WIN_SCORE_RATIO = settings.WIN_SCORE_RATIO
SORTIE_MIN_TIME = settings.SORTIE_MIN_TIME
def main():
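    # Polling loop: watch MISSION_REPORT_PATH for completed mission reports, process
    # them with stats_whore() and clean up the handled files afterwards.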
logger.info('IL2 stats {stats}, Python {python}, Django {django}'.format(
stats=__version__, python=sys.version[0:5], django=django.get_version()))
    # TODO: rework this to check by file creation time
processed_reports = []
waiting_new_report = False
online_timestamp = 0
while True:
new_reports = []
for m_report_file in MISSION_REPORT_PATH.glob('missionReport*.txt'):
if m_report_file.name.endswith('[0].txt') and m_report_file.name not in processed_reports:
new_reports.append(m_report_file)
if len(new_reports) > 1:
waiting_new_report = False
            # process all logs except the last mission
for m_report_file in new_reports[:-1]:
stats_whore(m_report_file=m_report_file)
cleanup(m_report_file=m_report_file)
processed_reports.append(m_report_file.name)
continue
elif len(new_reports) == 1:
m_report_file = new_reports[0]
m_report_files = collect_mission_reports(m_report_file=m_report_file)
online_timestamp = update_online(m_report_files=m_report_files, online_timestamp=online_timestamp)
            # if the last file was created more than 2 minutes ago, process it
if time.time() - m_report_files[-1].stat().st_mtime > 120:
waiting_new_report = False
stats_whore(m_report_file=m_report_file)
cleanup(m_report_file=m_report_file)
processed_reports.append(m_report_file.name)
continue
if not waiting_new_report:
            logger.info('waiting for a new report...')
waiting_new_report = True
        # remove users who did not activate their registration within the allotted time
cleanup_registration()
        # ideally, new logs appear at least once every 30 seconds
time.sleep(30)
def backup_log(name, lines, date):
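    # Archive the raw mission log: write the lines into a year/month/day directory,
    # compress the file into a .zip (LZMA) and remove the uncompressed copy.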
path_dir = MISSION_REPORT_BACKUP_PATH.joinpath(str(date.year), str(date.month), str(date.day))
if not path_dir.exists():
path_dir.mkdir(parents=True)
file_path = path_dir.joinpath(name)
with file_path.open('w') as f:
f.writelines(lines)
with ZipFile('%s.zip' % str(file_path), 'w', compression=ZIP_LZMA) as f:
f.write(str(file_path), arcname=name)
file_path.unlink()
def collect_mission_reports(m_report_file):
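    # Collect every report chunk belonging to the same mission (shared filename prefix),
    # ordered by modification time.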
return sorted(MISSION_REPORT_PATH.glob('%s*.txt' % m_report_file.name[:34]), key=lambda x: x.stat().st_mtime)
def cleanup(m_report_file=None):
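    # Remove stale online records, optionally delete the processed mission's report files,
    # and prune zipped backups older than MISSION_REPORT_BACKUP_DAYS along with empty backup directories.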
cleanup_online()
if m_report_file and MISSION_REPORT_DELETE:
m_report_files = collect_mission_reports(m_report_file=m_report_file)
        # delete the report files of this mission
for f in m_report_files:
f.unlink()
for f in MISSION_REPORT_BACKUP_PATH.glob('**/*.zip'):
date_creation = datetime.fromtimestamp(f.stat().st_ctime)
date_cleanup = datetime.now() - timedelta(days=MISSION_REPORT_BACKUP_DAYS)
if date_creation < date_cleanup:
f.unlink()
def get_dirs(directory):
dirs = []
for p in directory.iterdir():
if p.is_dir():
dirs.append(p)
dirs.extend(get_dirs(p))
return dirs
for d in get_dirs(directory=MISSION_REPORT_BACKUP_PATH):
try:
d.rmdir()
except OSError:
pass
@transaction.atomic
def stats_whore(m_report_file):
"""
:type m_report_file: Path
"""
mission_timestamp = int(time.mktime(time.strptime(m_report_file.name[14:-8], '%Y-%m-%d_%H-%M-%S')))
if Mission.objects.filter(timestamp=mission_timestamp).exists():
logger.info('{mission} - exists in the DB'.format(mission=m_report_file.stem))
return
logger.info('{mission} - processing new report'.format(mission=m_report_file.stem))
m_report_files = collect_mission_reports(m_report_file=m_report_file)
real_date = TIME_ZONE.localize(datetime.fromtimestamp(mission_timestamp))
real_date = real_date.astimezone(pytz.UTC)
objects = MappingProxyType({obj['log_name']: obj for obj in Object.objects.values()})
# classes = MappingProxyType({obj['cls']: obj['cls_base'] for obj in objects.values()})
score_dict = MappingProxyType({s.key: s.get_value() for s in Score.objects.all()})
m_report = MissionReport(objects=objects)
m_report.processing(files=m_report_files)
backup_log(name=m_report_file.name, lines=m_report.lines, date=real_date)
if not m_report.is_correctly_completed:
logger.info('{mission} - mission has not been completed correctly'.format(mission=m_report_file.stem))
tour = get_tour(date=real_date)
mission = Mission.objects.create(
tour_id=tour.id,
name=m_report.file_path.replace('\\', '/').split('/')[-1].split('.')[0],
path=m_report.file_path,
date_start=real_date,
date_end=real_date + timedelta(seconds=m_report.tik_last // 50),
duration=m_report.tik_last // 50,
timestamp=mission_timestamp,
preset=m_report.preset_id,
settings=m_report.settings,
is_correctly_completed=m_report.is_correctly_completed,
score_dict=dict(score_dict),
)
if m_report.winning_coal_id:
mission.winning_coalition = m_report.winning_coal_id
mission.win_reason = 'task'
mission.save()
    # collect/create player and squad profiles
profiles, players_pilots, players_gunners, players_tankmans, squads = create_profiles(tour=tour, sorties=m_report.sorties)
players_aircraft = defaultdict(dict)
players_mission = {}
players_killboard = {}
coalition_score = {1: 0, 2: 0}
new_sorties = []
for sortie in m_report.sorties:
sortie_aircraft_id = objects[sortie.aircraft_name]['id']
profile = profiles[sortie.account_id]
if sortie.cls_base == 'aircraft':
player = players_pilots[sortie.account_id]
elif sortie.cls == 'aircraft_turret':
player = players_gunners[sortie.account_id]
elif sortie.cls in ('tank_medium', 'tank_turret'):
player = players_tankmans[sortie.account_id]
else:
continue
squad = squads[profile.squad_id] if profile.squad else None
player.squad = squad
new_sortie = create_new_sortie(mission=mission, sortie=sortie, profile=profile, player=player,
sortie_aircraft_id=sortie_aircraft_id)
update_fairplay(new_sortie=new_sortie)
update_bonus_score(new_sortie=new_sortie)
        # do not add the score to the total if there was a disconnect
if not new_sortie.is_disco:
coalition_score[new_sortie.coalition] += new_sortie.score
new_sorties.append(new_sortie)
        # attach a reference to the DB record to the sortie object, to be used when adding sortie events
sortie.sortie_db = new_sortie
if not mission.winning_coalition and WIN_BY_SCORE:
_coalition = sorted(coalition_score.items(), key=operator.itemgetter(1), reverse=True)
max_coal, max_score = _coalition[0]
min_coal, min_score = _coalition[1]
        # minimum score = 1
min_score = min_score or 1
if max_score >= WIN_SCORE_MIN and max_score / min_score >= WIN_SCORE_RATIO:
mission.winning_coalition = max_coal
mission.win_reason = 'score'
mission.save()
for new_sortie in new_sorties:
player_mission = players_mission.setdefault(
new_sortie.player.id,
PlayerMission.objects.get_or_create(profile_id=new_sortie.profile.id, player_id=new_sortie.player.id,
mission_id=mission.id)[0]
)
player_aircraft = players_aircraft[new_sortie.player.id].setdefault(
new_sortie.aircraft.id,
PlayerAircraft.objects.get_or_create(profile_id=new_sortie.profile.id, player_id=new_sortie.player.id,
aircraft_id=new_sortie.aircraft.id)[0]
)
        # if the mission was won by score, the bonuses need to be updated
if mission.win_reason == 'score':
update_bonus_score(new_sortie=new_sortie)
update_sortie(new_sortie=new_sortie, player_mission=player_mission, player_aircraft=player_aircraft)
reward_sortie(sortie=new_sortie)
new_sortie.save()
# ===============================================================================
mission.players_total = len(profiles)
mission.pilots_total = len(players_pilots)
mission.gunners_total = len(players_gunners)
mission.save()
for p in profiles.values():
p.save()
for p in players_pilots.values():
p.save()
reward_tour(player=p)
for p in players_gunners.values():
p.save()
for p in players_tankmans.values():
p.save()
for aircrafts in players_aircraft.values():
for a in aircrafts.values():
a.save()
for p in players_mission.values():
p.save()
reward_mission(player_mission=p)
for p in players_killboard.values():
p.save()
for s in squads.values():
s.save()
tour.save()
for event in m_report.log_entries:
params = {
'mission_id': mission.id,
'date': real_date + timedelta(seconds=event['tik'] // 50),
'tik': event['tik'],
'extra_data': {
'pos': event.get('pos'),
},
}
if event['type'] == 'respawn':
params['type'] = 'respawn'
params['act_object_id'] = event['sortie'].sortie_db.aircraft.id
params['act_sortie_id'] = event['sortie'].sortie_db.id
elif event['type'] == 'end':
params['type'] = 'end'
params['act_object_id'] = event['sortie'].sortie_db.aircraft.id
params['act_sortie_id'] = event['sortie'].sortie_db.id
elif event['type'] == 'takeoff':
params['type'] = 'takeoff'
params['act_object_id'] = event['aircraft'].sortie.sortie_db.aircraft.id
params['act_sortie_id'] = event['aircraft'].sortie.sortie_db.id
elif event['type'] == 'landed':
params['act_object_id'] = event['aircraft'].sortie.sortie_db.aircraft.id
params['act_sortie_id'] = event['aircraft'].sortie.sortie_db.id
if event['is_rtb'] and not event['is_killed']:
params['type'] = 'landed'
else:
if event['status'] == LifeStatus.destroyed:
params['type'] = 'crashed'
else:
params['type'] = 'ditched'
elif event['type'] == 'bailout':
params['type'] = 'bailout'
params['act_object_id'] = event['bot'].sortie.sortie_db.aircraft.id
params['act_sortie_id'] = event['bot'].sortie.sortie_db.id
elif event['type'] == 'damage':
params['extra_data']['damage'] = event['damage']
params['extra_data']['is_friendly_fire'] = event['is_friendly_fire']
if event['target'].cls_base == 'crew':
params['type'] = 'wounded'
else:
params['type'] = 'damaged'
if event['attacker']:
if event['attacker'].sortie:
params['act_object_id'] = event['attacker'].sortie.sortie_db.aircraft.id
params['act_sortie_id'] = event['attacker'].sortie.sortie_db.id
else:
params['act_object_id'] = objects[event['attacker'].log_name]['id']
if event['target'].sortie:
params['cact_object_id'] = event['target'].sortie.sortie_db.aircraft.id
params['cact_sortie_id'] = event['target'].sortie.sortie_db.id
else:
params['cact_object_id'] = objects[event['target'].log_name]['id']
elif event['type'] == 'kill':
params['extra_data']['is_friendly_fire'] = event['is_friendly_fire']
if event['target'].cls_base == 'crew':
params['type'] = 'killed'
elif event['target'].cls_base == 'aircraft':
params['type'] = 'shotdown'
else:
params['type'] = 'destroyed'
if event['attacker']:
if event['attacker'].sortie:
params['act_object_id'] = event['attacker'].sortie.sortie_db.aircraft.id
params['act_sortie_id'] = event['attacker'].sortie.sortie_db.id
else:
params['act_object_id'] = objects[event['attacker'].log_name]['id']
if event['target'].sortie:
params['cact_object_id'] = event['target'].sortie.sortie_db.aircraft.id
params['cact_sortie_id'] = event['target'].sortie.sortie_db.id
else:
params['cact_object_id'] = objects[event['target'].log_name]['id']
LogEntry.objects.create(**params)
logger.info('{mission} - processing finished'.format(mission=m_report_file.stem))
def get_tour(date):
"""
:type date: datetime
"""
if NEW_TOUR_BY_MONTH:
try:
tour = Tour.objects.get(date_start__year=date.year, date_start__month=date.month, is_ended=False)
except Tour.DoesNotExist:
tour = Tour.objects.create(date_start=date)
logger.info('started a new tour by month')
Tour.objects.exclude(id=tour.id).filter(is_ended=False).update(is_ended=True, date_end=date)
else:
try:
tour = Tour.objects.get(is_ended=False)
except Tour.DoesNotExist:
tour = Tour.objects.create(title='Tour name')
logger.warning('open tour was not found - started a new tour')
except Tour.MultipleObjectsReturned:
logger.error('multiple not ended tours - should be only one')
input()
sys.exit()
return tour
def create_profiles(tour, sorties):
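    # Get or create Profile and per-tour Player records for every participant, split by
    # role (pilot / gunner / tankman), and collect their squads.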
profiles = {}
players_pilots = {}
players_gunners = {}
players_tankmans = {}
for s in sorties:
profile = profiles.setdefault(
s.account_id, Profile.objects.get_or_create(uuid=s.account_id, defaults={'nickname': s.nickname})[0])
profile.nickname = s.nickname
if s.cls_base == 'aircraft':
players_pilots.setdefault(
s.account_id, Player.objects.get_or_create(profile_id=profile.id, tour_id=tour.id, type='pilot')[0])
elif s.cls == 'aircraft_turret':
players_gunners.setdefault(
s.account_id, Player.objects.get_or_create(profile_id=profile.id, tour_id=tour.id, type='gunner')[0])
elif s.cls in ('tank_medium', 'tank_turret'):
players_tankmans.setdefault(
s.account_id, Player.objects.get_or_create(profile_id=profile.id, tour_id=tour.id, type='tankman')[0])
squads = {}
for p in profiles.values():
        # if the profile is not linked to a user, try to find and link one
if not p.user:
try:
user = User.objects.get(username=p.nickname, is_active=True)
if not hasattr(user, 'profile'):
p.connect_with_user(user=user)
except User.DoesNotExist:
pass
if p.squad:
squads.setdefault(p.squad_id, Squad.objects.get_or_create(profile_id=p.squad_id, tour_id=tour.id)[0])
return profiles, players_pilots, players_gunners, players_tankmans, squads
def create_new_sortie(mission, profile, player, sortie, sortie_aircraft_id):
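    # Build an (unsaved) Sortie record from a parsed mission-report sortie: timings,
    # kill/assist counters, PvP/PvE killboards and the basic score.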
sortie_tik_last = sortie.tik_bailout or sortie.tik_landed or sortie.tik_end or sortie.tik_last
sortie_date_start = mission.date_start + timedelta(seconds=sortie.tik_spawn // 50)
sortie_date_end = mission.date_start + timedelta(seconds=sortie_tik_last // 50)
flight_time = round((sortie_tik_last - (sortie.tik_takeoff or sortie.tik_spawn)) / 50, 0)
is_ignored = False
    # the sortie is ignored if its total time is less than the configured minimum
if SORTIE_MIN_TIME:
if (sortie_tik_last // 50) - (sortie.tik_spawn // 50) < SORTIE_MIN_TIME:
is_ignored = True
killboard_pvp = defaultdict(int)
killboard_pve = defaultdict(int)
# player_targets = []
ak_total = 0
fak_total = 0
ak_assist = 0
gk_total = 0
fgk_total = 0
score = 0
for targets in sortie.killboard.values():
for target in targets:
is_friendly = sortie.coal_id == target.coal_id
if not is_friendly:
score += mission.score_dict[target.cls]
if target.cls_base == 'aircraft':
ak_total += 1
elif target.cls_base in ('block', 'vehicle', 'tank'):
gk_total += 1
if target.sortie:
killboard_pvp[target.cls] += 1
# if sortie.cls_base == 'aircraft' and target.sortie.cls_base == 'aircraft':
# opponent = players_pilots[target.sortie.account_id]
# player_targets.append(opponent)
else:
killboard_pve[target.cls] += 1
else:
cls_name = 'f_%s' % target.cls
if target.cls_base == 'aircraft':
fak_total += 1
elif target.cls_base in ('block', 'vehicle', 'tank'):
fgk_total += 1
if target.sortie:
killboard_pvp[cls_name] += 1
else:
killboard_pve[cls_name] += 1
for targets in sortie.assistboard.values():
for target in targets:
if target.cls_base == 'aircraft':
                # do not count friendlies
if sortie.coal_id == target.coal_id:
continue
ak_assist += 1
score += mission.score_dict['ak_assist']
new_sortie = Sortie(
profile=profile,
player=player,
tour=mission.tour,
mission=mission,
nickname=sortie.nickname,
date_start=sortie_date_start,
date_end=sortie_date_end,
flight_time=flight_time,
aircraft_id=sortie_aircraft_id,
fuel=sortie.fuel or 0,
skin=sortie.skin,
payload_id=sortie.payload_id,
weapon_mods_id=sortie.weapon_mods_id,
ammo={'used_cartridges': sortie.used_cartridges,
'used_bombs': sortie.used_bombs,
'used_rockets': sortie.used_rockets,
'used_shells': sortie.used_shells,
'hit_bullets': sortie.hit_bullets,
'hit_bombs': sortie.hit_bombs,
'hit_rockets': sortie.hit_rockets,
'hit_shells': sortie.hit_shells},
coalition=sortie.coal_id,
country=sortie.country_id,
is_airstart=sortie.is_airstart,
ak_total=ak_total,
gk_total=gk_total,
fak_total=fak_total,
fgk_total=fgk_total,
ak_assist=ak_assist,
killboard_pvp=killboard_pvp,
killboard_pve=killboard_pve,
status=sortie.sortie_status.status,
aircraft_status=sortie.aircraft_status.status,
bot_status=sortie.bot_status.status,
is_bailout=sortie.is_bailout,
is_captured=sortie.is_captured,
is_disco=sortie.is_disco,
score=score,
score_dict={'basic': score},
ratio=sortie.ratio,
damage=round(sortie.aircraft_damage, 2),
wound=round(sortie.bot_damage, 2),
debug={'aircraft_id': sortie.aircraft_id, 'bot_id': sortie.bot_id},
is_ignored=is_ignored,
)
return new_sortie
def update_sortie(new_sortie, player_mission, player_aircraft):
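    # Fold a finished sortie into the player's overall, per-mission and per-aircraft
    # statistics; disconnects only bump the disco counters, ignored sorties are skipped.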
player = new_sortie.player
if not player.date_first_sortie:
player.date_first_sortie = new_sortie.date_start
player.date_last_combat = new_sortie.date_start
player.date_last_sortie = new_sortie.date_start
    # if the sortie ended with a disconnect, its results are not added to the overall profile
if new_sortie.is_disco:
player.disco += 1
player_mission.disco += 1
player_aircraft.disco += 1
return
    # if the sortie is ignored for any reason
elif new_sortie.is_ignored:
return
    # if anything was destroyed during the sortie, count it as a combat sortie
if new_sortie.score:
player.date_last_combat = new_sortie.date_start
    # TODO: check how this behaves for gunner sorties
if not new_sortie.is_not_takeoff:
player.sorties_coal[new_sortie.coalition] += 1
player_mission.sorties_coal[new_sortie.coalition] += 1
if player.squad:
player.squad.sorties_coal[new_sortie.coalition] += 1
if new_sortie.aircraft.cls_base == 'aircraft':
if new_sortie.aircraft.cls in player.sorties_cls:
player.sorties_cls[new_sortie.aircraft.cls] += 1
else:
player.sorties_cls[new_sortie.aircraft.cls] = 1
if player.squad:
if new_sortie.aircraft.cls in player.squad.sorties_cls:
player.squad.sorties_cls[new_sortie.aircraft.cls] += 1
else:
player.squad.sorties_cls[new_sortie.aircraft.cls] = 1
update_general(player=player, new_sortie=new_sortie)
update_general(player=player_mission, new_sortie=new_sortie)
update_general(player=player_aircraft, new_sortie=new_sortie)
if player.squad:
update_general(player=player.squad, new_sortie=new_sortie)
update_ammo(sortie=new_sortie, player=player)
update_ammo(sortie=new_sortie, player=player_mission)
update_ammo(sortie=new_sortie, player=player_aircraft)
update_killboard(player=player, killboard_pvp=new_sortie.killboard_pvp,
killboard_pve=new_sortie.killboard_pve)
update_killboard(player=player_mission, killboard_pvp=new_sortie.killboard_pvp,
killboard_pve=new_sortie.killboard_pve)
update_killboard(player=player_aircraft, killboard_pvp=new_sortie.killboard_pvp,
killboard_pve=new_sortie.killboard_pve)
if new_sortie.is_relive:
player.streak_current = 0
player.streak_ground_current = 0
player.score_streak_current = 0
player.sorties_streak_current = 0
# elif new_sortie.is_disco:
# player.streak_current = 0
else:
player.streak_current += new_sortie.ak_total
player.streak_max = max(player.streak_max, player.streak_current)
player.streak_ground_current += new_sortie.gk_total
player.streak_ground_max = max(player.streak_ground_max, player.streak_ground_current)
player.score_streak_current += new_sortie.score
player.score_streak_max = max(player.score_streak_max, player.score_streak_current)
player.sorties_streak_current += 1
player.sorties_streak_max = max(player.sorties_streak_max, player.sorties_streak_current)
update_status(new_sortie=new_sortie, player=player)
update_status(new_sortie=new_sortie, player=player_mission)
update_status(new_sortie=new_sortie, player=player_aircraft)
if player.squad:
update_status(new_sortie=new_sortie, player=player.squad)
# for target in player_targets:
# update_killboard_pvp(player=player, opponent=target, players_killboard=players_killboard)
# update_elo_rating(winner=player, loser=target)
def update_general(player, new_sortie):
if not new_sortie.is_not_takeoff:
player.sorties_total += 1
player.flight_time += new_sortie.flight_time
if new_sortie.is_relive:
player.relive += 1
player.ak_total += new_sortie.ak_total
player.fak_total += new_sortie.fak_total
player.gk_total += new_sortie.gk_total
player.fgk_total += new_sortie.fgk_total
player.ak_assist += new_sortie.ak_assist
player.score += new_sortie.score
def update_ammo(sortie, player):
    # the logs have bugs: at the end of a sortie an aircraft can report more ammunition than it started with
if sortie.ammo['used_cartridges'] >= sortie.ammo['hit_bullets']:
player.ammo['used_cartridges'] += sortie.ammo['used_cartridges']
player.ammo['hit_bullets'] += sortie.ammo['hit_bullets']
if sortie.ammo['used_bombs'] >= sortie.ammo['hit_bombs']:
player.ammo['used_bombs'] += sortie.ammo['used_bombs']
player.ammo['hit_bombs'] += sortie.ammo['hit_bombs']
if sortie.ammo['used_rockets'] >= sortie.ammo['hit_rockets']:
player.ammo['used_rockets'] += sortie.ammo['used_rockets']
player.ammo['hit_rockets'] += sortie.ammo['hit_rockets']
if sortie.ammo['used_shells'] >= sortie.ammo['hit_shells']:
player.ammo['used_shells'] += sortie.ammo['used_shells']
player.ammo['hit_shells'] += sortie.ammo['hit_shells']
def update_status(new_sortie, player):
if not new_sortie.is_not_takeoff:
player.takeoff += 1
if new_sortie.is_landed:
player.landed += 1
elif new_sortie.is_ditched:
player.ditched += 1
elif new_sortie.is_crashed:
player.crashed += 1
elif new_sortie.is_shotdown:
player.shotdown += 1
elif new_sortie.is_in_flight:
player.in_flight += 1
if new_sortie.is_dead:
player.dead += 1
elif new_sortie.is_wounded:
player.wounded += 1
if new_sortie.is_captured and not new_sortie.is_dead:
player.captured += 1
if new_sortie.is_bailout:
player.bailout += 1
def update_killboard(player, killboard_pvp, killboard_pve):
for cls, num in killboard_pvp.items():
player.killboard_pvp.setdefault(cls, 0)
player.killboard_pvp[cls] += num
for cls, num in killboard_pve.items():
player.killboard_pve.setdefault(cls, 0)
player.killboard_pve[cls] += num
def update_killboard_pvp(player, opponent, players_killboard):
    # the key is a tuple of the two players' IDs, sorted in ascending order
kb_key = tuple(sorted((player.id, opponent.id)))
player_killboard = players_killboard.setdefault(
kb_key,
KillboardPvP.objects.get_or_create(player_1_id=kb_key[0], player_2_id=kb_key[1])[0])
player_killboard.add_won(player=player)
def update_elo_rating(winner, loser):
if (winner.shotdown + winner.ak_total) <= 30:
k_winner = 40
elif winner.elo >= 2400:
k_winner = 10
else:
k_winner = 20
e_winner = 1 / (1 + 10 ** ((loser.elo - winner.elo) / 400))
diff = round(k_winner * (1 - e_winner), 2)
winner.elo += diff
loser.elo -= diff
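# Worked example (illustrative values only): with winner.elo == loser.elo == 1500
# and an experienced winner (k_winner == 20), e_winner = 1 / (1 + 10 ** 0) = 0.5,
# diff = round(20 * 0.5, 2) = 10.0, so the winner moves to 1510.0 and the loser to 1490.0.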
def update_fairplay(new_sortie):
player = new_sortie.player
score_dict = new_sortie.mission.score_dict
if new_sortie.is_disco:
player.fairplay -= score_dict['fairplay_disco']
if new_sortie.fak_total:
player.fairplay -= score_dict['fairplay_fak']
if new_sortie.fgk_total:
player.fairplay -= score_dict['fairplay_fgk']
if player.fairplay < 0:
player.fairplay = 0
if new_sortie.is_disco or new_sortie.fak_total or new_sortie.fgk_total:
player.fairplay_time = 0
elif player.fairplay < 100:
player.fairplay_time += new_sortie.flight_time
fairplay_hours = player.fairplay_time // 3600
if fairplay_hours > 0:
player.fairplay += (score_dict['fairplay_up'] * fairplay_hours)
player.fairplay_time -= 3600 * fairplay_hours
if player.fairplay > 100:
player.fairplay = 100
new_sortie.fairplay = player.fairplay
def update_bonus_score(new_sortie):
    # bonus percentage
bonus_pct = 0
bonus_dict = {}
    # only "fair play" players receive bonuses
if new_sortie.fairplay == 100:
if new_sortie.is_landed:
bonus_pct += 25
bonus_dict['landed'] = 25
if new_sortie.coalition == new_sortie.mission.winning_coalition:
bonus_pct += 25
bonus_dict['winning_coalition'] = 25
bonus_dict['total'] = bonus_pct
    # reset to the base score, since this function may be called several times
new_sortie.score = new_sortie.score_dict['basic']
new_sortie.bonus = bonus_dict
bonus_score = new_sortie.score * bonus_pct // 100
new_sortie.score_dict['bonus'] = bonus_score
new_sortie.score += bonus_score
penalty_score = new_sortie.score * (100 - new_sortie.fairplay) // 100
new_sortie.score_dict['penalty'] = penalty_score
new_sortie.score -= penalty_score
# new_sortie.save()
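# Worked example (illustrative values only): with a basic score of 1000, fairplay 100,
# a landed sortie and a win by the sortie's coalition, bonus_pct = 50, so
# bonus_score = 1000 * 50 // 100 = 500 and score becomes 1500; the penalty is
# 1500 * (100 - 100) // 100 = 0. With fairplay 80 no bonus is applied and the
# penalty is 1000 * 20 // 100 = 200, leaving a final score of 800.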
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
from io import open
import hashlib
import argparse
from transformers import get_linear_schedule_with_warmup
from layers import RNNModel, AWDLSTMEncoder, DropoutLinearDecoder, LSTMEncoder, LinearDecoder
from utils import count_parameters, get_loaders, drop_mult
from data import Corpus, Dictionary
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='../data/wikitext-2', help='location of the data corpus')
parser.add_argument('--train', type=str, default='wiki.train.tokens', help='name of the training corpus')
parser.add_argument('--valid', type=str, default='wiki.valid.tokens', help='name of the validation corpus')
parser.add_argument('--test', type=str, default='wiki.test.tokens', help='name of the testing corpus')
parser.add_argument('--output', type=str, default='awd_lstm', help='output name')
parser.add_argument('--bs', type=int, default=80, help='batch size')
parser.add_argument('--eval_bs', type=int, default=10, help='evaluation batch size')
parser.add_argument('--bptt', type=int, default=80, help='bptt length')
parser.add_argument('--use_var_bptt', action='store_true', help='use variable length bptt')
parser.add_argument('--rebuild_dataset', action='store_true', help='force rebuild the dataset')
parser.add_argument('--load_vocab', action='store_true', help='load vocabulary')
parser.add_argument('--vocab_file', type=str, default='vocab.pth', help='pretrained vocabulary file')
parser.add_argument('--save_vocab', action='store_true', help='save vocabulary')
parser.add_argument('--encoder', type=str, default='awd_lstm', choices=['awd_lstm', 'lstm'], help='encoder')
parser.add_argument('--decoder', type=str, default='dropoutlinear', choices=['dropoutlinear', 'linear'], help='decoder')
parser.add_argument('--emb_dim', type=int, default=400, help='embedding dimensions')
parser.add_argument('--hidden_dim', type=int, default=1152, help='hidden dimensions')
parser.add_argument('--num_layers', type=int, default=3, help='number of rnn layers')
parser.add_argument('--emb_dp', type=float, default=0.1, help='embedding dropout')
parser.add_argument('--hidden_dp', type=float, default=0.3, help='hidden to hidden dropout')
parser.add_argument('--input_dp', type=float, default=0.3, help='input dropout')
parser.add_argument('--weight_dp', type=float, default=0.5, help='dropconnect dropout')
parser.add_argument('--out_dp', type=float, default=0.4, help='output dropout')
parser.add_argument('--initrange', type=float, default=0.05, help='initialization range')
parser.add_argument('--tie_weights', action='store_true', help='tie embeddings and decoder weights')
parser.add_argument('--use_pretrained', action='store_true', help='use pretrained weights')
parser.add_argument('--freeze_encoder', action='store_true', help='freezes the encoder')
parser.add_argument('--pretrained_file', type=str, default='pretrained_wt103', help='pretrained model file')
parser.add_argument('--dm', type=float, default=1.0, help='dropout rate scaling')
parser.add_argument('--anneal_factor', type=float, default=4.0, help='learning rate anneal rate')
parser.add_argument('--lr', type=float, default=30, help='learning rate')
parser.add_argument('--no_lr_scaling', action='store_true', help='disable lr scaling when using variable bptt, or disable automatic lr annealing otherwise')
parser.add_argument('--optimizer', type=str, default='sgd', choices=['sgd', 'adam'], help='Optimizer to use')
parser.add_argument('--no_warmup', action='store_true', help='do not use linear warmups when using Adam')
parser.add_argument('--warmup_pct', type=float, default=0.1, help='percentage of steps for warmup')
parser.add_argument('--disc_rate', type=float, default=1.0, help='Discriminative learning rate scaling')
parser.add_argument('--epochs', type=int, default=2, help='epochs to train the network')
parser.add_argument('--clip', type=float, default=0.25, help='gradient clipping')
parser.add_argument('--alpha', type=float, default=2.0, help='AR alpha parameter')
parser.add_argument('--beta', type=float, default=1.0, help='TAR beta parameter')
parser.add_argument('--no_cuda', action='store_true', help='do not use CUDA')
parser.add_argument('--save_graphs', action='store_true', help='save the loss curve and final epoch results')
parser.add_argument('--gpu', type=int, default=0, help='index of GPU to use')
parser.add_argument('--seed', type=int, default=42, help='random seed')
args = parser.parse_args()
print(args)
if args.decoder == 'dropoutlinear': assert args.encoder == 'awd_lstm'
# CUDA
device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and not args.no_cuda else 'cpu')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
print("Using device: {}".format(device))
# Produce or load the dataset
path = args.path
fn = '{}/corpus.{}.data'.format(path, hashlib.md5(path.encode()).hexdigest())
if os.path.exists(fn) and not args.rebuild_dataset:
print('Loading cached dataset...')
corpus = torch.load(fn)
else:
print('Producing dataset...')
if args.load_vocab:
print('Vocabulary has been loaded from {}'.format(args.vocab_file))
corpus = Corpus(path, args.train, args.valid, args.test, load_vocab=args.load_vocab, vocab_file=args.vocab_file)
torch.save(corpus, fn)
if args.save_vocab:
with open('{}/{}'.format(path, args.vocab_file), 'wb') as f:
torch.save([corpus.dictionary.word2idx, corpus.dictionary.idx2word], f)
vocab_sz = len(corpus.dictionary)
# Produce dataloaders
train_loader = get_loaders(corpus.train, args.bs, args.bptt, use_var_bptt=args.use_var_bptt)
valid_loader = get_loaders(corpus.valid, args.eval_bs, args.bptt)
test_loader = get_loaders(corpus.test, args.eval_bs, args.bptt)
# Construct encoder
if args.encoder == 'awd_lstm':
encoder = AWDLSTMEncoder(vocab_sz=vocab_sz, emb_dim=args.emb_dim, hidden_dim=args.hidden_dim,
num_layers=args.num_layers, emb_dp=args.emb_dp, weight_dp=args.weight_dp,
input_dp=args.input_dp, hidden_dp=args.hidden_dp, tie_weights=args.tie_weights)
elif args.encoder == 'lstm':
encoder = LSTMEncoder(vocab_sz=vocab_sz, emb_dim=args.emb_dim, num_layers=args.num_layers,
hidden_dim=args.emb_dim if args.tie_weights else args.hidden_dim, dropout=args.weight_dp)
# Construct decoder
if args.decoder == 'dropoutlinear':
decoder = DropoutLinearDecoder(hidden_dim=args.emb_dim if args.tie_weights else args.hidden_dim,
vocab_sz=vocab_sz, out_dp=args.out_dp)
elif args.decoder == 'linear':
decoder = LinearDecoder(hidden_dim=args.emb_dim if args.tie_weights else args.hidden_dim, vocab_sz=vocab_sz)
# Produce model
model = RNNModel(encoder, decoder, tie_weights=args.tie_weights, initrange=args.initrange)
model = drop_mult(model, dm=args.dm)
if args.freeze_encoder:
model.freeze()
model.unfreeze(-1)
print(model)
# Pretrained
if args.use_pretrained:
print("Using pretrained model {}".format(args.pretrained_file))
with open('{}/{}'.format(path, args.pretrained_file), 'rb') as f:
inc = model.load_state_dict(torch.load(f, map_location=device), strict=False)
print(inc)
model = model.to(device)
# Parameter groups
p_groups = [{'name': '0', 'params': []}, {'name': '1', 'params': []}]
for n, p in model.named_parameters():
if 'rnn' in n:
p_groups[1]['params'].append(p)
else:
p_groups[0]['params'].append(p)
# Optimization setup
criterion = nn.CrossEntropyLoss()
optimizer, scheduler = None, None
if args.optimizer == 'sgd':
optimizer = optim.SGD(p_groups, lr=args.lr)
elif args.optimizer == 'adam':
optimizer = optim.Adam(p_groups, lr=args.lr)
steps = len(train_loader) * args.epochs
if not args.no_warmup:
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(steps * args.warmup_pct), num_training_steps=steps)
print("Optimization settings:")
print(optimizer)
print("Scheduler: {}".format(scheduler))
print("The model has {:,} trainable parameters".format(count_parameters(model)))
# Training setup
best_loss = np.inf
best_epoch = 0
train_losses = []
valid_losses = []
# Training!
print("Beginning training")
try:
for e in range(1, args.epochs + 1):
model.train()
model.reset_hidden()
train_loss = 0
with tqdm(total=len(train_loader)) as t:
for batch in train_loader:
x, y = batch
# Scale learning rate to sequence length
if args.use_var_bptt and not args.no_lr_scaling:
seq_len, _ = x.shape
optimizer.param_groups[0]['lr'] = args.lr * seq_len / args.bptt
# Adjust discriminative learning rates
for i in range(len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] /= args.disc_rate ** i
x = x.to(device)
y = y.to(device)
out = model(x, return_states=True)
if args.encoder == 'awd_lstm': out, hidden, raw_out, dropped_out = out
raw_loss = criterion(out.view(-1, vocab_sz), y)
# AR/TAR
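                    # AR (activation regularization) penalizes large activations of the
                    # last layer's dropped output; TAR (temporal activation regularization)
                    # penalizes large changes between consecutive timesteps of the raw output.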
loss = raw_loss
if args.encoder == 'awd_lstm':
loss += args.alpha * dropped_out[-1].pow(2).mean()
loss += args.beta * (raw_out[-1][1:] - raw_out[-1][:-1]).pow(2).mean()
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
t.set_postfix({'lr{}'.format(i): optimizer.param_groups[i]['lr'] for i in range(len(optimizer.param_groups))})
if scheduler is not None: scheduler.step()
# Restore original LR
if args.use_var_bptt and not args.no_lr_scaling:
optimizer.param_groups[0]['lr'] = args.lr
t.update()
train_loss += raw_loss.item()
train_loss /= len(train_loader)
train_losses.append(train_loss)
model.eval()
model.reset_hidden()
valid_loss = 0
for batch in tqdm(valid_loader):
with torch.no_grad():
x, y = batch
x = x.to(device)
y = y.to(device)
out = model(x)
loss = criterion(out.view(-1, vocab_sz), y)
valid_loss += loss.item()
valid_loss /= len(valid_loader)
valid_losses.append(valid_loss)
# Track and anneal LR
if valid_loss < best_loss:
best_loss = valid_loss
best_epoch = e
print("Best loss so far. Saving model.")
with open('{}/{}.pth'.format(path, args.output), 'wb') as f:
torch.save(model.state_dict(), f)
else:
if not args.use_var_bptt and not args.no_lr_scaling:
optimizer.param_groups[0]['lr'] /= args.anneal_factor
cur_lr = optimizer.param_groups[0]['lr']
print("Epoch {:3} | Train Loss {:.4f} | Train Ppl {:.4f} | Valid Loss {:.4f} | Valid Ppl {:.4f} | LR {:.4f}".format(e, train_loss, np.exp(train_loss), valid_loss, np.exp(valid_loss), cur_lr))
except KeyboardInterrupt:
print("Exiting training early")
# Load best saved model
print("Loading best model")
with open('{}/{}.pth'.format(path, args.output), 'rb') as f:
model.load_state_dict(torch.load(f))
# Testing evaluation
print("Evaluating model")
model.eval()
model.reset_hidden()
test_loss = 0
for batch in tqdm(test_loader):
with torch.no_grad():
x, y = batch
x = x.to(device)
y = y.to(device)
out = model(x)
loss = criterion(out.view(-1, vocab_sz), y)
test_loss += loss.item()
test_loss /= len(test_loader)
print("Test Loss {:.4f} | Test Ppl {:.4f}".format(test_loss, np.exp(test_loss)))
# Saving graphs
if args.save_graphs:
print("Saving loss data")
pd.DataFrame(data={'train': train_losses, 'valid': valid_losses}).to_csv('{}/{}.csv'.format(path, args.output), index=False)
with open('{}/{}.txt'.format(path, args.output), 'w') as f:
f.write("Best loss {:.4f} | Best ppl {:.4f} | Epoch {} | Test loss {:.4f} | Test ppl {:.4f}".format(best_loss, np.exp(best_loss), best_epoch, test_loss, np.exp(test_loss)))
|
import os
import sys
import shutil
import platform
print('!!!!!!!!!! WARNING !!!!!!!!!!')
print('You should use GenerateADISymlinks instead.')
print('To force continue type \'yes\':')
if input() == 'yes':
print('Continuing')
else:
print('Exiting')
sys.exit()
env = platform.system()
print('OS: ' + env)
def create_path(path):
    path_words = path.split('/')
new_path = os.path.join(*path_words)
return new_path
def upper_file(filename):
split = str.rpartition(filename, '.')
uppered = str.upper(split[0]) + split[1] + split[2]
#print(' Renaming ' + filename + ' to ' + uppered)
return uppered
DRIVER_GITIGNORE = "../LTSketchbook/libraries/.gitignore"
VALID_ADI_DRIVERS = "./ADIDrivers.txt"
ADI_DRIVER_LOCATIONS = [
'../ADIDrivers/no-os/device_drivers',
'../ADIDrivers/no-os/drivers'
]
LIN_DRIVER_LOCATIONS = '../LTSketchbook/libraries'
# Convert paths to OS-specific paths
lin_driver_path = create_path(LIN_DRIVER_LOCATIONS)
adi_driver_paths = []
for s in ADI_DRIVER_LOCATIONS:
adi_driver_paths.append(create_path(s))
# Get list of drivers we want to copy
valid_drivers = []
with open(VALID_ADI_DRIVERS) as f:
valid_drivers = f.readlines()
valid_drivers = [x.strip() for x in valid_drivers]
# Create gitignore
with open(DRIVER_GITIGNORE, 'w+') as f:
print('\n**********************************')
print('Creating gitignore at ' + DRIVER_GITIGNORE)
f.write('#################################################\n')
f.write('# THIS IS GENERATED BY PYTHON SCRIPT, DO NOT EDIT\n')
for line in valid_drivers:
f.write(line + "/\n")
# Iterate through driver folders
for adi_path in adi_driver_paths:
print('\n**********************************')
print('Searching through ' + adi_path)
for searchdir, partdirs, filenames in os.walk(adi_path):
# Iterate through parts folders in folder
for part in partdirs:
print(' Found part ' + part)
# Check if we want to copy this part
if part in valid_drivers:
# Create source path
adi_part_path = os.path.join(adi_path, part)
# Create destination path
lin_part_path = os.path.join(lin_driver_path, str.upper(part))
if not os.path.exists(lin_part_path):
os.makedirs(lin_part_path)
# Iterate through files in source path
for partdir, subpartdirs, partfiles in os.walk(adi_part_path):
for partfile in partfiles:
src_path = os.path.join(adi_part_path, partfile)
dst_path = os.path.join(lin_part_path, upper_file(partfile))
shutil.copy2(src_path, dst_path)
print(' Copying ' + src_path + ' to ' + dst_path)
break
else:
print(' Part not found in ADIDrivers.txt, skipping')
break
print('Done!') |
"""
Render service to render previews of materials
"""
from __future__ import print_function
import sys
import socket
import time
import pickle
from threading import Thread
from panda3d.core import load_prc_file_data, Vec4, Filename, Mat4, Notify
from panda3d.core import CS_zup_right, CS_yup_right, PNMImage, BamCache
from direct.showbase.ShowBase import ShowBase
sys.path.insert(0, "../../")
from rpcore import RenderPipeline, PointLight
class Application(ShowBase):
ICOMING_PORT = 62360
def __init__(self):
load_prc_file_data("", "win-size 512 512")
load_prc_file_data("", "window-type offscreen")
load_prc_file_data("", "model-cache-dir")
load_prc_file_data("", "model-cache-textures #f")
load_prc_file_data("", "textures-power-2 none")
load_prc_file_data("", "alpha-bits 0")
load_prc_file_data("", "print-pipe-types #f")
# Construct render pipeline
self.render_pipeline = RenderPipeline()
self.render_pipeline.mount_mgr.config_dir = "config/"
self.render_pipeline.set_empty_loading_screen()
self.render_pipeline.create(self)
self.setup_scene()
# Disable model caching
BamCache.get_global_ptr().cache_models = False
self.update_queue = []
self.start_listen()
# Render initial frames
for i in range(10):
self.taskMgr.step()
last_update = 0.0
self.scene_node = None
current_lights = []
current_envprobes = []
# Wait for updates
while True:
# Update once in a while
curr_time = time.time()
if curr_time > last_update + 1.0:
last_update = curr_time
self.taskMgr.step()
if self.update_queue:
if self.scene_node:
self.scene_node.remove_node()
# Only take the latest packet
payload = self.update_queue.pop(0)
print("RENDERING:", payload)
scene = loader.loadModel(Filename.from_os_specific(payload["scene"]))
for light in scene.find_all_matches("**/+PointLight"):
light.remove_node()
for light in scene.find_all_matches("**/+Spotlight"):
light.remove_node()
# Find camera
main_cam = scene.find("**/Camera")
if main_cam:
transform_mat = main_cam.get_transform(render).get_mat()
transform_mat = Mat4.convert_mat(CS_zup_right, CS_yup_right) * transform_mat
base.camera.set_mat(transform_mat)
else:
print("WARNING: No camera found")
base.camera.set_pos(0, -3.5, 0)
base.camera.look_at(0, -2.5, 0)
base.camLens.set_fov(64.0)
self.scene_node = scene
scene.reparent_to(render)
# Render scene
for i in range(8):
self.taskMgr.step()
dest_path = Filename.from_os_specific(payload["dest"])
print("Saving screenshot to", dest_path)
self.win.save_screenshot(dest_path)
self.notify_about_finish(int(payload["pingback_port"]))
def start_listen(self):
""" Starts the listener thread """
thread = Thread(target=self.listener_thread, args=(), name="ListenerThread")
        thread.daemon = True
thread.start()
return thread
def listener_thread(self):
""" Thread which listens to incoming updates """
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("Listening on 127.0.0.1:" + str(self.ICOMING_PORT))
try:
sock.bind(("127.0.0.1", self.ICOMING_PORT))
while True:
data, addr = sock.recvfrom(8192)
self.handle_data(data)
except Exception as msg:
print("Failed to bind to address! Reason:", msg)
finally:
sock.close()
def handle_data(self, data):
""" Handles a new update """
# print("Got:", data)
unpacked_data = pickle.loads(data)
# print("Data = ", unpacked_data)
self.update_queue.append(unpacked_data)
def notify_about_finish(self, port):
""" Notifies the caller that the result finished """
print("Sending finish result to localhost:" + str(port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
sock.connect(("localhost", port))
except Exception as msg:
print("Could not send finish result: ", msg)
return
sock.sendall(b"done")
print("Sent done flag.")
sock.close()
def setup_scene(self):
""" Setups the basic scene geometry """
self.disableMouse()
self.render2d.hide()
self.aspect2d.hide()
light = PointLight()
light.pos = 20.0, -0.85, -1.31
light.radius = 100.0
light.energy = 2500
light.set_color_from_temperature(8000)
# self.render_pipeline.add_light(light)
light = PointLight()
light.pos = -11.2, -13.84, -9.24
light.radius = 1e20
light.set_color_from_temperature(8000)
light.energy = 2500
# self.render_pipeline.add_light(light)
# envprobe = self.render_pipeline.add_environment_probe()
# envprobe.set_pos(0, -16.2, 4.4)
# envprobe.set_scale(40, 40, 40)
# envprobe.parallax_correction = False
Application()
|
__all__ = ['itur_test', 'ITU_validation_report_test', 'ITU_validation_test', 'examples_test']
|
import tensorflow.compat.v1 as tf
try:
from .tokenizer_utils import get_tokenizer
except ImportError:
from tokenizer_utils import get_tokenizer
import json
from pathlib import PurePath, Path
import cv2
from tqdm import tqdm
import glob
import random
import os
import shutil
def dump_jsonl(data, output_path, append=False):
"""
Write list of objects to a JSON lines file.
"""
mode = 'a+' if append else 'w'
with open(output_path, mode, encoding='utf-8') as f:
for line in data:
json_record = json.dumps(line, ensure_ascii=False)
f.write(json_record + '\n')
def load_jsonl(input_path):
"""
Read list of objects from a JSON lines file.
"""
data = []
with open(input_path, 'r', encoding='utf-8') as f:
for line in f:
data.append(json.loads(line.rstrip('\n|\r')))
return data
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def serialize_example(image, caption):
feature = {
'image': _bytes_feature(image),
'caption': _int64_feature(caption),
}
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
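# A minimal read-back sketch (not part of this pipeline; file name is illustrative)
# for records produced by serialize_example, assuming the same feature names:
#
#   feature_description = {
#       'image': tf.io.FixedLenFeature([], tf.string),
#       'caption': tf.io.VarLenFeature(tf.int64),
#   }
#
#   def parse_example(raw_record):
#       return tf.io.parse_single_example(raw_record, feature_description)
#
#   dataset = tf.data.TFRecordDataset(["COCO_0.tfrecords"]).map(parse_example)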
def create_random_dataset(path_to_images, out_dir, max_images_per_folder=1000, words_per_caption=50):
"""
creates a paired image / text folder with random captions in the correct format to feed to
create_paired_tfrecord_dataset (for testing)
Args:
out_dir: str
path_to_images: str
glob path to images
max_images_per_folder: int
words_per_caption: int
"""
import requests
word_site = "https://www.mit.edu/~ecprice/wordlist.10000"
response = requests.get(word_site)
WORDS = response.content.splitlines()
out_dir = Path(out_dir)
jsonl_path = out_dir / "captions_data.jsonl"
os.makedirs(out_dir, exist_ok=True)
images = glob.glob(path_to_images)
print(f"{len(images)} images found")
pbar = tqdm()
folder_count = 0
for i, image in enumerate(images):
if i % 100 == 0 or i == 0:
pbar.update(100)
if i % max_images_per_folder == 0 or i == 0:
sub_folder = Path(out_dir) / str(folder_count)
os.makedirs(Path(out_dir) / str(folder_count), exist_ok=True)
folder_count += 1
data = {}
image = Path(image)
data["caption"] = " ".join([random.choice(WORDS).decode() for i in range(words_per_caption)])
data["image_path"] = str(sub_folder.relative_to(out_dir) / image.name)
shutil.copy(image, sub_folder)
dump_jsonl([data], jsonl_path, append=True)
def create_paired_dataset(path_to_jsonl, name, out_dir, examples_per_tfrecord=1000, tokenizer=None, reencode=False):
"""
takes in a jsonl with relative paths to images & captions, and saves tfrecords files with num_examples
examples to out_dir.
Folder structure:
data_folder
jsonl_file
folder_1
img1
img2
...
folder_2
img1
img2
...
...
Jsonl structure:
{"image_path": relative_image_path, "caption": caption}
{"image_path": relative_image_path, "caption": caption}
...
TODO: multiprocessing
Args:
path_to_jsonl: str / path / list of str / path
path to jsonl file
examples_per_tfrecord: int
number of examples to write to each tfrecords file
name: str
name of tfrecords files
out_dir: str / path
path to folder in which to save tfrecords
tokenizer: custom HF tokenizer
if None, defaults to GPT2TokenizerFast
"""
if tokenizer is None:
tokenizer = get_tokenizer()
if isinstance(out_dir, str):
out_dir = Path(out_dir)
os.makedirs(out_dir, exist_ok=True)
if isinstance(path_to_jsonl, PurePath) or isinstance(path_to_jsonl, str):
path_to_jsonl = [path_to_jsonl]
if not isinstance(path_to_jsonl, list):
raise TypeError(f"path_to_jsonl type not recognized, should be str, path, or list")
tfrecord_count = 0
example_count = 0
writer = tf.io.TFRecordWriter(str(out_dir / f"{name}_{tfrecord_count}.tfrecords"))
pbar = tqdm()
for path in path_to_jsonl:
path = Path(path)
data = load_jsonl(path)
for item in data:
            if example_count % examples_per_tfrecord == 0 and example_count != 0:
                writer.close()
                tfrecord_count += 1  # advance before opening the next file so shards are not overwritten
                writer = tf.io.TFRecordWriter(str(out_dir / f"{name}_{tfrecord_count}.tfrecords"))
image_path = path.parent / item["image_path"]
if reencode:
img = cv2.imread(str(image_path))
                img = cv2.imencode('.jpg', img, (cv2.IMWRITE_JPEG_QUALITY, 94))[1].tobytes()  # encode image to JPEG bytes
else:
img = open(image_path, "rb").read()
caption = tokenizer.encode(item["caption"][0])
example = serialize_example(img, caption)
writer.write(example)
example_count += 1
if example_count % 100 == 0:
pbar.set_description(f"{example_count} examples written to {tfrecord_count + 1} files")
pbar.update(100)
writer.close()
if __name__ == "__main__":
    # builds paired COCO image/caption tfrecords from a captions jsonl
create_paired_dataset("/home/data/coco/coco_captions.jsonl", "COCO", "DALLE-tfrecords", examples_per_tfrecord=1000,
tokenizer=None)
|
#!/usr/bin/env python
# coding: utf-8
# # mnist-cnn
# Let's build a super basic Convolutional Neural Network (CNN) to classify MNIST handwritten digits! We'll be using PyTorch to specify and train our network.
# ## Setup
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
# In[ ]:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# In[ ]:
EPOCHS = 2
BATCH_SIZE = 64
NUM_CLASSES = 10
# ## Loading MNIST Dataset
#
# We'll be using the MNIST dataset to train our CNN. It contains images of handwritten digits. Loading MNIST is trivial using torchvision.
#
# Before we can use the images to train the network, it's best practice to normalize them. The images are grayscale, with values in [0, 1]. The transformation maps the values into the range [-1, 1]:
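# For example, a pixel value x is mapped to (x - 0.5) / 0.5, so 0 becomes -1 and 1 becomes 1.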
# In[ ]:
transform = transforms.Compose([
transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
# In[ ]:
trainset = torchvision.datasets.MNIST(
root="./data", download=True, train=True, transform=transform
)
testset = torchvision.datasets.MNIST(
root="./data", download=True, train=False, transform=transform
)
# In[ ]:
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2
)
# ## Visualizing
#
# Let's visualize the dataset before actually using it:
# In[ ]:
def show_img(img):
img = img / 2 + 0.5
npimg = img.numpy()
plt.imshow(npimg[:, :], cmap='gray_r')
plt.show()
# In[ ]:
dataiter = iter(trainloader)
imgs, labels = next(dataiter)
show_img(imgs[0].squeeze())
print('Label: %i' % labels[0].item())
# ## Model
#
# Now we can at last define our CNN. It consists of:
# - two convolutional blocks to extract relevant features from the input image
# - three fully connected layers to process the extracted features and classify the digit images
# In[ ]:
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.model = nn.Sequential(
nn.Conv2d(1, 10, 3),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(10, 10, 3),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(),
nn.Linear(250, 120),
nn.Linear(120, 60),
nn.Linear(60, NUM_CLASSES)
)
def forward(self, x):
return self.model(x)
# ## Training
#
# First we'll try setting up pytorch to use a CUDA-capable GPU. If no GPU is detected, our CNN will be trained on CPU:
# In[ ]:
dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device "%s" for training' % dev)
# We then create an instance of our network before moving it to our training device using the `.to()` method:
# In[ ]:
neural_net = Net().to(dev)
# Next, we'll define our loss and optimizer:
# In[ ]:
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(neural_net.parameters())
# Now, let's train our network!
# In[ ]:
for epoch in range(EPOCHS):
running_loss = 0.0
for i, (imgs, labels) in enumerate(trainloader):
imgs = imgs.to(dev)
labels = labels.to(dev)
# Important!
# Clear accumulated gradients from previous iteration
# before backpropagating.
optimizer.zero_grad()
y = neural_net(imgs)
loss = criterion(y, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
        if i % 100 == 99:
            print('[%.3d / %.3d] Loss: %.9f' % (epoch, i, running_loss / 100))
            running_loss = 0.0
# ## Testing
#
# Finally, let's test the performance of our network on the test set, which contains images of digits the network hasn't seen before:
# In[ ]:
neural_net.train(False)
correct = 0
total = 0
with torch.no_grad():
for i, data in enumerate(testloader, 0):
images, labels = data
images = images.to(dev)
labels = labels.to(dev)
outputs = neural_net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (
100 * correct / total))
|
# -*- coding: utf-8 -*-
class Node:
"""
A class to represent the nodes in SCRDR tree
"""
    def __init__(self, condition, conclusion, father = None, exceptChild = None, elseChild = None, cornerstoneCases = None, depth = 0):
        self.condition = condition
        self.conclusion = conclusion
        self.exceptChild = exceptChild
        self.elseChild = elseChild
        self.cornerstoneCases = cornerstoneCases if cornerstoneCases is not None else []
self.father = father
self.depth = depth
def satisfied(self, object):
return eval(self.condition)
def executeConclusion(self, object):
exec(self.conclusion)
def appendCornerstoneCase(self, object):
self.cornerstoneCases.append(object)
def check(self, object):
if self.satisfied(object):
self.executeConclusion(object)
if self.exceptChild != None:
self.exceptChild.check(object)
else:
if self.elseChild != None:
self.elseChild.check(object)
def checkDepth(self, object, length):
if self.depth <= length:
if self.satisfied(object):
self.executeConclusion(object)
if self.exceptChild != None:
self.exceptChild.checkDepth(object, length)
else:
if self.elseChild != None:
self.elseChild.checkDepth(object, length)
def findRealFather(self):
node = self
fatherNode = node.father
        while fatherNode != None:
if fatherNode.exceptChild == node:
break
node = fatherNode
fatherNode = node.father
return fatherNode
def addElseChild(self, node):
fatherNode = self.findRealFather()
for object in fatherNode.cornerstoneCases:
if node.satisfied(object):
print("The new rule fires the cornerstone cases of its father node!!!")
self.findRealFather().cornerstoneCases.remove(object)
self.elseChild = node
return True
def addExceptChild(self, node):
for object in self.cornerstoneCases:
if node.satisfied(object):
print("The new rule fires the cornerstone cases of its father node!!!")
self.cornerstoneCases.remove(object)
self.exceptChild = node
return True
def writeToFileWithSeenCases(self, out, depth):
space = tabStr(depth)
out.write(space + self.condition + " : " + self.conclusion + "\n")
for case in self.cornerstoneCases:
out.write(" " + space + "cc: " + case.toStr() + "\n")
if self.exceptChild != None:
self.exceptChild.writeToFile(out, depth + 1)
if self.elseChild != None:
self.elseChild.writeToFile(out, depth)
def writeToFile(self, out, depth):
space = tabStr(depth)
out.write(space + self.condition + " : " + self.conclusion + "\n")
if self.exceptChild != None:
self.exceptChild.writeToFile(out, depth + 1)
if self.elseChild != None:
self.elseChild.writeToFile(out, depth)
def tabStr(length):
return "".join(["\t"] * length)
|
import primes
import time
class Timer(object):
def __enter__(self):
        self.start = time.perf_counter()
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        self.end = time.perf_counter()
self.interval = self.end - self.start
if __name__ == '__main__':
n = 39916801
# We can call pre-existing c++ code from Python
# by wrapping it in Cython
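    # A minimal sketch (hypothetical, not the actual primes module) of what such
    # a Cython wrapper might look like in primes.pyx:
    #
    #   cdef extern from "primes.hpp":
    #       bint is_prime_impl(long n)
    #
    #   def is_prime(long n):
    #       return is_prime_impl(n)
    #
    # compiled via cythonize("primes.pyx") in setup.py.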
with Timer() as t:
result = primes.is_prime(n)
print('{} (s) {}'.format(t.interval, result))
|
from .AsynchronousStatus import *
from .SynchronousStatus import *
from .errors import *
__name__ = "danbot-status"
__version__ = "0.1.1"
__author__ = "VineyS"
__license__ = "MIT"
|
import re
import requests
import json
from AnimusExceptions import *
class AnimusGenericLog:
################################
# Description:
# Initializer for the AnimusGenericLog object. Pass it a fileName and it will handle
# reduction for generic logs.
#
# Params:
# logfile - The array of lines in the logfile we are analyzing
# port - The port and protocol list to obtain a filter for
# apiKey - The api key pulled from the ~/.animus.cfg file
# baseUri - The base URI of the animus API, as stored in the ~/.animus.cfg file
################################
def __init__(self, logfile, ports, apiKey, baseUri):
self.BASE_URI = baseUri
self.API_ENDPOINT = '/va/generic'
self.apiKey = apiKey
self.unhandledLogs = []
self.features = {}
self.features['ips'] = []
self.features['ports'] = []
self.parsedLog = []
self.filter = []
# quietLogs are logs that have had noise removed
self.quietLogs = []
# noisyLogs are logs that we think are noise
self.noisyLogs = []
# alertLogs are logs where we think a noisy actor managed to do something bad
# For example, if someone has a successful auth attempt, but they
# are known to be brute forcing ssh servers, they may have successfully broken in
self.alertLogs = []
# Get the features from the file
self._getFeatures(logfile)
# These variables are now set:
# self.unhandledLogs
# self.features
# self.parsedLog
# Add port and protocol
for port in ports:
portsItem = {}
(portsItem['port'], portsItem['protocol']) = port.split(':')
self.features['ports'].append(portsItem)
#Set the filter for the file
self._getFilter()
# self.filter is now set
# Perform the analysis operation
self._analyze()
# self.noisyLogs and self.quietLogs is now set
################################
# Description:
# Print the reduced log file
#
# Params
    #    showNoisy - If False (default), yields the reduced (quiet) logs. If True, yields the logs that were removed as noise.
#
################################
def reduce(self, showNoisy=False):
if not showNoisy:
for log in self.quietLogs:
yield log['raw'].strip()
else:
for log in self.noisyLogs:
yield log['raw'].strip()
################################
# Description:
# Apply the filter to the log file
#
################################
def _analyze(self, ):
# Go through each line
for line in self.parsedLog:
if 'ip' in line:
if line['ip'] in self.filter['ips']:
self.noisyLogs.append(line)
continue
else:
self.quietLogs.append(line)
else:
self.quietLogs.append(line)
################################
# Description:
# Gets the filter for the features in the object
################################
def _getFilter(self, ):
self.filter = self._sendAuthFeatureQuery(self.features)
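        # _analyze() expects the returned filter to be a dict with an 'ips' list
        # of addresses considered noise, e.g. {'ips': ['198.51.100.7']} (illustrative value).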
################################
# Description:
# Get the feature data from the log file necessary for a reduction
#
# Params:
# logfile - The array of log lines to be analyzed
#
# Returns:
# Nothing. Sets self.parsedLog, self.features, and self.unhandledLogs
################################
def _getFeatures(self, logfile):
        REGEX_GET_IP = r'(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
# The dict that holds the features of the log file
features = {}
#print(type(logfile))
#print(len(logfile))
for line in logfile:
# Clear previous results
result = {}
# Save the raw line
result['raw'] = line
# Search for an IP in the line
m = re.search(REGEX_GET_IP, line)
# If we found one, save it
if m:
result['ip'] = m.group('ip')
if result['ip'] not in self.features['ips']:
self.features['ips'].append(result['ip'])
self.parsedLog.append(result)
################################
# Description:
# Send a query to the backend api with a list of observed features in this log file
#
# Params:
# features - The list of features we want to return a filter for
#
# Returns:
# logFilter - A list of features that should be filtered out of the log file
################################
def _sendAuthFeatureQuery(self, features):
try:
r = requests.post(self.BASE_URI + self.API_ENDPOINT, data = json.dumps(features), headers={'api_key': self.apiKey})
except requests.exceptions.ConnectionError as e:
raise AnimusAPIUnavailable("The Animus API appears to be unavailable.")
if r.status_code != 200:
raise AnimusAPIUnavailable("Request failed and returned a status of: " + str(r.status_code))
return json.loads(r.text)
|
import base64
import uuid
def base64_uuid4():
"""
Return a base64 encoded uuid4
"""
base64_encoded = base64.urlsafe_b64encode(uuid.uuid4().bytes)
base64_encoded = base64_encoded.decode("utf-8")
return base64_encoded.rstrip("=")
|
"""
TR 64 specific actions
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by Benjamin Pannier.
:license: Apache 2.0, see LICENSE for more details.
"""
from .lan import Lan, HostDetails, EthernetInfo, EthernetStatistic
from .system import System, SystemInfo, TimeInfo
from .wan import Wan, WanLinkInfo, WanLinkProperties, ConnectionInfo, ADSLInfo
from .wifi import Wifi, WifiDeviceInfo, WifiBasicInfo
from .fritz import Fritz
|
import numpy as np
import gym
from gym.utils.play import play
class DQN:
def __init__(self, env_name):
self.env = gym.make(env_name)
def run(self):
play(self.env, zoom=4)
test_class = DQN('SpaceInvaders-v0')
test_class.run()
|
'''
This agent is a base agent that works under the architecture defined in the documentation.
We use this base agent to implement many different RL algorithms.
We will keep this base agent updated, aiming to balance flexibility and ease of use.
'''
from .knowledge import Knowledge
from .interpreter import Interpreter
from .actuator import Actuator
from .experiences import Experiences
class Agent():
def __init__(self, action_space):
self.knowledge = Knowledge(action_space)
self.interpreter = Interpreter()
self.actuator = Actuator()
self.experiences = Experiences()
def get_action(self, observation):
state = self.interpreter.obs_to_state(observation)
agent_action = self.knowledge.get_action( state )
return self.actuator.agent_to_env( agent_action )
def add_experience(self, observation, reward, env_action, next_observation):
agent_action = self.actuator.env_to_agent(env_action)
state = self.interpreter.obs_to_state(observation)
next_state = self.interpreter.obs_to_state(next_observation)
self.experiences.add(state, reward, agent_action, next_state)
def start_step(self, current_step):
pass
def end_step(self, current_step):
pass
def start_episode(self, current_episode):
print('episode', current_episode)
pass
def end_episode(self, current_episode):
pass
def train(self):
self.knowledge.train(self.experiences.get())
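# A minimal usage sketch (hypothetical training loop; the env object and the
# action_space value are assumptions, not part of this module):
#
#   agent = Agent(action_space=4)
#   obs = env.reset()
#   for step in range(1000):
#       action = agent.get_action(obs)
#       next_obs, reward, done, info = env.step(action)
#       agent.add_experience(obs, reward, action, next_obs)
#       obs = next_obs
#   agent.train()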
|
import pandas as pd
import glob, re, os
import click
def split_lines(text):
text = [' '.join(i.split()) for i in re.split(r'\n{2,}', text)]
text = [i for i in text if i]
return text
def get_num(txt):
return int(re.findall(r'\d+', txt)[0])
def get_ref(txt):
return int(re.findall(r'\d+', txt)[1])
def merge_and_get_summaries(forecast_path, forecasted_lessons_file):
# forecast_path = 'eva_forecast_02_21_2020'
# forecasted_lessons_file = '~/notebooks/Cognitive_Search/sash/data/feb_20/ulm_forecasts.csv'
steps = 1000
file = f'{forecast_path}.log.{148000+steps}'
# path = '/data/home/admin01//notebooks/Jude/Presumm2/PreSumm/logs/'
path = 'logs/'
path = os.path.join(path, file)
results = {}
for suffix in ['gold', 'raw_src', 'candidate']:
with open(f'{path}.{suffix}', 'r') as f:
results[suffix] = f.readlines()
df_gen = pd.DataFrame({'human-generated': results['gold'], 'machine-generated': results['candidate']})
df_gen['lesson_num'] = df_gen['human-generated'].apply(get_num)
df_gen['ref_id'] = df_gen['human-generated'].apply(get_ref)
df = pd.read_csv(forecasted_lessons_file, usecols=[1,2,4,5])
df['reference_id'] = df['reference_id'].apply(lambda x: 0 if x!=x else x).astype(int)
df = df.where(df['isLesson']==1).dropna()
df.drop('isLesson', axis=1, inplace=True)
df['paragraph'] = df['paragraph'].apply(split_lines)
df = df.reset_index(drop=True)
df['reference_id'] = df['reference_id'].astype(int)
df['lesson_num'] = df.index
df.rename(columns={'Project Number':'project_number'}, inplace=True)
df_merged = df[['paragraph','reference_id','project_number','lesson_num']].merge(
df_gen[['machine-generated','lesson_num']], on='lesson_num')
df_merged.to_csv(f'{forecast_path}.csv')
return
@click.group()
def cli():
pass
@cli.command()
@click.argument('forecast_path')
@click.argument('forecasted_lessons_file')
def get_summaries(forecast_path, forecasted_lessons_file):
merge_and_get_summaries(forecast_path, forecasted_lessons_file)
return
if __name__=='__main__':
cli()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 11:35:43 2020
@author: willi
"""
import re
import time
import os
import numpy as np
from . import CodonDictionaries
from . import FileParser
from . import poi as POI
try:
from Bio import SeqIO
from Bio import Entrez
except:
print('BioPython is not installed, polling genbank will not be possible')
pass
class SequenceManipMethods():
'''
class that handles manipulation methods dealing with sequences
'''
def __init__(self, sequence=''):
self.sequence = sequence
self.codon_dicts = CodonDictionaries.CodonDictionaries()
#get the codon dictionaries
def optimize_ntseq(self, nt_seq, opt_dict=None):
'''
Optimizes a nucleotide sequence
Parameters
----------
nt_seq : str
nucleotide sequence string
opt_dict : dictionary, optional
a user defined dictionary to optimize over. The default is None.
Returns
-------
opt_seq : str
Optimized NT sequenced based on a given dictionary of rates
'''
if opt_dict is None:
opt_dict = self.codon_dicts.human_codon_frequency_bias_nakamura
codons = nt_seq.upper()
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
aa = [self.codon_dicts.aa_table[x] for x in seperated_codons]
opt_seq = ''
for i in range(0, len(aa)):
ind = np.argmax([opt_dict[x] for x in self.codon_dicts.aa_table_r[aa[i]]])
opt_codon = self.codon_dicts.aa_table_r[aa[i]][ind]
opt_seq = opt_seq + opt_codon
return opt_seq
def deoptimize_ntseq(self, nt_seq, deopt_dict=None):
'''
Optimizes a nucleotide sequence
Parameters
----------
nt_seq : str
nucleotide sequence string
deopt_dict : dictionary, optional
a user defined dictionary to deoptimize over. The default is None.
Returns
-------
deopt_seq : str
Deoptimized NT sequenced based on a given dictionary of rates
'''
if deopt_dict is None:
deopt_dict = self.codon_dicts.human_codon_frequency_bias_nakamura
codons = nt_seq.upper()
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
aa = [self.codon_dicts.aa_table[x] for x in seperated_codons]
opt_seq = ''
for i in range(0, len(aa)):
ind = np.argmin([deopt_dict[x] for x in self.codon_dicts.aa_table_r[aa[i]]])
opt_codon = self.codon_dicts.aa_table_r[aa[i]][ind]
opt_seq = opt_seq + opt_codon
return opt_seq
def nt2aa(self, nt_seq):
'''
Parameters
----------
nt_seq : str
nucleotide sequence.
Returns
-------
aa : str
amino acid sequence.
'''
aa = ''
nt_seq = nt_seq.upper()
for i in range(0, len(nt_seq), 3):
aa += self.codon_dicts.aa_table[nt_seq[i:i+3]]
return aa
def get_gb_file(self, accession_number, save_dir):
'''
A function to poll genbank given an accession number and pull the
relevant gb file
*args*
**accession_number**, the accession number of the sequence to find.
http://www.nslc.wustl.edu/elgin/genomics/bio4342/1archives/2006/AccReference.pdf
*keyword args*
**savetofile**, true or false to save the gb file in the same
directory as sms for future use
'''
Entrez.email = "[email protected]"
Entrez.tool = 'SingleMoleculeSimulator'
er = False
try:
handle = Entrez.efetch(db="nucleotide", rettype="gb",
retmode="text", id=accession_number)
#using "gb" as an alias for "genbank"
gb_record = SeqIO.read(handle, "genbank")
handle.close()
except:
er = True
time.sleep(2)
if er == True:
            print('HTTP Error: Could not find specified accession ID')
return
gb_rec = gb_record
#gb_obj = gb_record
#sequence_str = str(gb_record.seq)
sequence_name = gb_record.name
        filename = os.path.join(save_dir, sequence_name + '.gb')
f = open(filename, 'w')
f.write(gb_rec.format('gb'))
f.close()
def get_orfs(self, nt_seq='', min_codons=80):
'''
Returns open reading frames of the nucleotide sequence given
orfs = {'1':[proteins],
'2':[proteins],
'3':[proteins]}
*keyword args*
**nt_seq**, nucleotide sequence as a string. If left blank uses
the self.sequence_str
**min_codons**, minimum amount of codons to be considered
a protein in the open reading frame
'''
if nt_seq == '':
nt_seq = self.sequence.upper()
nt_seq = nt_seq.upper()
allstarts = np.array([m.start() for m in re.finditer(
'(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))', nt_seq)])
#allsegments = re.findall('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))',self.sequence_str)
allstops = np.array(
[m.start() for m in re.finditer('(?=[TU](?:AG|AA|GA))', nt_seq)])
start_frames = allstarts%3
stop_frames = allstops%3
min_len = min_codons*3
orf1_starts = allstarts[np.where(start_frames == 0)]
orf2_starts = allstarts[np.where(start_frames == 1)]
orf3_starts = allstarts[np.where(start_frames == 2)]
orf1_stops = allstops[np.where(stop_frames == 0)]
orf2_stops = allstops[np.where(stop_frames == 1)]
orf3_stops = allstops[np.where(stop_frames == 2)]
#self.starts = [orf1_starts, orf2_starts, orf3_starts]
#self.stops = [orf1_stops, orf2_stops, orf3_stops]
        orfs = {'1':[], '2':[], '3':[]}
laststop = 0
for start in orf1_starts:
nextstop = orf1_stops[np.where(orf1_stops > start)[0][0]]+3
if (nextstop - start) > min_len:
if nextstop != laststop:
orfs['1'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf2_starts:
nextstop = orf2_stops[np.where(orf2_stops > start)[0][0]]+3
if (nextstop - start) > min_len:
if nextstop != laststop:
orfs['2'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf3_starts:
nextstop = orf3_stops[np.where(orf3_stops > start)[0][0]]+3
if (nextstop - start) > min_len:
if nextstop != laststop:
orfs['3'].append((start, nextstop))
laststop = nextstop
return orfs
def codon_usage(self, nt_seq, codon_dict=None):
'''
Analyzes codon useage from the nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
**codon_sensitivity**, a list of codon sensitivity for the nucleotide sequence
**cai**, cai value
'''
if codon_dict == None:
codon_dict = self.codon_dicts.human_codon_frequency_bias_nakamura
codon_usage = np.zeros((1, 21))
gene_len = len(nt_seq)/3
aa_seq = self.nt2aa(nt_seq)
for i in range(len(self.codon_dicts.aa_keys)-1):
codon_usage[0, i] = len(
re.findall(self.codon_dicts.aa_keys[i], aa_seq))
        codon_usage[0, 20] = len(re.findall(r'\*', aa_seq))
codon_norm = codon_usage/gene_len
codon_sensitivity = np.round(
codon_norm*self.codon_dicts.sensitivity_fast_slow, 2)
cai_codons = []
for i in range(0, len(nt_seq), 3):
            synonmous_codons = self.codon_dicts.aa_table_r[
                self.codon_dicts.aa_table[nt_seq[i:i+3]]]
max_freq = max([codon_dict[x] for x in synonmous_codons])
cai_codons.append(codon_dict[nt_seq[i:i+3]] /max_freq)
cai = self.geomean(cai_codons)
return codon_sensitivity, cai, cai_codons
def get_proteins(self, orfs, seq):
'''
Parameters
----------
orfs : dict
dictionary of open reading frames.
{'1': [[starts],[stops] ],'2': [[starts],[stops] ],'3': [[starts],[stops] ] }
seq : str
nucleotide sequence.
Returns
-------
proteins_strs : dict
aa strings of all proteins found in the given orfs.
protein_objs : dict
container objects for proteins found in the given orf.
proteins_w_tags : dict
            container objects for any proteins with detected tags.
'''
cd = self.codon_dicts
proteins_strs = {'1':[], '2':[], '3':[]}
protein_objs = {'1':[], '2':[], '3':[]}
proteins_w_tags = {'1':[], '2':[], '3':[]}
#tagged_proteins = {a:[] for a in cd.tag_dict.keys()}
#tagged_protein_seq = {a:[] for a in cd.tag_dict.keys()}
for i in range(len(orfs)):
for j in range(len(orfs[str(i+1)])):
protein = POI.poi()
pro = self.nt2aa(seq[orfs[str(i+1)][j][0]:orfs[str(i+1)][j][1]])
nt_seq = seq[orfs[str(i+1)][j][0]:orfs[str(i+1)][j][1]]
# if pro[-1] == '*':
# pro = pro[:-1]
# nt_seq = nt_seq[:-3]
protein.aa_seq = pro
protein.nt_seq = nt_seq
proteins_strs[str(i+1)].append(pro)
protein.gene_length = len(pro) #length of the gene
protein.tag_length = 0 #length of the tags
protein.total_length = len(pro) #total length of the full amino acid sequence
protein.source_seq = seq
protein.orf = i
protein.loc = (orfs[str(i+1)][j][0], orfs[str(i+1)][j][1]+3)
protein.tags = []
protein_objs[str(i+1)].append(protein)
for i in range(len(orfs)):
for pr in protein_objs[str(i+1)]:
tag_detected = False
for tag in cd.tag_dict.keys():
if cd.tag_dict[tag] in pr.aa_seq:
tag_detected = True
if tag_detected:
self.analyze_protein_w_tags(pr)
pr.tag_added = False
proteins_w_tags[str(i+1)].append(pr)
else:
self.add_tag_to_protein(pr)
pr.tag_added = True
return proteins_strs, protein_objs, proteins_w_tags
def add_tag_to_protein(self, POI, tag_type='T_Flag'):
'''
Parameters
----------
POI : poi object
protein of interest object.
tag_type : str, optional
What kind of tag to append onto the protein object. The default is 'T_Flag'.
Returns
-------
None.
'''
cd = self.codon_dicts
POI.nt_seq = cd.tag_full[tag_type] + POI.nt_seq
POI.aa_seq = self.nt2aa(POI.nt_seq)
self.analyze_protein_w_tags(POI)
def analyze_protein_w_tags(self, poi_obj, epitope_loc='front'):
cd = self.codon_dicts
nt_seq = poi_obj.nt_seq
aa_seq = poi_obj.aa_seq
#self.POI.name = self.sequence_name
total_length = len(poi_obj.aa_seq)
'''
for key in self.tagged_proteins:
if protein in self.tagged_proteins[key]:
self.POI.tag_types.append(key)
'''
poi_obj.tag_types = []
for tag in cd.tag_dict.keys():
if cd.tag_dict[tag] in aa_seq:
poi_obj.tag_types.append(tag)
#''.join(sms.poi[0].split('DYKDDDDK')
poi_obj.tag_epitopes = {a:[] for a in poi_obj.tag_types}
gs = poi_obj.aa_seq
for i in range(len(poi_obj.tag_types)):
try:
nt_tag = cd.tag_full[poi_obj.tag_types[i]]
aa_tag = self.nt2aa(nt_tag)
except:
epi = cd.tag_dict[poi_obj.tag_types[i]]
firstep = poi_obj.aa_seq.find(epi)
lastep = len(poi_obj.aa_seq) - poi_obj.aa_seq[::-1].find(epi[::-1])
aa_tag = poi_obj.aa_seq[firstep:lastep]
nt_tag = poi_obj.nt_seq[3*firstep:3*lastep]
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(cd.tag_dict[poi_obj.tag_types[i]])/2)
if epitope_loc == 'back':
offset = len(cd.tag_dict[poi_obj.tag_types[i]])
poi_obj.tag_epitopes[poi_obj.tag_types[i]] = [
m.start()+1+offset for m in re.finditer(
cd.tag_dict[poi_obj.tag_types[i]], poi_obj.aa_seq)]
gs = gs.replace(aa_tag, '')
poi_obj.gene_seq = gs
poi_obj.gene_length = len(gs)
poi_obj.total_length = total_length
poi_obj.tag_seq = aa_tag
poi_obj.tag_length = len(aa_tag)
codons = []
for i in range(0, len(nt_seq), 3):
codons.append(nt_seq[i:i+3])
#POI.codons = codons
#POI.codon_sensitivity, POI.CAI, POI.CAI_codons = self.codon_usage(POI.nt_seq)
poi_obj.ki = .03
poi_obj.ke = 10
poi_obj.kt = 10
def seq_to_protein_obj(self, sequence_str, min_codons=80):
orfs = self.get_orfs(sequence_str, min_codons=min_codons)
_, proteins, _ = self.get_proteins(
orfs, sequence_str)
return proteins
def open_seq_file(self, seqfile, min_codons=80):
'''
Reads a sequence file, either a .txt file or a .gb genbank file
*args*
**seqfile**, sequence file either in txt, gb, gbk format
'''
fp = FileParser.FileParser()
#TODO expose this to the user:
#sequence_name = fp.get_name(seqfile)
#sequence_description = fp.get_description(seqfile)
sequence_str = fp.get_sequence(seqfile).upper()
orfs = self.get_orfs(sequence_str, min_codons=min_codons)
protein_strs, proteins, tagged_proteins = self.get_proteins(orfs, sequence_str)
return protein_strs, proteins, tagged_proteins, sequence_str
def get_tag_loc(self, aa_seq, tag, epitope_loc='front'):
cd = self.codon_dicts
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(tag)/2)
if epitope_loc == 'back':
offset = len(tag)
return [m.start()+1+offset for m in re.finditer(tag, aa_seq)]
@staticmethod
def geomean(iterable):
'''geometric mean used for codon sensitivity calculations
'''
a = np.array(iterable)
return a.prod()**(1.0/len(a))
|
import numpy as np
import gc
import time
import cv2
class database:
def __init__(self, params):
self.size = params['db_size']
self.img_scale = params['img_scale']
self.states = np.zeros([self.size,84,84],dtype='uint8') #image dimensions
self.actions = np.zeros(self.size,dtype='float32')
self.terminals = np.zeros(self.size,dtype='float32')
self.rewards = np.zeros(self.size,dtype='float32')
self.bat_size = params['batch']
self.bat_s = np.zeros([self.bat_size,84,84,4])
self.bat_a = np.zeros([self.bat_size])
self.bat_t = np.zeros([self.bat_size])
self.bat_n = np.zeros([self.bat_size,84,84,4])
self.bat_r = np.zeros([self.bat_size])
self.counter = 0 #keep track of next empty state
self.flag = False
return
def get_batches(self):
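        # Each sampled state is a stack of the 4 most recent frames
        # (indices idx-3..idx); the next-state stack shifts the window by one
        # frame (idx-2..idx+1). Indices near the write pointer (self.counter)
        # are skipped so a sample never straddles old and new data.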
for i in range(self.bat_size):
idx = 0
while idx < 3 or (idx > self.counter-2 and idx < self.counter+3):
idx = np.random.randint(3,self.get_size()-1)
self.bat_s[i] = np.transpose(self.states[idx-3:idx+1,:,:],(1,2,0))/self.img_scale
self.bat_n[i] = np.transpose(self.states[idx-2:idx+2,:,:],(1,2,0))/self.img_scale
self.bat_a[i] = self.actions[idx]
self.bat_t[i] = self.terminals[idx]
self.bat_r[i] = self.rewards[idx]
return self.bat_s,self.bat_a,self.bat_t,self.bat_n,self.bat_r
def insert(self, prevstate_proc,reward,action,terminal):
self.states[self.counter] = prevstate_proc
self.rewards[self.counter] = reward
self.actions[self.counter] = action
self.terminals[self.counter] = terminal
#update counter
self.counter += 1
if self.counter >= self.size:
self.flag = True
self.counter = 0
return
def get_size(self):
if self.flag == False:
return self.counter
else:
return self.size
|