code (string, length 22-1.05M) | apis (list, length 1-3.31k) | extract_api (string, length 75-3.25M)
---|---|---|
import numpy as np
import random
import sys
chainlength = int(sys.argv[1])
dfname = sys.argv[2]
outfl = 'result.data'
cluster_size = int(sys.argv[3])
def readsize(dfname):
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if content and content[-1] == 'xhi':
return 2*float(content[1])
def readdata(dfname, chainlen):
X=[]
Xi=[]
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if len(content) == 9:
# print(content)
if (int(content[0]) % chainlen == 0 or int(content[0]) % chainlen == 1) and int(content[2]) != 3 and int(content[2]) != 4 :
X.append([float(content[i]) for i in range(3,6)])
Xi.append(int(content[0]))
return np.array(X), np.array(Xi)
def initmeans(n):
M=[]
for i in range(n):
M.append([size*(random.random()-0.5),size*(random.random()-0.5),size*(random.random()-0.5)])
return np.array(M)
def SetDistMat(X, means):
distmat_dtype = [('key',int), ('dist',float)]
distmat = np.empty((n,k),dtype=distmat_dtype)
for i in range(n):
distmat[i,:] = [(c[0], GetDist(X[i], c[1])) for c in enumerate(means)]
distmat[i,:] = np.sort(distmat[i,:], order='dist')
return distmat
def GetDist(x, c):
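    # Minimum-image distance: wrap the displacement vector back into the
    # periodic box (side lengths in boxl) before taking the Euclidean norm.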
dist = np.linalg.norm(x-c-boxl*np.around((x-c)/boxl))
return dist
def Get_plst(assigned, distmat, full):
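    # Build a priority list of still-unassigned objects. For each object we
    # record its nearest non-full cluster (bestkey/mindist) and the spread
    # between its farthest and nearest non-full clusters; sorting by that
    # spread means the most constrained objects end up last in the list and
    # are popped (assigned) first by InitialAssignment.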
plst = []
for i in range(n):
if (i not in assigned):
j = 0
while j<k:
if (not full[distmat[i,j][0]]):
bestkey = distmat[i,j][0]
mindist = distmat[i,j][1]
break
else:
j += 1
for j in range(k-1,-1,-1):
if (not full[distmat[i,j][0]]):
maxdist = distmat[i,j][1]
break
plst.append((i, bestkey, maxdist-mindist))
plst.sort(key=lambda t:t[2])
return plst
def InitialAssignment(distmat):
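    # Greedy capacity-constrained assignment: objects are assigned to their
    # nearest non-full cluster until every object is placed, and a cluster is
    # marked full once it reaches cluster_size members.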
clusters = {}
full = np.zeros(k,dtype=bool) # a boolean array that records which clusters are full
    assigned = [] # a list of objects that have been assigned to a cluster
plst = Get_plst(assigned, distmat, full)
while (len(plst)):
temp = plst.pop()
try:
if (len(clusters[temp[1]])<cluster_size):
clusters[temp[1]].append(temp[0])
assigned.append(temp[0])
else:
full[temp[1]] = True
plst = Get_plst(assigned, distmat, full)
except KeyError:
clusters[temp[1]] = [temp[0]]
assigned.append(temp[0])
return clusters
def CalcMeans(X, oldmeans, clusters):
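    # Recompute each cluster mean, unwrapping member coordinates across the
    # periodic boundary relative to the previous mean, then wrap the new mean
    # back into the box.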
means = np.zeros((k,3))
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
means[key] += X[i]-boxl*np.around((X[i]-oldmeans[key])/boxl)
means[key] /= len(clusters[key])
means[key] -= boxl*np.around(means[key]/boxl)
return means
def SortObj(X, clusters, means, distmat):
    objlst = [] # list of objects ordered in ascending delta of the current
# assignment and the best possible alternate assignment
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
currdist = GetDist(X[i],means[key])
mindist = distmat[i,0][1]
objlst.append((i, key, currdist-mindist))
objlst.sort(key=lambda t:t[2], reverse=True)
return objlst
def Transfer(obj, clufrom, cluto, clusters):
clusters[clufrom].remove(obj)
clusters[cluto].append(obj)
return clusters
def WriteResult(file, X, means, clusters):
with open(file, 'w') as fl:
# keys = sorted(clusters.keys())
# i = 1
# for key in keys:
# for obj in clusters[key]:
# fl.write("%d\t%d\t%f\t%f\t%f\t%d\n"\
# %(obj,Xi[obj], X[obj][0], X[obj][1], X[obj][2], key))
# i = i + 1
for c in enumerate(means):
fl.write("%d\t%f\t%f\t%f"%(c[0], c[1][0], c[1][1], c[1][2]))
for obj in clusters[c[0]]:
fl.write("\t%d"%(Xi[obj]))
fl.write('\n')
# i = i + 1
return
# This function performs statistical analysis on the clustering results
def ClusterStat(X, means, clusters):
# Average distance between means
means_avg = 0.
for i in range(k-1):
for j in range(i+1,k):
means_avg += GetDist(means[i], means[j])
means_avg /= (k*(k-1)/2.)
# Average distance between obj and mean in a cluster
obj2mean_avg = np.zeros(k)
# Variance of the distances between obj and mean in a cluster
obj2mean_var = np.zeros(k)
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
obj2mean = GetDist(X[i], means[key])
obj2mean_avg[key] += obj2mean
obj2mean_var[key] += obj2mean*obj2mean
obj2mean_avg[key] /= len(clusters[key])
obj2mean_var[key] /= len(clusters[key])
obj2mean_var[key] = np.sqrt(obj2mean_var[key])
# Average within cluster distances between objects
winclu_avg = np.zeros(k)
# Average of within cluster distances of all clusters
winclu_grandavg = 0.
for key in keys:
for i in clusters[key]:
x = X[i]
for j in clusters[key]:
if j>i:
winclu_avg[key] += GetDist(x, X[j])
s = len(clusters[key])
winclu_avg[key] /= (s*(s-1)/2)
winclu_grandavg += winclu_avg[key]
winclu_grandavg /= k
# write the summary
print("average distance among means: %f"%means_avg)
#print("average distance from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_avg[i]))
#print("variance of distances from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_var[i]))
#print("within-cluster average distances:")
#for i in range(k):
# print("cluster %i: %f"%(i, winclu_avg[i]))
print("grand average of within-cluster average distances: %f"%winclu_grandavg)
return
X, Xi = readdata(dfname, chainlength)
size = readsize(dfname)
boxl = np.array([size, size, size])
n = len(X)
k = int(len(X)/cluster_size)
# Set up the database of objects
# X = readdata(dfname, chainlength)
# Choose random initial means for K-means
means = initmeans(k)
# Set up initial clusters
distmat = SetDistMat(X, means)
clusters = InitialAssignment(distmat)
## debug code
#keys = sorted(clusters.keys())
#for key in keys:
# print("cluster %i:"%key)
# print(clusters[key])
## end of debug
# Iteration step
for iter in range(100):
active = 0 # indicate the number of transfers in the current iteration
tranlst = (-1)*np.ones(k, dtype='int') # set up transfer list for each cluster
# Compute the cluster means
oldmeans = means.copy()
means = CalcMeans(X, oldmeans, clusters)
# Get statistics about the clustering
#ClusterStat(X, means, clusters)
## debug code
#print("old means:")
#print(oldmeans)
#print("new means:")
#print(means)
## end of debug
# For each object, compute the distances to the cluster means
distmat = SetDistMat(X, means)
# Sort objects based on the delta of the current assignment and the best
# possible alternate assignment
objlst = SortObj(X, clusters, means, distmat)
##debug code
#print(objlst)
##return
#end of debug
    # For each element, by priority:
while (len(objlst)):
(i, key, temp) = objlst.pop()
obj2key = GetDist(X[i], means[key])
        transferred = False # record whether any transferring has occurred for i
if (key == distmat[i,0][0]):
##debug
#print("%i is already the opt cluster for obj %i. no transfer"%(clu, i))
##end of debug
continue
        # For each of the other clusters, by element gain:
else:
for j in range(k):
clu = distmat[i,j][0] # the key of another cluster
                objgain = obj2key - distmat[i,j][1] # gain by transferring i from cluster key to clu
if (clu==key): # already in the cluster
continue
if (len(clusters[clu]) < cluster_size):
active += 1
transferred = True
clusters = Transfer(i, key, clu, clusters)
##debug
#print("cluster %i not full. transfer obj %i from cluster %i to it."%(clu, i, key))
##end of debug
break
elif (tranlst[clu] != -1): # if the tranlst of another cluster is not empty
# distance between the obj in the tranlst and the current cluster
tran2key = GetDist(X[tranlst[clu]], means[key])
tran2clu = GetDist(X[tranlst[clu]], means[clu])
                    # gain by transferring the obj in tranlst from cluster clu to key
trangain = tran2clu - tran2key
                    if (objgain + trangain > 0): # transfer if the sum of gains is positive, i.e. a net gain
active += 2
transferred = True
clusters = Transfer(i, key, clu, clusters)
clusters = Transfer(tranlst[clu], clu, key, clusters)
##debug
#print("obj %i is transfered from cluster %i to %i"%(i, key, clu))
#print("obj %i is transfered from cluster %i to %i"%(tranlst[clu], clu, key))
#print("objgain: %f, trangain: %f"%(objgain, trangain))
##end of debug
tranlst[clu] = -1 # reset the tranlst to empty
break
if (not transferred):
tranlst[key] = i
##debug
#print("add obj %i in cluster %i to the transfer list"%(i, key))
##end of debug
    # if nothing was transferred during this iteration, the clustering has converged
if (not active):
break
#debug code
print("number of transfers in iter %i: %i\n"%(iter+1, active))
#end of debug
print("K-means clustering converged in %d iterations!\n"%(iter+1))
# Output the clustering results
WriteResult(outfl, X, means, clusters)
ClusterStat(X, means, clusters)
# print(X)
| [
"numpy.sqrt",
"numpy.ones",
"numpy.sort",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.around",
"random.random"
]
| [((6414, 6442), 'numpy.array', 'np.array', (['[size, size, size]'], {}), '([size, size, size])\n', (6422, 6442), True, 'import numpy as np\n'), ((1052, 1063), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (1060, 1063), True, 'import numpy as np\n'), ((1155, 1192), 'numpy.empty', 'np.empty', (['(n, k)'], {'dtype': 'distmat_dtype'}), '((n, k), dtype=distmat_dtype)\n', (1163, 1192), True, 'import numpy as np\n'), ((2155, 2178), 'numpy.zeros', 'np.zeros', (['k'], {'dtype': 'bool'}), '(k, dtype=bool)\n', (2163, 2178), True, 'import numpy as np\n'), ((2845, 2861), 'numpy.zeros', 'np.zeros', (['(k, 3)'], {}), '((k, 3))\n', (2853, 2861), True, 'import numpy as np\n'), ((4754, 4765), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (4762, 4765), True, 'import numpy as np\n'), ((4851, 4862), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (4859, 4862), True, 'import numpy as np\n'), ((5317, 5328), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (5325, 5328), True, 'import numpy as np\n'), ((863, 874), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (871, 874), True, 'import numpy as np\n'), ((876, 888), 'numpy.array', 'np.array', (['Xi'], {}), '(Xi)\n', (884, 888), True, 'import numpy as np\n'), ((1316, 1352), 'numpy.sort', 'np.sort', (['distmat[i, :]'], {'order': '"""dist"""'}), "(distmat[i, :], order='dist')\n", (1323, 1352), True, 'import numpy as np\n'), ((5218, 5244), 'numpy.sqrt', 'np.sqrt', (['obj2mean_var[key]'], {}), '(obj2mean_var[key])\n', (5225, 5244), True, 'import numpy as np\n'), ((6980, 7003), 'numpy.ones', 'np.ones', (['k'], {'dtype': '"""int"""'}), "(k, dtype='int')\n", (6987, 7003), True, 'import numpy as np\n'), ((3090, 3118), 'numpy.around', 'np.around', (['(means[key] / boxl)'], {}), '(means[key] / boxl)\n', (3099, 3118), True, 'import numpy as np\n'), ((1426, 1451), 'numpy.around', 'np.around', (['((x - c) / boxl)'], {}), '((x - c) / boxl)\n', (1435, 1451), True, 'import numpy as np\n'), ((2985, 3025), 'numpy.around', 'np.around', (['((X[i] - oldmeans[key]) / boxl)'], {}), '((X[i] - oldmeans[key]) / boxl)\n', (2994, 3025), True, 'import numpy as np\n'), ((964, 979), 'random.random', 'random.random', ([], {}), '()\n', (977, 979), False, 'import random\n'), ((991, 1006), 'random.random', 'random.random', ([], {}), '()\n', (1004, 1006), False, 'import random\n'), ((1018, 1033), 'random.random', 'random.random', ([], {}), '()\n', (1031, 1033), False, 'import random\n')] |
from django.test import TestCase
from django.utils import timezone
from accounts.models import CustomUser, CustomUserManager
class UserCreateTestCase(TestCase):
def test_create_user_correctly(self):
"Creating users correctly"
new_user = CustomUser.objects.create(
email="<EMAIL>",
name="<NAME>",
phone="09876543210",
school="Some University",
is_staff="False",
is_active="True",
date_joined=timezone.now())
self.assertTrue(isinstance(new_user, CustomUser))
self.assertEqual(new_user.get_full_name(), "<NAME>")
self.assertEqual(new_user.get_short_name(), "<NAME>")
| [
"django.utils.timezone.now"
]
| [((498, 512), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (510, 512), False, 'from django.utils import timezone\n')] |
import pytest
from pji.utils import ValueProxy
@pytest.mark.unittest
class TestUtilsValue:
def test_value_proxy_init(self):
value = ValueProxy()
assert value.value is None
value = ValueProxy(233)
assert value.value == 233
def test_value_proxy_set(self):
value = ValueProxy()
value.value = 233
assert value.value == 233
value.value = -27
assert value.value == -27
| [
"pji.utils.ValueProxy"
]
| [((147, 159), 'pji.utils.ValueProxy', 'ValueProxy', ([], {}), '()\n', (157, 159), False, 'from pji.utils import ValueProxy\n'), ((212, 227), 'pji.utils.ValueProxy', 'ValueProxy', (['(233)'], {}), '(233)\n', (222, 227), False, 'from pji.utils import ValueProxy\n'), ((315, 327), 'pji.utils.ValueProxy', 'ValueProxy', ([], {}), '()\n', (325, 327), False, 'from pji.utils import ValueProxy\n')] |
#Intro Page
from tkinter import *
from PIL import Image, ImageTk
import cv2
#----------------------------Start Function--------------------------#
def start(event):
label1.destroy()
import log
win.destroy()
log.main()
#------------------------Main Window---------------------------------#
def main_window():
global win
global label1
win = Tk()
win.title('Library Management System')
win.iconbitmap("images/main_icon.ico")
win.bind('<Key>', start) # start function on pressing any key
win.state('zoomed')
# opens video
cap = cv2.VideoCapture("images/vid.MP4")
global n
n = 0
#-----------------------------------------------------------------
# defining show function
def show():
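        # Stream roughly the first 30 video frames into the label, rescheduling
        # itself every 10 ms, then replace the video with the
        # "Press any Key to continue" prompt.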
global n # frame count
n = n+1
if n <= 30:
rest, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image).resize((1600, 850))
imgtk = ImageTk.PhotoImage(image=img)
label1.imgtk = imgtk
label1.configure(image=imgtk)
win.after(10, show)
else:
label1.destroy()
frm = Frame(win, bg='black')
frm.place(relx=0, rely=0, relwidth=1, relheight=1)
label = Label(frm, text='Press any Key to continue',
bg='black', fg='white')
label.place(relx=0.45, rely=0.5)
#-----------------------------------------------------------------
label1 = Label(win)
label1.place(relx=0, rely=0, relheight=1, relwidth=1)
show()
win.mainloop()
#-----------------------------------------------------------------
main_window()
| [
"PIL.Image.fromarray",
"log.main",
"cv2.VideoCapture",
"cv2.cvtColor",
"PIL.ImageTk.PhotoImage"
]
| [((225, 235), 'log.main', 'log.main', ([], {}), '()\n', (233, 235), False, 'import log\n'), ((584, 618), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""images/vid.MP4"""'], {}), "('images/vid.MP4')\n", (600, 618), False, 'import cv2\n'), ((887, 926), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGBA'], {}), '(frame, cv2.COLOR_BGR2RGBA)\n', (899, 926), False, 'import cv2\n'), ((1011, 1040), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'img'}), '(image=img)\n', (1029, 1040), False, 'from PIL import Image, ImageTk\n'), ((945, 970), 'PIL.Image.fromarray', 'Image.fromarray', (['cv2image'], {}), '(cv2image)\n', (960, 970), False, 'from PIL import Image, ImageTk\n')] |
"""
This module provides helper functions to support exercises during AM1
with outliers, robust regression and template regression in the CORE
data analytics workshop series, week 4.
"""
import numpy as np
import pandas as pd
import math
from collections import namedtuple
def recovery_sulphur_dataframe_with_outliers(outlier_probability):
"""Return dataframe representing recovery as a function of sulphur.
Parameters:
----------
outlier_probability:
This floating point parameter should range between 0 and 1
and is probability of an observation being an outlier.
Returns:
-------
Pandas dataframe:
A dataframe is returned with two series, the first being observed
recovery, and the second being sulphur %. The data may be sampled
from the true underlying relationship, plus gaussian noise, or
may be an outlier value taken from a non-gaussian distribution.
The proportion of outliers to non-outliers will depend on
the outlier_probability parameter.
"""
# Check that the outlier_probability is an ordinary number.
assert isinstance(outlier_probability, (float, int))
# As it's a probability, ensure that it ranges between 0 and 1.
assert outlier_probability >= 0.0
assert outlier_probability <= 1.0
# If no exceptions have been thrown then we likely have a valid input.
# Get 50 pairs of sulphur features and recovery labels
sulphur_percent = _draw_sulphur_observations(50)
recovery_percent = _observe_recovery(sulphur_percent,
outlier_probability)
return pd.DataFrame({'metal_recovery_percent': recovery_percent,
'feed_sulphur_percent': sulphur_percent})
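# Illustrative note (not part of the original module): calling
# recovery_sulphur_dataframe_with_outliers(0.1) returns a 50-row dataframe in
# which each observation has a ~10% chance of being drawn from the outlier
# model rather than from the noise-free recovery curve.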
def _initialise_randomstate(seed):
""" Use RandomState object with seed set."""
return np.random.RandomState(seed)
def _draw_sulphur_observations(count):
rs = _initialise_randomstate(7)
# draw "count" sulphur observations from a uniform distribution of
# sulphur percentages between 0.15% and 1.35%
sulphur_percent = rs.uniform(0.15, 1.35, count)
return sulphur_percent
def _draw_dilithium_observations(count):
rs = _initialise_randomstate(8)
return rs.uniform(25, 35, count)
def _draw_kryptonite_observations(count):
rs = _initialise_randomstate(9)
return rs.uniform(20, 25, count)
def _draw_unobtainium_observations(count):
rs = _initialise_randomstate(10)
return rs.uniform(0, 7, count)
def _draw_quartz_observations(count):
rs = _initialise_randomstate(11)
return rs.uniform(25, 35, count)
def _observe_recovery(sulphur_percent, outlier_probability):
"""Returns an array of metal recoveries.
This method returns an array of metal recoveries given both
an array of sulphur percentages and the probability of an
outlier being observed.
"""
recovery_percent = np.zeros_like(sulphur_percent)
is_outlier = _is_outlier(outlier_probability, len(sulphur_percent))
for index in range(0, len(recovery_percent)):
if is_outlier[index]:
recovery_percent [index]= _return_outlier_model_of_recovery(sulphur_percent[index])
else:
recovery_percent [index]=_noise_free_model_of_recovery(sulphur_percent[index])
return recovery_percent
def _noise_free_model_of_recovery(sulphur):
"""This method returns a metal recovery for a given sulphur %."""
return 74.81 - 6.81/sulphur
def _return_outlier_model_of_recovery(sulphur):
return (74.81 - 6.81/sulphur)/3
def _is_outlier(outlier_probability, how_many):
"""Return true/false numpy array
"""
rs = _initialise_randomstate(5)
uniformly_distributed = rs.uniform(0, 1, how_many)
is_outlier = np.zeros_like(uniformly_distributed)
for index in range(0, len(is_outlier)):
is_outlier[index]=uniformly_distributed[index]>(1-outlier_probability)
return is_outlier
def add_gaussian_noise(noise_free_input, mean, sigma):
"""Adds gaussian noise to vector, given mean and sigma
"""
bins = len(noise_free_input)
noise = np.random.normal(mean, sigma, bins)
return noise_free_input + noise
def gaussian_fwhm_pdf(X, height, x_position, fwhm):
"""Returns guassian probability distribution function, given FWHM
This computes a gaussian probability density function (pdf) given a
Full Width at Half Maximum (FWHM) instead of standard deviation, and
scales it by the height parameters. If the height is one, then the
area of the guassian will also be unity, as required for a pdf, and
for preserving area when used as an impulse response function in
convolution operations.
Note, this returns the function, it does not sample from the
distribution.
"""
return gaussian_pdf(X, height, x_position, fwhm / (2 * math.sqrt(2 * math.log(2))))
def gaussian_pdf(X, area, x_position, standard_deviation):
"""Returns gaussian probability distribution function multiplied by area.
This computes a gaussian with unit area and multiplies it
by the area parameter. It is translated to be centered
on x_position and has the width specified by standard_deviation.
Unit area gaussians are used as probability distributions functions,
and are also important in convolutions, as area of the convolution
of two functions is the product of their areas. If it is important
for the convolution to preserve area of a function when convolved
with a gaussian then that gaussian needs to have unit area. Preserving
area also implies conservation of energy in many physical models.
It can be shown that the integral of the gaussian function is unity
    when the gaussian's height is scaled as a function of standard_deviation
as:
height_scaling = 1/(standard_deviation*sqrt(2*pi))
    So this function multiplies the height of the gaussian by this factor and
then multiplies this result by the area parameter that is passed in.
    If the area parameter is 1, then the height of this gaussian will also
be 1 for all standard deviations, otherwise the area will be set by the
area parameter. The relationship between height and area, and the scaling
of height by the second parameter below, will be made clearer by
    also studying the gaussian function.
"""
return gaussian(X, area / (standard_deviation * math.sqrt(2 * math.pi)), x_position,
standard_deviation)
def gaussian(X, height, x_position, standard_deviation):
"""Return standard gaussian function
This is the unnormalised gaussian function
f(x)=height*exp(-(x-x_position)^2/(2*standard_deviation^2))
Parameters
----------
height:
This is the maximum of the gaussian peak.
This function does not normalise to constant area, the caller
must do this if this is what they want.
x_position:
This is the x position of the centre of the gaussian. If the
        gaussian is being used to apply the impulse response of an
instrument applied to an XRD reflection, then this will be the
two-theta position of the peak.
standard_deviation:
        The standard deviation of the gaussian curve.
If this function is being applied in spectroscopy, optics or
electrical engineering, it is common for gaussians to be
defined in terms of Full Width at Half Maximum (FWHM), which
is the width of the peak when the height drops to half
of the peak height, specified by the height parameter. If
the x-axis represents frequency, and the function height
is proportional to energy or power, then this will be the
gaussian's bandwidth, that is, the width between the -3db points.
To convert from FWHM to standard deviation use the relationship:
FWHM = 2*sqrt(2*log(2)) * standard_deviation
Returns
-------
double:
Evaluated gaussian function.
"""
return height * math.e**(-(X - x_position)**2 / 2 / standard_deviation**2)
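# Illustrative note (not part of the original module): using the relationship
# FWHM = 2*sqrt(2*log(2))*standard_deviation ~= 2.3548*standard_deviation, a
# peak with a FWHM of 4 two-theta degrees corresponds to a standard deviation
# of roughly 1.70 degrees.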
class MultichannelXAxis:
"""Set up an X axis for isntrument
This object is set up with three inputs, min_x is the minimum value
on the axis. In the example I've chosen 5. The max_x
value is the highest value on the x axis, and spacing is
the x spacing between channels. In the example I've chosen
a max_x of 90 and spacing of 0.2. The unit is two-theta
degrees, and this unit (and the axis values) come from the
world of x-ray diffraction (XRD). We're describing the x-axis
of a low resolution XRD instrument.
The object's as_vector method can return the x_axis as an array
of numbers using numpy's linspace method, which we've already used
for plotting and other purposes.
"""
def __init__(self, min_x, max_x, spacing):
self._min = min_x
self._max = max_x
self._spacing = spacing
self._channel_count = \
round((self.max - self.min) / self.spacing + 1)
        self._label = r"$2\theta$ (degrees)"
@property
def min(self):
"""Return minimum two-theta for diffractogram x-axis."""
return self._min
@property
def max(self):
"""Return maximum two-theta for diffractogram x-axis."""
return self._max
@property
def spacing(self):
"""Return channel spacing in two-theta for diffractogram x-axis."""
return self._spacing
@property
def channel_count(self):
"""Return the count of channels in this diffractogram."""
return self._channel_count
@property
def label(self):
"""Return the x-axis label, for use with plot and report generation."""
return self._label
@property
def as_vector(self):
"""Return a numpy vector containing two-theta values for each channel."""
x_axis_vector = np.linspace(self.min, self.max, self.channel_count)
return x_axis_vector
def _apply_convolution_kernals(x_axis_vector, intensity, two_theta_angle,
instrument_broadening_fwhm,
reflection_broadening_fwhm):
"""Apply gaussian kernel for instrument broadening only."""
def _add_gaussian_fwhms(fwhm1, fwhm2):
sigma_fwhm_conversion_constant = 2*math.sqrt(2*math.log(2))
sigma_1 = fwhm1/sigma_fwhm_conversion_constant
sigma_2 = fwhm2/sigma_fwhm_conversion_constant
#squares of std_dev (ie sigma^2 which is variance) are additive
sigma_summed = math.sqrt(sigma_1*sigma_1 + sigma_2*sigma_2)
return sigma_summed*sigma_fwhm_conversion_constant
fwhm = _add_gaussian_fwhms (instrument_broadening_fwhm,
reflection_broadening_fwhm)
return gaussian_fwhm_pdf(x_axis_vector, intensity, two_theta_angle,
fwhm)
def create_templates_matrix():
"""Create templates for four test pure components.
This creates templates for quartz, dilithium, kryptonite and
unobtainium, in that order. The templates are returned
in an array where the first column is quartz, and the last is
unobtainium. If you plot them, you'll see gently varying
squiggly lines.
"""
# Create a templates matrix containing space for four templates, plus
# a column of ones.
x_axis = MultichannelXAxis(5, 90, 0.2)
template_count = 4
templates_matrix = np.zeros((x_axis.channel_count, template_count+1))
# set 4 two-theta units of instrument broadening
instrument_broadening = 4
# create a tuple for each reflection, and add it to a list. The loop
# then grabs each reflection from the list and then adds it to the
# template. The first value in the tuple is intensity, the second
# two-theta angle and the third is how much broadening to apply.
Reflection = namedtuple('Reflection', ('intensity', 'two_theta', 'broadening'))
quartz_reflections = []
quartz_reflections.append (Reflection(intensity=10.0, two_theta=25.0, broadening=3.0))
quartz_reflections.append (Reflection(13.0, 38.0, 6.0))
quartz_reflections.append (Reflection(10.0, 43.0, 2.0))
quartz_reflections.append (Reflection(25.0, 60, 2.0))
dilithium_reflections = []
dilithium_reflections.append (Reflection(25.0, 80, 1.0))
kryptonite_reflections = []
#kryptonite_reflections.append (Reflection(intensity=12.0, two_theta=25.0, broadening=9.0))
kryptonite_reflections.append (Reflection(17.0, 12.0, 1.0))
kryptonite_reflections.append (Reflection(19.0, 43.0, 12.0))
#kryptonite_reflections.append (Reflection(4.0, 70, 2.0))
#kryptonite_reflections.append (Reflection(32.0, 74, 2.0))
unobtainium_reflections = []
#unobtainium_reflections.append (Reflection(intensity=4.0, two_theta=25.0, broadening=12.0))
unobtainium_reflections.append (Reflection(5.0, 18.0, 6.0))
unobtainium_reflections.append (Reflection(1.0, 23.0, 1.0))
unobtainium_reflections.append (Reflection(5.0, 31.0, 2.0))
unobtainium_reflections.append (Reflection(3.0, 55.0, 6.0))
unobtainium_reflections.append (Reflection(7.0, 58.0, 1.0))
#unobtainium_reflections.append (Reflection(5.0, 80, 2.0))
phases=[]
# create four phases
phases.append(quartz_reflections)
phases.append(dilithium_reflections)
phases.append(kryptonite_reflections)
phases.append(unobtainium_reflections)
for phase_idx in range(0, template_count):
for a_reflection in phases[phase_idx]:
contribution_of_this_reflection = \
_apply_convolution_kernals(
x_axis.as_vector,
a_reflection.intensity,
a_reflection.two_theta,
instrument_broadening,
a_reflection.broadening)
templates_matrix[:, phase_idx] += \
contribution_of_this_reflection
# set the last column to be all ones
templates_matrix[:, template_count] = \
np.ones(x_axis.channel_count)
return templates_matrix
def create_composition_dataframe(observations_count):
"""Create a dataframe of observations of drilling samples
Returns:
Pandas DataFrame with observations_count observations.
The dataframe has four columns representing the amount
of quartz, dilithium, kryptonite and unobtainium present.
These values are drawn from uniform distributions."""
unobtainium = _draw_unobtainium_observations (observations_count)
dilithium = _draw_dilithium_observations(observations_count)
kryptonite = _draw_kryptonite_observations(observations_count)
quartz = _draw_quartz_observations(observations_count)
# Create clusters by imposing a relationship between quartz
# and dilithium.
for observation_idx in range(0, observations_count):
if quartz[observation_idx] > 30:
dilithium[observation_idx] = 5
if dilithium[observation_idx] > 30:
quartz[observation_idx] = 5
return pd.DataFrame({'Quartz': quartz,
'Dilithium': dilithium,
'Kryptonite': kryptonite,
'Unobtainium': unobtainium})
def create_observations(compositions_dataframe, templates):
"""Create a new array containing synthetic observations"""
observations_count = len(compositions_dataframe)
channels_count = len(templates[:,0])
observations_matrix = np.zeros((channels_count, observations_count))
for observation_idx in range (0, observations_count):
observations_matrix[:, observation_idx] = \
templates[:,0]*compositions_dataframe['Quartz'][observation_idx] + \
templates[:,1]*compositions_dataframe['Dilithium'][observation_idx] + \
templates[:,2]*compositions_dataframe['Kryptonite'][observation_idx] + \
templates[:,3]*compositions_dataframe['Unobtainium'][observation_idx]
# add gaussian noise. If you have time, try increasing this and watch
# prediction performance fall over.
observations_matrix[:, observation_idx] = \
add_gaussian_noise(observations_matrix[:, observation_idx], 10, 3)
return observations_matrix
| [
"numpy.random.normal",
"collections.namedtuple",
"numpy.ones",
"math.sqrt",
"math.log",
"numpy.zeros",
"numpy.linspace",
"pandas.DataFrame",
"numpy.zeros_like",
"numpy.random.RandomState"
]
| [((1701, 1804), 'pandas.DataFrame', 'pd.DataFrame', (["{'metal_recovery_percent': recovery_percent, 'feed_sulphur_percent':\n sulphur_percent}"], {}), "({'metal_recovery_percent': recovery_percent,\n 'feed_sulphur_percent': sulphur_percent})\n", (1713, 1804), True, 'import pandas as pd\n'), ((1910, 1937), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1931, 1937), True, 'import numpy as np\n'), ((3004, 3034), 'numpy.zeros_like', 'np.zeros_like', (['sulphur_percent'], {}), '(sulphur_percent)\n', (3017, 3034), True, 'import numpy as np\n'), ((3871, 3907), 'numpy.zeros_like', 'np.zeros_like', (['uniformly_distributed'], {}), '(uniformly_distributed)\n', (3884, 3907), True, 'import numpy as np\n'), ((4230, 4265), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', 'bins'], {}), '(mean, sigma, bins)\n', (4246, 4265), True, 'import numpy as np\n'), ((11800, 11852), 'numpy.zeros', 'np.zeros', (['(x_axis.channel_count, template_count + 1)'], {}), '((x_axis.channel_count, template_count + 1))\n', (11808, 11852), True, 'import numpy as np\n'), ((12245, 12311), 'collections.namedtuple', 'namedtuple', (['"""Reflection"""', "('intensity', 'two_theta', 'broadening')"], {}), "('Reflection', ('intensity', 'two_theta', 'broadening'))\n", (12255, 12311), False, 'from collections import namedtuple\n'), ((14447, 14476), 'numpy.ones', 'np.ones', (['x_axis.channel_count'], {}), '(x_axis.channel_count)\n', (14454, 14476), True, 'import numpy as np\n'), ((15510, 15624), 'pandas.DataFrame', 'pd.DataFrame', (["{'Quartz': quartz, 'Dilithium': dilithium, 'Kryptonite': kryptonite,\n 'Unobtainium': unobtainium}"], {}), "({'Quartz': quartz, 'Dilithium': dilithium, 'Kryptonite':\n kryptonite, 'Unobtainium': unobtainium})\n", (15522, 15624), True, 'import pandas as pd\n'), ((15948, 15994), 'numpy.zeros', 'np.zeros', (['(channels_count, observations_count)'], {}), '((channels_count, observations_count))\n', (15956, 15994), True, 'import numpy as np\n'), ((10197, 10248), 'numpy.linspace', 'np.linspace', (['self.min', 'self.max', 'self.channel_count'], {}), '(self.min, self.max, self.channel_count)\n', (10208, 10248), True, 'import numpy as np\n'), ((10874, 10922), 'math.sqrt', 'math.sqrt', (['(sigma_1 * sigma_1 + sigma_2 * sigma_2)'], {}), '(sigma_1 * sigma_1 + sigma_2 * sigma_2)\n', (10883, 10922), False, 'import math\n'), ((6577, 6599), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (6586, 6599), False, 'import math\n'), ((10652, 10663), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (10660, 10663), False, 'import math\n'), ((4997, 5008), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (5005, 5008), False, 'import math\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import json
from os.path import join, isfile
import pytest
from asv import util
from . import tools
def test_run_publish(capfd, basic_conf_2):
tmpdir, local, conf, machine_file = basic_conf_2
tmpdir = util.long_path(tmpdir)
conf.matrix = {
"req": dict(conf.matrix),
"env": {"SOME_TEST_VAR": ["1"]},
}
# Tests a typical complete run/publish workflow
ret = tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--show-stderr', '--profile',
'-a', 'warmup_time=0',
'--durations=5',
_machine_file=machine_file)
assert ret is None
text, err = capfd.readouterr()
assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2
assert 'asv: benchmark timed out (timeout 0.1s)' in text
assert 'total duration' in text
tools.run_asv_with_conf(conf, 'publish')
assert isfile(join(tmpdir, 'html', 'index.html'))
assert isfile(join(tmpdir, 'html', 'index.json'))
assert isfile(join(tmpdir, 'html', 'asv.js'))
assert isfile(join(tmpdir, 'html', 'asv.css'))
# Check parameterized test json data format
filename = glob.glob(join(tmpdir, 'html', 'graphs', 'arch-x86_64',
'asv_dummy_test_package_1',
'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1],
'branch-master',
'cpu-Blazingly fast',
'env-SOME_TEST_VAR-1',
'machine-orangutan',
'os-GNU_Linux', 'python-*', 'ram-128GB',
'params_examples.time_skip.json'))[0]
with open(filename, 'r') as fp:
data = json.load(fp)
assert len(data) == 2
assert isinstance(data[0][0], int) # revision
assert len(data[0][1]) == 3
assert len(data[1][1]) == 3
assert isinstance(data[0][1][0], float)
assert isinstance(data[0][1][1], float)
assert data[0][1][2] is None
# Check that the skip options work
capfd.readouterr()
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--skip-existing-successful',
'--bench=time_secondary.track_value',
'--skip-existing-failed',
_machine_file=join(tmpdir, 'asv-machine.json'))
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--bench=time_secondary.track_value',
'--quick', '--skip-existing-commits',
_machine_file=join(tmpdir, 'asv-machine.json'))
text, err = capfd.readouterr()
assert 'Running benchmarks.' not in text
# Check EXISTING and --environment work
python = "{0[0]}.{0[1]}".format(sys.version_info)
env_type = tools.get_default_environment_type(conf, python)
env_spec = ("-E", env_type + ":" + python)
tools.run_asv_with_conf(conf, 'run', "EXISTING", '--quick',
'--bench=time_secondary.track_value',
*env_spec,
_machine_file=machine_file)
# Remove the benchmarks.json file and check publish fails
os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))
with pytest.raises(util.UserError):
tools.run_asv_with_conf(conf, 'publish')
| [
"json.load",
"asv.util.long_path",
"os.path.join",
"pytest.raises"
]
| [((312, 334), 'asv.util.long_path', 'util.long_path', (['tmpdir'], {}), '(tmpdir)\n', (326, 334), False, 'from asv import util\n'), ((1171, 1205), 'os.path.join', 'join', (['tmpdir', '"""html"""', '"""index.html"""'], {}), "(tmpdir, 'html', 'index.html')\n", (1175, 1205), False, 'from os.path import join, isfile\n'), ((1225, 1259), 'os.path.join', 'join', (['tmpdir', '"""html"""', '"""index.json"""'], {}), "(tmpdir, 'html', 'index.json')\n", (1229, 1259), False, 'from os.path import join, isfile\n'), ((1279, 1309), 'os.path.join', 'join', (['tmpdir', '"""html"""', '"""asv.js"""'], {}), "(tmpdir, 'html', 'asv.js')\n", (1283, 1309), False, 'from os.path import join, isfile\n'), ((1329, 1360), 'os.path.join', 'join', (['tmpdir', '"""html"""', '"""asv.css"""'], {}), "(tmpdir, 'html', 'asv.css')\n", (1333, 1360), False, 'from os.path import join, isfile\n'), ((2019, 2032), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (2028, 2032), False, 'import json\n'), ((3580, 3631), 'os.path.join', 'join', (['tmpdir', '"""results_workflow"""', '"""benchmarks.json"""'], {}), "(tmpdir, 'results_workflow', 'benchmarks.json')\n", (3584, 3631), False, 'from os.path import join, isfile\n'), ((3643, 3672), 'pytest.raises', 'pytest.raises', (['util.UserError'], {}), '(util.UserError)\n', (3656, 3672), False, 'import pytest\n'), ((1436, 1736), 'os.path.join', 'join', (['tmpdir', '"""html"""', '"""graphs"""', '"""arch-x86_64"""', '"""asv_dummy_test_package_1"""', "('asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1])", '"""branch-master"""', '"""cpu-Blazingly fast"""', '"""env-SOME_TEST_VAR-1"""', '"""machine-orangutan"""', '"""os-GNU_Linux"""', '"""python-*"""', '"""ram-128GB"""', '"""params_examples.time_skip.json"""'], {}), "(tmpdir, 'html', 'graphs', 'arch-x86_64', 'asv_dummy_test_package_1', \n 'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1], 'branch-master',\n 'cpu-Blazingly fast', 'env-SOME_TEST_VAR-1', 'machine-orangutan',\n 'os-GNU_Linux', 'python-*', 'ram-128GB', 'params_examples.time_skip.json')\n", (1440, 1736), False, 'from os.path import join, isfile\n'), ((2681, 2713), 'os.path.join', 'join', (['tmpdir', '"""asv-machine.json"""'], {}), "(tmpdir, 'asv-machine.json')\n", (2685, 2713), False, 'from os.path import join, isfile\n'), ((2953, 2985), 'os.path.join', 'join', (['tmpdir', '"""asv-machine.json"""'], {}), "(tmpdir, 'asv-machine.json')\n", (2957, 2985), False, 'from os.path import join, isfile\n'), ((890, 935), 'os.path.join', 'join', (['tmpdir', '"""results_workflow"""', '"""orangutan"""'], {}), "(tmpdir, 'results_workflow', 'orangutan')\n", (894, 935), False, 'from os.path import join, isfile\n'), ((969, 1001), 'os.path.join', 'join', (['tmpdir', '"""results_workflow"""'], {}), "(tmpdir, 'results_workflow')\n", (973, 1001), False, 'from os.path import join, isfile\n')] |
# @author Metro
# @time 2021/11/24
import os.path
import gym
from agents.pdqn import P_DQN
from utilities.memory import ReplayBuffer
from utilities.utilities import *
from utilities.route_generator import generate_routefile
class Train_and_Evaluate(object):
def __init__(self, config):
# Environment
generate_routefile(seed=config.seed, demand=config.demand)
self.env = gym.make(config.environment)
# Agent
self.agent = P_DQN(config, self.env)
# Memory
self.replay_memory_size = config.hyperparameters['replay_memory_size']
self.batch_size = config.hyperparameters['batch_size']
self.updates_per_step = config.hyperparameters['updates_per_step']
self.memory = ReplayBuffer(self.replay_memory_size)
self.total_steps = 0
self.total_updates = 0
self.save_freq = config.save_freq
self.file_to_save = config.file_to_save
self.maximum_episodes = config.hyperparameters['maximum_episodes']
self.train = config.train
self.evaluate = config.evaluate
self.evaluate_internal = config.evaluate_internal
self.agent_to_color_dictionary = config.agent_to_color_dictionary
self.standard_deviation_results = config.standard_deviation_results
self.colors = ['red', 'blue', 'green', 'orange', 'yellow', 'purple']
self.color_idx = 0
self.rolling_score_window = config.rolling_score_window
self.runs_per_agent = config.runs_per_agent
self.agent_name = config.agent_name
self.ceil = config.ceil
# Training Loop
def train_agent(self):
"""
:return:
"""
rolling_scores_for_diff_runs = []
file_to_save_actor = os.path.join(self.file_to_save, 'actor/')
file_to_save_actor_param = os.path.join(self.file_to_save, 'actor_param/')
file_to_save_runs = os.path.join(self.file_to_save, 'runs_1/')
file_to_save_rolling_scores = os.path.join(self.file_to_save, 'rolling_scores/')
os.makedirs(file_to_save_actor, exist_ok=True)
os.makedirs(file_to_save_actor_param, exist_ok=True)
os.makedirs(file_to_save_runs, exist_ok=True)
os.makedirs(file_to_save_rolling_scores, exist_ok=True)
for run in range(self.runs_per_agent):
game_full_episodes_scores = []
game_full_episodes_rolling_scores = []
for i_episode in range(self.maximum_episodes):
if self.save_freq > 0 and i_episode % self.save_freq == 0:
actor_path = os.path.join(file_to_save_actor, 'episode{}'.format(i_episode))
actor_param_path = os.path.join(file_to_save_actor_param, 'episode{}'.format(i_episode))
self.agent.save_models(actor_path, actor_param_path)
episode_score = []
episode_steps = 0
done = 0
state = self.env.reset() # n_steps
while not done:
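                    # Until the replay buffer holds at least one batch of transitions,
                    # explore with randomly drawn discrete actions and action parameters;
                    # afterwards act with the agent and run updates_per_step gradient
                    # updates per environment step.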
if len(self.memory) > self.batch_size:
action, action_params = self.agent.select_action(state, self.train)
if self.ceil:
action_params = np.ceil(action_params).squeeze(0)
action_for_env = [action, int(action_params[action])]
for i in range(self.updates_per_step):
self.agent.update(self.memory)
self.total_updates += 1
else:
action_params = np.random.randint(low=10, high=31, size=8)
action = np.random.randint(7, size=1)[0]
action_for_env = [action, action_params[action]]
next_state, reward, done, info = self.env.step(action_for_env)
print(reward)
episode_steps += 1
episode_score.append(info)
self.total_steps += 1
self.memory.push(state, action, action_params, reward, next_state, done)
state = next_state
episode_score_so_far = np.mean(episode_score)
game_full_episodes_scores.append(episode_score_so_far)
game_full_episodes_rolling_scores.append(
np.mean(game_full_episodes_scores[-1 * self.rolling_score_window:]))
print("Episode: {}, total steps:{}, episode steps:{}, scores:{}".format(
i_episode, self.total_steps, episode_steps, episode_score_so_far))
self.env.close()
file_path_for_pic = os.path.join(file_to_save_runs, 'episode{}_run{}.jpg'.format(i_episode, run))
visualize_results_per_run(agent_results=game_full_episodes_scores,
agent_name=self.agent_name,
save_freq=1,
file_path_for_pic=file_path_for_pic)
rolling_scores_for_diff_runs.append(game_full_episodes_rolling_scores)
file_path_for_pic = os.path.join(file_to_save_rolling_scores, 'rolling_scores.jpg')
visualize_overall_agent_results(agent_results=rolling_scores_for_diff_runs,
agent_name=self.agent_name,
show_mean_and_std_range=True,
agent_to_color_dictionary=self.agent_to_color_dictionary,
standard_deviation_results=1,
file_path_for_pic=file_path_for_pic
)
| [
"utilities.memory.ReplayBuffer",
"utilities.route_generator.generate_routefile",
"gym.make",
"agents.pdqn.P_DQN"
]
| [((324, 382), 'utilities.route_generator.generate_routefile', 'generate_routefile', ([], {'seed': 'config.seed', 'demand': 'config.demand'}), '(seed=config.seed, demand=config.demand)\n', (342, 382), False, 'from utilities.route_generator import generate_routefile\n'), ((402, 430), 'gym.make', 'gym.make', (['config.environment'], {}), '(config.environment)\n', (410, 430), False, 'import gym\n'), ((469, 492), 'agents.pdqn.P_DQN', 'P_DQN', (['config', 'self.env'], {}), '(config, self.env)\n', (474, 492), False, 'from agents.pdqn import P_DQN\n'), ((750, 787), 'utilities.memory.ReplayBuffer', 'ReplayBuffer', (['self.replay_memory_size'], {}), '(self.replay_memory_size)\n', (762, 787), False, 'from utilities.memory import ReplayBuffer\n')] |
import numpy as np
from fixtrack.frontend.pickable_base import PickableBase
from vispy import scene
class PickableMarkers(PickableBase):
"""
Markers that can highlight on hover and be selected
"""
class State(PickableBase.State):
def __init__(self, **kwargs):
super(PickableMarkers.State, self).__init__(**kwargs)
self.sizes_raw = None
self.sizes = None
class Config(PickableBase.Config):
def __init__(self, select_scale=1.0, hover_scale=1.0, **kwargs):
super(PickableMarkers.Config, self).__init__(**kwargs)
self.select_scale = select_scale
self.hover_scale = hover_scale
_kwargs_ignore = ["size", "color_select", "color_hover"]
def __init__(self, parent=None, data=np.zeros((0, 3)), select_scale=2.0, **kwargs):
super(PickableMarkers, self).__init__(
scene.visuals.Markers(pos=data, parent=parent), data=data, parent=parent, **kwargs
)
self.visual.set_gl_state("translucent", depth_test=False, blend=True)
self._cfg.select_scale = select_scale
self._cfg.hover_scale = select_scale * 1.15
self.multi_sel = None
@property
def marker_size(self):
return self._cfg.vis_args["size"]
@marker_size.setter
def marker_size(self, s):
self._cfg.vis_args["size"] = max(1, s)
self._init_data()
self.set_data()
def _selected_idxs(self):
sel = []
if self.multi_sel is None:
if self._state.idx_selected >= 0:
sel = [self._state.idx_selected]
else:
sel = self.multi_sel
return sel
def _init_data(self):
super(PickableMarkers, self)._init_data()
n = len(self._state.data)
self._state.sizes_raw = np.full((n, ), self._cfg.vis_args["size"])
self._state.sizes = self._state.sizes_raw.copy()
def _highlight(self):
self._state.sizes = self._state.sizes_raw.copy()
super(PickableMarkers, self)._highlight()
def _highlight_selected(self):
super(PickableMarkers, self)._highlight_selected()
cfg = self._cfg
state = self._state
if (state.idx_selected >= 0) and cfg.pickable:
state.sizes[self._selected_idxs()] = cfg.vis_args["size"] * cfg.select_scale
def _highlight_hovered(self):
super(PickableMarkers, self)._highlight_hovered()
cfg = self._cfg
state = self._state
if (state.idx_hover >= 0) and cfg.hoverable:
state.sizes[self._hover_idxs()] = cfg.vis_args["size"] * cfg.hover_scale
def _set_data(self):
if len(self._state.data) > 0:
kwargs = {
k: v
for k, v in self._cfg.vis_args.items() if k not in self._kwargs_ignore
}
self._state.edge_colors[:, 3] = self._state.colors[:, 3]
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=self._state.colors,
edge_color=self._state.edge_colors,
edge_width=3,
**kwargs
)
else:
self.visual.set_data(np.zeros((0, 3)))
def _set_data_false(self):
if len(self._state.data) > 0:
colors = self._pa.unique_colors(id(self)) / 255.0
colors[self._state.colors[:, 3] < 1.0e-3] = 0.0
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=colors,
edge_color=colors,
edge_width=0,
)
else:
self.visual.set_data(np.zeros((0, 3)))
| [
"numpy.full",
"numpy.zeros",
"vispy.scene.visuals.Markers"
]
| [((790, 806), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (798, 806), True, 'import numpy as np\n'), ((1818, 1859), 'numpy.full', 'np.full', (['(n,)', "self._cfg.vis_args['size']"], {}), "((n,), self._cfg.vis_args['size'])\n", (1825, 1859), True, 'import numpy as np\n'), ((896, 942), 'vispy.scene.visuals.Markers', 'scene.visuals.Markers', ([], {'pos': 'data', 'parent': 'parent'}), '(pos=data, parent=parent)\n', (917, 942), False, 'from vispy import scene\n'), ((3232, 3248), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (3240, 3248), True, 'import numpy as np\n'), ((3715, 3731), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (3723, 3731), True, 'import numpy as np\n')] |
# Generated by Django 2.1.7 on 2019-02-27 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_longer_password'),
]
operations = [
migrations.AlterField(
model_name='session',
name='title',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| [
"django.db.models.CharField"
]
| [((330, 385), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (346, 385), False, 'from django.db import migrations, models\n')] |
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='xiandb',
version='0.2.0',
description='A database model for Xian',
long_description=long_description,
url='https://github.com/Kuba77/Xian-DB',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: XIAN Collaborators',
'Topic :: Software Development :: Database',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'
],
keywords='xian database db',
packages=['xiandb', 'xiandb.models'],
install_requires=['mongokat', 'pyyaml', 'bcrypt'],
extras_require={}
)
| [
"os.path.join",
"os.path.dirname",
"setuptools.setup"
]
| [((217, 828), 'setuptools.setup', 'setup', ([], {'name': '"""xiandb"""', 'version': '"""0.2.0"""', 'description': '"""A database model for Xian"""', 'long_description': 'long_description', 'url': '"""https://github.com/Kuba77/Xian-DB"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['Development Status :: 3 - Alpha',\n 'Intended Audience :: XIAN Collaborators',\n 'Topic :: Software Development :: Database',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7']", 'keywords': '"""xian database db"""', 'packages': "['xiandb', 'xiandb.models']", 'install_requires': "['mongokat', 'pyyaml', 'bcrypt']", 'extras_require': '{}'}), "(name='xiandb', version='0.2.0', description=\n 'A database model for Xian', long_description=long_description, url=\n 'https://github.com/Kuba77/Xian-DB', author='<NAME>', author_email=\n '<EMAIL>', license='MIT', classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: XIAN Collaborators',\n 'Topic :: Software Development :: Database',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7'], keywords='xian database db',\n packages=['xiandb', 'xiandb.models'], install_requires=['mongokat',\n 'pyyaml', 'bcrypt'], extras_require={})\n", (222, 828), False, 'from setuptools import setup\n'), ((94, 116), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (106, 116), False, 'from os import path\n'), ((129, 158), 'os.path.join', 'path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (138, 158), False, 'from os import path\n')] |
from functools import partial
import tensorflow as tf
_EPSILON = tf.keras.backend.epsilon()
def register_keras_custom_object(cls):
tf.keras.utils.get_custom_objects()[cls.__name__] = cls
return cls
def binary_focal_loss(y_true, y_pred, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None):
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
if from_logits:
return _binary_focal_loss_from_logits(labels=y_true,
logits=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
else:
return _binary_focal_loss_from_probs(labels=y_true,
p=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
@register_keras_custom_object
class BinaryFocalLoss(tf.keras.losses.Loss):
def __init__(self, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None, **kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.pos_weight = pos_weight
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def get_config(self):
config = super().get_config()
config.update(gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
return config
def call(self, y_true, y_pred):
return binary_focal_loss(y_true=y_true,
y_pred=y_pred,
gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
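# Example usage sketch (not part of the original module): the loss can be passed
# to Keras like any other tf.keras.losses.Loss instance, e.g.
#   model.compile(optimizer="adam", loss=BinaryFocalLoss(gamma=2.0, from_logits=True))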
# Helper functions below
def _process_labels(labels, label_smoothing, dtype):
labels = tf.dtypes.cast(labels, dtype=dtype)
if label_smoothing is not None:
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels
def _binary_focal_loss_from_logits(labels, logits, gamma, pos_weight, label_smoothing):
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=logits.dtype)
# Compute probabilities for the positive class
p = tf.math.sigmoid(logits)
if label_smoothing is None:
labels_shape = labels.shape
logits_shape = logits.shape
if not labels_shape.is_fully_defined() or labels_shape != logits_shape:
labels_shape = tf.shape(labels)
logits_shape = tf.shape(logits)
shape = tf.broadcast_dynamic_shape(labels_shape, logits_shape)
labels = tf.broadcast_to(labels, shape)
logits = tf.broadcast_to(logits, shape)
if pos_weight is None:
loss_func = tf.nn.sigmoid_cross_entropy_with_logits
else:
loss_func = partial(tf.nn.weighted_cross_entropy_with_logits, pos_weight=pos_weight)
loss = loss_func(labels=labels, logits=logits)
modulation_pos = (1 - p)**gamma
modulation_neg = p**gamma
mask = tf.dtypes.cast(labels, dtype=tf.bool)
modulation = tf.where(mask, modulation_pos, modulation_neg)
return modulation * loss
# Terms for the positive and negative class components of the loss
pos_term = labels * ((1 - p)**gamma)
neg_term = (1 - labels) * (p**gamma)
# Term involving the log and ReLU
log_weight = pos_term
if pos_weight is not None:
log_weight *= pos_weight
log_weight += neg_term
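    # Numerically stable evaluation of log(1 + exp(-logits)):
    # log1p(exp(-|logits|)) + relu(-logits) avoids overflow for large negative logits.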
log_term = tf.math.log1p(tf.math.exp(-tf.math.abs(logits)))
log_term += tf.nn.relu(-logits)
log_term *= log_weight
# Combine all the terms into the loss
loss = neg_term * logits + log_term
return loss
def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight, label_smoothing):
q = 1 - p
# For numerical stability (so we don't inadvertently take the log of 0)
p = tf.math.maximum(p, _EPSILON)
q = tf.math.maximum(q, _EPSILON)
# Loss for the positive examples
pos_loss = -(q**gamma) * tf.math.log(p)
if pos_weight is not None:
pos_loss *= pos_weight
# Loss for the negative examples
neg_loss = -(p**gamma) * tf.math.log(q)
# Combine loss terms
if label_smoothing is None:
labels = tf.dtypes.cast(labels, dtype=tf.bool)
loss = tf.where(labels, pos_loss, neg_loss)
else:
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=p.dtype)
loss = labels * pos_loss + (1 - labels) * neg_loss
return loss | [
"tensorflow.shape",
"tensorflow.keras.backend.epsilon",
"tensorflow.nn.relu",
"tensorflow.math.log",
"tensorflow.math.sigmoid",
"tensorflow.math.maximum",
"tensorflow.where",
"tensorflow.broadcast_dynamic_shape",
"tensorflow.broadcast_to",
"functools.partial",
"tensorflow.convert_to_tensor",
"tensorflow.keras.utils.get_custom_objects",
"tensorflow.math.abs",
"tensorflow.dtypes.cast"
]
| [((66, 92), 'tensorflow.keras.backend.epsilon', 'tf.keras.backend.epsilon', ([], {}), '()\n', (90, 92), True, 'import tensorflow as tf\n'), ((331, 359), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y_pred'], {}), '(y_pred)\n', (351, 359), True, 'import tensorflow as tf\n'), ((2262, 2297), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['labels'], {'dtype': 'dtype'}), '(labels, dtype=dtype)\n', (2276, 2297), True, 'import tensorflow as tf\n'), ((2671, 2694), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['logits'], {}), '(logits)\n', (2686, 2694), True, 'import tensorflow as tf\n'), ((4026, 4045), 'tensorflow.nn.relu', 'tf.nn.relu', (['(-logits)'], {}), '(-logits)\n', (4036, 4045), True, 'import tensorflow as tf\n'), ((4355, 4383), 'tensorflow.math.maximum', 'tf.math.maximum', (['p', '_EPSILON'], {}), '(p, _EPSILON)\n', (4370, 4383), True, 'import tensorflow as tf\n'), ((4392, 4420), 'tensorflow.math.maximum', 'tf.math.maximum', (['q', '_EPSILON'], {}), '(q, _EPSILON)\n', (4407, 4420), True, 'import tensorflow as tf\n'), ((138, 173), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (171, 173), True, 'import tensorflow as tf\n'), ((414, 454), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['y_pred'], {'dtype': 'tf.float32'}), '(y_pred, dtype=tf.float32)\n', (428, 454), True, 'import tensorflow as tf\n'), ((3497, 3534), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['labels'], {'dtype': 'tf.bool'}), '(labels, dtype=tf.bool)\n', (3511, 3534), True, 'import tensorflow as tf\n'), ((3556, 3602), 'tensorflow.where', 'tf.where', (['mask', 'modulation_pos', 'modulation_neg'], {}), '(mask, modulation_pos, modulation_neg)\n', (3564, 3602), True, 'import tensorflow as tf\n'), ((4488, 4502), 'tensorflow.math.log', 'tf.math.log', (['p'], {}), '(p)\n', (4499, 4502), True, 'import tensorflow as tf\n'), ((4632, 4646), 'tensorflow.math.log', 'tf.math.log', (['q'], {}), '(q)\n', (4643, 4646), True, 'import tensorflow as tf\n'), ((4722, 4759), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['labels'], {'dtype': 'tf.bool'}), '(labels, dtype=tf.bool)\n', (4736, 4759), True, 'import tensorflow as tf\n'), ((4775, 4811), 'tensorflow.where', 'tf.where', (['labels', 'pos_loss', 'neg_loss'], {}), '(labels, pos_loss, neg_loss)\n', (4783, 4811), True, 'import tensorflow as tf\n'), ((2907, 2923), 'tensorflow.shape', 'tf.shape', (['labels'], {}), '(labels)\n', (2915, 2923), True, 'import tensorflow as tf\n'), ((2951, 2967), 'tensorflow.shape', 'tf.shape', (['logits'], {}), '(logits)\n', (2959, 2967), True, 'import tensorflow as tf\n'), ((2988, 3042), 'tensorflow.broadcast_dynamic_shape', 'tf.broadcast_dynamic_shape', (['labels_shape', 'logits_shape'], {}), '(labels_shape, logits_shape)\n', (3014, 3042), True, 'import tensorflow as tf\n'), ((3064, 3094), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['labels', 'shape'], {}), '(labels, shape)\n', (3079, 3094), True, 'import tensorflow as tf\n'), ((3116, 3146), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['logits', 'shape'], {}), '(logits, shape)\n', (3131, 3146), True, 'import tensorflow as tf\n'), ((3280, 3352), 'functools.partial', 'partial', (['tf.nn.weighted_cross_entropy_with_logits'], {'pos_weight': 'pos_weight'}), '(tf.nn.weighted_cross_entropy_with_logits, pos_weight=pos_weight)\n', (3287, 3352), False, 'from functools import partial\n'), ((3988, 4007), 'tensorflow.math.abs', 'tf.math.abs', (['logits'], {}), '(logits)\n', (3999, 4007), True, 'import tensorflow as tf\n')] |
import time
from collections import OrderedDict
from datetime import datetime, timedelta
from django.db import models
from django.conf import settings
from django.utils.timezone import utc
from .skills import Skill, SkillGroup
from metrics.models import Corporation
from tasks.models import EveApiCache, Task
from evetool.storage import OverwriteStorage
import utils
class CharacterApi(models.Model):
""" charactertype apis """
api = models.ForeignKey("apis.Api")
characterid = models.BigIntegerField()
charactername = models.CharField(max_length=254)
corporationid = models.BigIntegerField()
corporationname = models.CharField(max_length=254)
def __unicode__(self):
return self.charactername
#get right icon for characters view
def view_icon(self):
try:
icon = self.characterapiicon_set.get(size=128, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
#def character sheet image
def sheet_icon(self):
try:
icon = self.characterapiicon_set.get(size=200, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
def current_balance(self):
if self.api.access_to("CharacterInfo"):
sheet = utils.connection.api_request(
"CharacterInfoAuth", obj=self
)
if sheet.accountBalance:
return round(float(sheet.accountBalance), 2)
return 0
def sheet_cache_key(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
return utils.connection.generate_cache_key(
category, key, api=self.api, **kwargs
)
else:
return utils.connection.generate_cache_key(category, key)
def sheet_set_cache_job(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
api = self.api
else:
api = None
EveApiCache.objects.create(
priority=Task.VERY_HIGH,
api=api,
category=category,
key=key,
kwargs=kwargs,
)
#get the data for landing page after character selection
def character_sheet(self):
sheet = utils.connection.get_cache(self.sheet_cache_key())
employment = self.employment_history(sheet)
return sheet, employment
#employment history of a player
@staticmethod
def employment_history(sheet):
cache_key = "employment_history_%d" % int(sheet.characterID)
#result = utils.connection.get_cache(cache_key)
result = None
if not result:
cache_timer = 60 * 60
result = []
for corp_data in sheet.employmentHistory:
result.append({
"corporation": Corporation.find_corporation(
corp_data.corporationID
),
"startdate": utils.common.convert_timestamp(
corp_data.startDate
)
})
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skill in training
def skill_in_training(self):
training_skill = None
if self.api.access_to("SkillInTraining"):
in_training = utils.connection.api_request(
"SkillInTraining", obj=self
)
try:
training_skill = {
"skill": Skill.objects.get(
typeid=int(in_training.trainingTypeID)
).typename,
"to_level": int(in_training.trainingToLevel),
"finnished": utils.common.convert_timestamp(
in_training.trainingEndTime
)
}
except AttributeError:
training_skill = {"skill": "No skill in training"}
return training_skill
#characters trained skills
def trained_skills(self):
cache_key = "trained_skills_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
cache_timer = 60 * 5
sheet = utils.connection.api_request("CharacterSheet", obj=self)
groups = SkillGroup.objects.exclude(
groupname="Fake Skills"
).order_by("groupname")
skills = Skill.objects.order_by("typename")
all_skills = OrderedDict()
skillpoints = {}
for group in groups:
all_skills[group.groupname] = list()
skillpoints[group.groupname] = 0
for skill in skills:
trained = sheet.skills.Get(skill.typeid, False)
if trained:
all_skills[skill.skillgroup.groupname].append(
{
"skill": skill,
"level": int(trained.level)
}
)
skillpoints[skill.skillgroup.groupname] += \
trained.skillpoints
result = {
"all_skills": all_skills,
"skillpoints": skillpoints,
}
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skillqueue
def skill_queue(self):
queue = None
if self.api.access_to("SkillQueue"):
queue = {}
skills = utils.connection.api_request(
"SkillQueue", obj=self
).skillqueue
queue["skills"] = skills
queue["total"] = self.total_skillpoints(skills)
now = datetime.now().replace(tzinfo=utc)
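            # remaining training time = end time of the last queued skill minus now,
            # with the sub-second part stripped from the resulting timedelta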
try:
trainingtime = utils.common.convert_timestamp(
skills[-1].endTime
) - now
trainingtime -= timedelta(
microseconds=trainingtime.microseconds
)
queue["trainingtime"] = trainingtime
except TypeError:
pass
return queue
#get total skillpoints for skills in queue
@staticmethod
def total_skillpoints(skills):
total = 0
for skill in skills:
total += int(skill.endSP - skill.startSP)
return total
#walletjournal
def wallet_journal(self):
cache_key = "walletjournal_character_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
self.update_journal()
cache_timer = 60 * 10
utils.connection.set_cache(cache_key, True, cache_timer)
return CharacterJournal.objects.filter(characterapi=self)
#updates journal to current moment
def update_journal(self):
fromid = 0
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500
).transactions
while True:
for trans in transactions:
date = utils.common.convert_timestamp(trans.date)
#check for duplicate
if CharacterJournal.objects.filter(
characterapi=self,
balance=trans.balance,
date=date,
).exists():
continue
else:
CharacterJournal.create_entry(self, trans)
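                # track the lowest refID seen so the next request can continue from the oldest entry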
if int(trans.refID) < fromid or fromid == 0:
fromid = int(trans.refID)
if len(transactions) < 2500:
break
else:
time.sleep(1)
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500, fromid=fromid
).transactions
class CharacterApiIcon(models.Model):
""" images related to characters """
relation = models.ForeignKey("characters.CharacterApi")
size = models.IntegerField(choices=settings.IMAGE_SIZES)
typeid = models.IntegerField()
icon = models.ImageField(
upload_to="images/characters/",
storage=OverwriteStorage(),
blank=True,
null=True
)
class Meta:
unique_together = ["size", "relation"]
def __unicode__(self):
return "Character Image %s" % self.relation.charactername
# def save(self, *args, **kwargs):
# try:
# temp = CharacterApiIcon.objects.get(pk=self.pk)
# if temp.icon != self.icon:
# temp.icon.delete()
# except ObjectDoesNotExist:
# pass
# super(CharacterApiIcon, self).save(*args, **kwargs)
#get list of wanted character icon sizes
@staticmethod
def icon_sizes():
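        # the sizes requested elsewhere in this model: 128 for view_icon, 200 for sheet_icon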
return [128, 200]
class Transaction(models.Model):
reftypeid = models.SmallIntegerField()
ownername1 = models.CharField(max_length=254)
ownerid1 = models.IntegerField()
ownername2 = models.CharField(max_length=254)
ownerid2 = models.IntegerField()
argname1 = models.CharField(max_length=254)
argid1 = models.IntegerField()
amount = models.FloatField(null=True)
reason = models.TextField(blank=True)
taxreceiverid = models.IntegerField(null=True)
taxamount = models.FloatField(null=True)
class Meta:
abstract = True
class CharacterJournal(Transaction):
"""
    Wallet transactions of a player. Saved to the database so data can
be filtered, and metadata can be created.
Like balance graphs, see how much you paid in taxes and more.
"""
characterapi = models.ForeignKey(CharacterApi)
date = models.DateTimeField()
balance = models.FloatField()
class Meta:
unique_together = ["characterapi", "date", "balance"]
ordering = ["-date", "-reftypeid"]
def __unicode__(self):
return "%s's transaction" % self.characterapi.charactername
@staticmethod
def create_entry(characterapi, transaction):
if transaction.taxReceiverID == "":
taxreceiverid = None
else:
taxreceiverid = int(transaction.taxReceiverID)
if transaction.taxAmount == "":
taxamount = None
else:
taxamount = round(float(transaction.taxAmount), 2)
date = utils.common.convert_timestamp(transaction.date)
CharacterJournal.objects.create(
characterapi=characterapi,
date=date,
balance=round(float(transaction.balance), 2),
reftypeid=int(transaction.refTypeID),
ownername1=str(transaction.ownerName1),
ownerid1=int(transaction.ownerID1),
ownername2=str(transaction.ownerName2),
ownerid2=int(transaction.ownerID2),
argname1=str(transaction.argName1),
argid1=int(transaction.argID1),
amount=round(float(transaction.amount), 2),
reason=str(transaction.reason),
taxreceiverid=taxreceiverid,
taxamount=taxamount,
)
@staticmethod
def monthly_balance(characterapi):
last_restart = utils.common.last_server_restart()
days = last_restart - timedelta(days=31)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[days, last_restart]
)
balance = []
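        # walk backwards one day at a time; if a day has no journal entries, reuse the
        # previously computed balance (falling back to the character's live balance)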
for days in range(31):
first = entries.first()
date = (last_restart - timedelta(days=days))
            #make timestamp in milliseconds
timestamp = int(time.mktime(date.timetuple()) * 1000)
if first:
isk = first.balance
else:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
balance.append([timestamp, isk])
entries = entries.filter(date__lt=(date - timedelta(days=1)))
#return reversed list
return balance[::-1]
@staticmethod
def weekly_balance(characterapi):
now = datetime.now().replace(tzinfo=utc)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[
now.replace(hour=23, minute=59, second=0) - timedelta(days=9),
now
]
)
balance = []
for days in range(8):
date = now.replace(
hour=0, minute=0, second=0
) - timedelta(days=days)
day_entries = entries.filter(
date__lt=now.replace(
hour=23, minute=59, second=59
) - timedelta(days=days),
date__gt=date
)
if not day_entries.count() > 0:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
else:
for entry in day_entries:
timestamp = int(time.mktime(entry.date.timetuple()) * 1000)
balance.append([timestamp, entry.balance])
#add last value for date on xaxis
date = now.replace(hour=23, minute=59, second=59) - timedelta(days=8)
isk = balance[-1][1]
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
return balance[::-1]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"utils.common.convert_timestamp",
"time.sleep",
"django.db.models.BigIntegerField",
"utils.connection.get_cache",
"datetime.timedelta",
"metrics.models.Corporation.find_corporation",
"django.db.models.FloatField",
"django.db.models.ForeignKey",
"utils.common.last_server_restart",
"django.db.models.DateTimeField",
"utils.connection.set_cache",
"django.db.models.CharField",
"evetool.storage.OverwriteStorage",
"utils.connection.api_request",
"collections.OrderedDict",
"utils.connection.generate_cache_key",
"django.db.models.SmallIntegerField",
"tasks.models.EveApiCache.objects.create",
"datetime.datetime.now"
]
| [((448, 477), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""apis.Api"""'], {}), "('apis.Api')\n", (465, 477), False, 'from django.db import models\n'), ((496, 520), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (518, 520), False, 'from django.db import models\n'), ((541, 573), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (557, 573), False, 'from django.db import models\n'), ((594, 618), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (616, 618), False, 'from django.db import models\n'), ((641, 673), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (657, 673), False, 'from django.db import models\n'), ((8185, 8229), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""characters.CharacterApi"""'], {}), "('characters.CharacterApi')\n", (8202, 8229), False, 'from django.db import models\n'), ((8241, 8290), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'settings.IMAGE_SIZES'}), '(choices=settings.IMAGE_SIZES)\n', (8260, 8290), False, 'from django.db import models\n'), ((8304, 8325), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (8323, 8325), False, 'from django.db import models\n'), ((9111, 9137), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {}), '()\n', (9135, 9137), False, 'from django.db import models\n'), ((9155, 9187), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (9171, 9187), False, 'from django.db import models\n'), ((9203, 9224), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (9222, 9224), False, 'from django.db import models\n'), ((9242, 9274), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (9258, 9274), False, 'from django.db import models\n'), ((9290, 9311), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (9309, 9311), False, 'from django.db import models\n'), ((9327, 9359), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (9343, 9359), False, 'from django.db import models\n'), ((9373, 9394), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (9392, 9394), False, 'from django.db import models\n'), ((9408, 9436), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)'}), '(null=True)\n', (9425, 9436), False, 'from django.db import models\n'), ((9450, 9478), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (9466, 9478), False, 'from django.db import models\n'), ((9499, 9529), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (9518, 9529), False, 'from django.db import models\n'), ((9546, 9574), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)'}), '(null=True)\n', (9563, 9574), False, 'from django.db import models\n'), ((9870, 9901), 'django.db.models.ForeignKey', 'models.ForeignKey', (['CharacterApi'], {}), '(CharacterApi)\n', (9887, 9901), False, 'from django.db import models\n'), ((9913, 9935), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (9933, 9935), False, 'from django.db import models\n'), ((9950, 9969), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (9967, 9969), False, 'from 
django.db import models\n'), ((2214, 2322), 'tasks.models.EveApiCache.objects.create', 'EveApiCache.objects.create', ([], {'priority': 'Task.VERY_HIGH', 'api': 'api', 'category': 'category', 'key': 'key', 'kwargs': 'kwargs'}), '(priority=Task.VERY_HIGH, api=api, category=\n category, key=key, kwargs=kwargs)\n', (2240, 2322), False, 'from tasks.models import EveApiCache, Task\n'), ((4351, 4388), 'utils.connection.get_cache', 'utils.connection.get_cache', (['cache_key'], {}), '(cache_key)\n', (4377, 4388), False, 'import utils\n'), ((6741, 6778), 'utils.connection.get_cache', 'utils.connection.get_cache', (['cache_key'], {}), '(cache_key)\n', (6767, 6778), False, 'import utils\n'), ((10569, 10617), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', (['transaction.date'], {}), '(transaction.date)\n', (10599, 10617), False, 'import utils\n'), ((11386, 11420), 'utils.common.last_server_restart', 'utils.common.last_server_restart', ([], {}), '()\n', (11418, 11420), False, 'import utils\n'), ((1332, 1391), 'utils.connection.api_request', 'utils.connection.api_request', (['"""CharacterInfoAuth"""'], {'obj': 'self'}), "('CharacterInfoAuth', obj=self)\n", (1360, 1391), False, 'import utils\n'), ((1752, 1826), 'utils.connection.generate_cache_key', 'utils.connection.generate_cache_key', (['category', 'key'], {'api': 'self.api'}), '(category, key, api=self.api, **kwargs)\n', (1787, 1826), False, 'import utils\n'), ((1890, 1940), 'utils.connection.generate_cache_key', 'utils.connection.generate_cache_key', (['category', 'key'], {}), '(category, key)\n', (1925, 1940), False, 'import utils\n'), ((3336, 3394), 'utils.connection.set_cache', 'utils.connection.set_cache', (['cache_key', 'result', 'cache_timer'], {}), '(cache_key, result, cache_timer)\n', (3362, 3394), False, 'import utils\n'), ((3584, 3641), 'utils.connection.api_request', 'utils.connection.api_request', (['"""SkillInTraining"""'], {'obj': 'self'}), "('SkillInTraining', obj=self)\n", (3612, 3641), False, 'import utils\n'), ((4465, 4521), 'utils.connection.api_request', 'utils.connection.api_request', (['"""CharacterSheet"""'], {'obj': 'self'}), "('CharacterSheet', obj=self)\n", (4493, 4521), False, 'import utils\n'), ((4728, 4741), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4739, 4741), False, 'from collections import OrderedDict\n'), ((5520, 5578), 'utils.connection.set_cache', 'utils.connection.set_cache', (['cache_key', 'result', 'cache_timer'], {}), '(cache_key, result, cache_timer)\n', (5546, 5578), False, 'import utils\n'), ((6882, 6938), 'utils.connection.set_cache', 'utils.connection.set_cache', (['cache_key', '(True)', 'cache_timer'], {}), '(cache_key, True, cache_timer)\n', (6908, 6938), False, 'import utils\n'), ((7117, 7187), 'utils.connection.api_request', 'utils.connection.api_request', (['"""WalletJournal"""'], {'obj': 'self', 'rowcount': '(2500)'}), "('WalletJournal', obj=self, rowcount=2500)\n", (7145, 7187), False, 'import utils\n'), ((8412, 8430), 'evetool.storage.OverwriteStorage', 'OverwriteStorage', ([], {}), '()\n', (8428, 8430), False, 'from evetool.storage import OverwriteStorage\n'), ((11451, 11469), 'datetime.timedelta', 'timedelta', ([], {'days': '(31)'}), '(days=31)\n', (11460, 11469), False, 'from datetime import datetime, timedelta\n'), ((13629, 13646), 'datetime.timedelta', 'timedelta', ([], {'days': '(8)'}), '(days=8)\n', (13638, 13646), False, 'from datetime import datetime, timedelta\n'), ((5760, 5812), 'utils.connection.api_request', 'utils.connection.api_request', 
(['"""SkillQueue"""'], {'obj': 'self'}), "('SkillQueue', obj=self)\n", (5788, 5812), False, 'import utils\n'), ((6179, 6228), 'datetime.timedelta', 'timedelta', ([], {'microseconds': 'trainingtime.microseconds'}), '(microseconds=trainingtime.microseconds)\n', (6188, 6228), False, 'from datetime import datetime, timedelta\n'), ((7306, 7348), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', (['trans.date'], {}), '(trans.date)\n', (7336, 7348), False, 'import utils\n'), ((7906, 7919), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7916, 7919), False, 'import time\n'), ((11739, 11759), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (11748, 11759), False, 'from datetime import datetime, timedelta\n'), ((12350, 12364), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12362, 12364), False, 'from datetime import datetime, timedelta\n'), ((12767, 12787), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (12776, 12787), False, 'from datetime import datetime, timedelta\n'), ((3966, 4025), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', (['in_training.trainingEndTime'], {}), '(in_training.trainingEndTime)\n', (3996, 4025), False, 'import utils\n'), ((5969, 5983), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5981, 5983), False, 'from datetime import datetime, timedelta\n'), ((6052, 6102), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', (['skills[-1].endTime'], {}), '(skills[-1].endTime)\n', (6082, 6102), False, 'import utils\n'), ((7951, 8040), 'utils.connection.api_request', 'utils.connection.api_request', (['"""WalletJournal"""'], {'obj': 'self', 'rowcount': '(2500)', 'fromid': 'fromid'}), "('WalletJournal', obj=self, rowcount=2500,\n fromid=fromid)\n", (7979, 8040), False, 'import utils\n'), ((3073, 3126), 'metrics.models.Corporation.find_corporation', 'Corporation.find_corporation', (['corp_data.corporationID'], {}), '(corp_data.corporationID)\n', (3101, 3126), False, 'from metrics.models import Corporation\n'), ((3207, 3258), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', (['corp_data.startDate'], {}), '(corp_data.startDate)\n', (3237, 3258), False, 'import utils\n'), ((12200, 12217), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (12209, 12217), False, 'from datetime import datetime, timedelta\n'), ((12561, 12578), 'datetime.timedelta', 'timedelta', ([], {'days': '(9)'}), '(days=9)\n', (12570, 12578), False, 'from datetime import datetime, timedelta\n'), ((12939, 12959), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (12948, 12959), False, 'from datetime import datetime, timedelta\n')] |
from django.urls import path
from backend.api.v1.dialogs.views import (
DialogListCreateView,
DialogRetrieveUpdateDestroyAPIView,
DialogMembershipListCreateView,
DialogMessageListCreateView,
DialogMessageRetrieveUpdateDestroyAPIView,
)
urlpatterns = [
path('', DialogListCreateView.as_view()),
path('<int:pk>', DialogRetrieveUpdateDestroyAPIView.as_view()),
path('membership/', DialogMembershipListCreateView.as_view()),
path('messages/', DialogMessageListCreateView.as_view()),
path('messages/<int:pk>', DialogMessageRetrieveUpdateDestroyAPIView.as_view()),
]
| [
"backend.api.v1.dialogs.views.DialogMessageListCreateView.as_view",
"backend.api.v1.dialogs.views.DialogMembershipListCreateView.as_view",
"backend.api.v1.dialogs.views.DialogRetrieveUpdateDestroyAPIView.as_view",
"backend.api.v1.dialogs.views.DialogMessageRetrieveUpdateDestroyAPIView.as_view",
"backend.api.v1.dialogs.views.DialogListCreateView.as_view"
]
| [((286, 316), 'backend.api.v1.dialogs.views.DialogListCreateView.as_view', 'DialogListCreateView.as_view', ([], {}), '()\n', (314, 316), False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((340, 384), 'backend.api.v1.dialogs.views.DialogRetrieveUpdateDestroyAPIView.as_view', 'DialogRetrieveUpdateDestroyAPIView.as_view', ([], {}), '()\n', (382, 384), False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((411, 451), 'backend.api.v1.dialogs.views.DialogMembershipListCreateView.as_view', 'DialogMembershipListCreateView.as_view', ([], {}), '()\n', (449, 451), False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((476, 513), 'backend.api.v1.dialogs.views.DialogMessageListCreateView.as_view', 'DialogMessageListCreateView.as_view', ([], {}), '()\n', (511, 513), False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((546, 597), 'backend.api.v1.dialogs.views.DialogMessageRetrieveUpdateDestroyAPIView.as_view', 'DialogMessageRetrieveUpdateDestroyAPIView.as_view', ([], {}), '()\n', (595, 597), False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n')] |
#
# msmarco doc: create the train.tsv triples
# -------------------------------
import random
random.seed(42)
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.getcwd())
from matchmaker.evaluation.msmarco_eval import *
from collections import defaultdict
from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer
#
# config
#
parser = argparse.ArgumentParser()
parser.add_argument('--out-file', action='store', dest='out_file',
help='training output text file location', required=True)
parser.add_argument('--out-file-ids', action='store', dest='out_file_ids',
help='training output ids file location', required=True)
parser.add_argument('--candidate-file', action='store', dest='candidate_file',
help='trec ranking file location (lucene output)', required=True)
parser.add_argument('--collection-file', action='store', dest='collection_file',
help='collection.tsv location', required=True)
parser.add_argument('--query-file', action='store', dest='query_file',
help='query.tsv location', required=True)
parser.add_argument('--qrel', action='store', dest='qrel_file',
help='qrel location', required=True)
args = parser.parse_args()
max_triples = 10_000_000
max_doc_char_length = 150_000
max_doc_token_length = 10000
#
# load data
# -------------------------------
#
collection = {}
#collection_length = {}
tokenizer = BlingFireTokenizer()
with open(args.collection_file,"r",encoding="utf8") as collection_file:
for line in tqdm(collection_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
max_char_doc = ls[1].rstrip()[:max_doc_char_length]
collection[_id] = max_char_doc
#collection_length[_id] = len(tokenizer.tokenize(max_char_doc))
queries = {}
with open(args.query_file,"r",encoding="utf8") as query_file:
for line in tqdm(query_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
queries[_id] = ls[1].rstrip()
qrels = load_reference(args.qrel_file)
#
# produce output
# -------------------------------
#
triples = []
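# each triple is (query id, judged-relevant doc id from the qrels, unjudged candidate doc id)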
stats = defaultdict(int)
with open(args.candidate_file,"r",encoding="utf8") as candidate_file:
for line in tqdm(candidate_file):
#if random.random() <= 0.5: continue #skip some entries for faster processing
[topicid, _ , unjudged_docid, rank, _ , _ ] = line.split()
#if int(rank) <= 100:
# #if random.random() < 0.7: continue # skip 70% of candidates to speed up things...
# #else:
# stats['< 100 sampling count'] += 1
#else:
        #    if random.random() <= 0.9: continue # skip 90% of candidates assuming top1k -> same number of samples from 0-100 as 101 - 1000
# else:
# stats['> 100 sampling count'] += 1
if topicid not in queries or topicid not in qrels: # added: because we carved out the validation qrels from the train -> so there are some missing
stats['skipped'] += 1
continue
#assert topicid in qrels
assert unjudged_docid in collection
# Use topicid to get our positive_docid
positive_docid = random.choice(qrels[topicid])
assert positive_docid in collection
if unjudged_docid in qrels[topicid]:
stats['docid_collision'] += 1
continue
stats['kept'] += 1
#if collection_length[positive_docid] > max_doc_token_length and collection_length[unjudged_docid] > max_doc_token_length:
# stats['both_to_long'] += 1
# continue
#if collection_length[positive_docid] > max_doc_token_length:
# stats['pos_to_long'] += 1
# continue
#if collection_length[unjudged_docid] > max_doc_token_length:
# stats['unjuged_to_long'] += 1
# continue
triples.append((topicid,positive_docid,unjudged_docid))
# important: shuffle the train data
random.shuffle(triples)
with open(args.out_file,"w",encoding="utf8") as out_file_text ,\
open(args.out_file_ids,"w",encoding="utf8") as out_file_ids:
for i,(topicid, positive_docid, unjudged_docid) in tqdm(enumerate(triples)):
if i == max_triples:
break
if collection[positive_docid].strip() != "" and collection[unjudged_docid].strip() != "":
out_file_ids.write(str(topicid)+"\t"+positive_docid+"\t"+unjudged_docid+"\n")
out_file_text.write(queries[topicid]+"\t"+collection[positive_docid]+"\t"+collection[unjudged_docid]+"\n")
for key, val in stats.items():
print(f"{key}\t{val}") | [
"random.choice",
"random.shuffle",
"argparse.ArgumentParser",
"tqdm.tqdm",
"random.seed",
"os.getcwd",
"collections.defaultdict",
"matchmaker.dataloaders.bling_fire_tokenizer.BlingFireTokenizer"
]
| [((97, 112), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (108, 112), False, 'import random\n'), ((386, 411), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (409, 411), False, 'import argparse\n'), ((1503, 1523), 'matchmaker.dataloaders.bling_fire_tokenizer.BlingFireTokenizer', 'BlingFireTokenizer', ([], {}), '()\n', (1521, 1523), False, 'from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer\n'), ((2212, 2228), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2223, 2228), False, 'from collections import defaultdict\n'), ((4049, 4072), 'random.shuffle', 'random.shuffle', (['triples'], {}), '(triples)\n', (4063, 4072), False, 'import random\n'), ((189, 200), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (198, 200), False, 'import os\n'), ((1612, 1633), 'tqdm.tqdm', 'tqdm', (['collection_file'], {}), '(collection_file)\n', (1616, 1633), False, 'from tqdm import tqdm\n'), ((1966, 1982), 'tqdm.tqdm', 'tqdm', (['query_file'], {}), '(query_file)\n', (1970, 1982), False, 'from tqdm import tqdm\n'), ((2317, 2337), 'tqdm.tqdm', 'tqdm', (['candidate_file'], {}), '(candidate_file)\n', (2321, 2337), False, 'from tqdm import tqdm\n'), ((3276, 3305), 'random.choice', 'random.choice', (['qrels[topicid]'], {}), '(qrels[topicid])\n', (3289, 3305), False, 'import random\n')] |
import matplotlib.pyplot as plt
import numpy as np
from photonpy.cpp.context import Context
import photonpy.cpp.gaussian as gaussian
from photonpy.smlm.util import imshow_hstack
from photonpy.cpp.estimator import Estimator
def CheckDeriv(psf:Estimator, theta):
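    # compare analytic derivatives against finite-difference derivatives; the
    # per-parameter maximum absolute difference should be close to zero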
nderiv,ev=psf.NumDeriv(theta,eps=1e-6)
deriv,ev=psf.Derivatives(theta)
maxerr = np.max( np.abs(deriv-nderiv), (-1,-2) )
print(f"PSF {psf.ParamFormat()}, max {np.max(deriv)}, min: {np.min(deriv)}: Deriv-NumDeriv: {maxerr}")
plt.figure()
imshow_hstack(deriv[0] - nderiv[0])
with Context() as ctx:
g = gaussian.Gaussian(ctx)
for cuda in [False]:
print(f"CUDA = {cuda}")
sigma=2
roisize=12
psf = g.CreatePSF_XYIBg(roisize, sigma, cuda)
theta = [[4, 4, 1000, 3]]
img = psf.ExpectedValue(theta)
plt.figure()
plt.set_cmap('inferno')
smp = np.random.poisson(img)
plt.imshow(smp[0])
psf_sigma = g.CreatePSF_XYIBgSigma(roisize, sigma, cuda)
theta_s = [[4,4,1000,3,sigma]]
img2 = psf_sigma.ExpectedValue(theta_s)
CheckDeriv(psf, theta)
# CheckDeriv(psf_sigma)
print(f"PSF Sigma crlb: {psf_sigma.CRLB(theta_s)}")
theta = psf_sigma.Estimate(smp)[0]
print(theta)
| [
"matplotlib.pyplot.imshow",
"photonpy.cpp.context.Context",
"numpy.abs",
"photonpy.cpp.gaussian.Gaussian",
"numpy.random.poisson",
"photonpy.smlm.util.imshow_hstack",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.set_cmap"
]
| [((508, 520), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (518, 520), True, 'import matplotlib.pyplot as plt\n'), ((525, 560), 'photonpy.smlm.util.imshow_hstack', 'imshow_hstack', (['(deriv[0] - nderiv[0])'], {}), '(deriv[0] - nderiv[0])\n', (538, 560), False, 'from photonpy.smlm.util import imshow_hstack\n'), ((568, 577), 'photonpy.cpp.context.Context', 'Context', ([], {}), '()\n', (575, 577), False, 'from photonpy.cpp.context import Context\n'), ((594, 616), 'photonpy.cpp.gaussian.Gaussian', 'gaussian.Gaussian', (['ctx'], {}), '(ctx)\n', (611, 616), True, 'import photonpy.cpp.gaussian as gaussian\n'), ((364, 386), 'numpy.abs', 'np.abs', (['(deriv - nderiv)'], {}), '(deriv - nderiv)\n', (370, 386), True, 'import numpy as np\n'), ((855, 867), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (865, 867), True, 'import matplotlib.pyplot as plt\n'), ((876, 899), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""inferno"""'], {}), "('inferno')\n", (888, 899), True, 'import matplotlib.pyplot as plt\n'), ((915, 937), 'numpy.random.poisson', 'np.random.poisson', (['img'], {}), '(img)\n', (932, 937), True, 'import numpy as np\n'), ((946, 964), 'matplotlib.pyplot.imshow', 'plt.imshow', (['smp[0]'], {}), '(smp[0])\n', (956, 964), True, 'import matplotlib.pyplot as plt\n'), ((438, 451), 'numpy.max', 'np.max', (['deriv'], {}), '(deriv)\n', (444, 451), True, 'import numpy as np\n'), ((460, 473), 'numpy.min', 'np.min', (['deriv'], {}), '(deriv)\n', (466, 473), True, 'import numpy as np\n')] |
# -*- coding: utf-8
"""Module for testing helper functions.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location
tests/tools_tests/helpers_tests.py
SPDX-License-Identifier: MIT
"""
from nose.tools import eq_
from tespy.tools.helpers import newton
def func(params, x):
return x ** 2 + x - 20
def deriv(params, x):
return 2 * x + 1
def test_newton_bounds():
"""
Test newton algorithm value limit handling.
    Try to calculate a zero crossing of a quadratic function in four
tries.
- zero crossing within limits, starting value near 4
- zero crossing within limits, starting value near -5
- zero crossing below minimum
- zero crossing above maximum
    The function is x^2 + x - 20; its zero crossings are -5 and 4.
"""
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=0)
msg = ('The newton algorithm should find the zero crossing at 4.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=-10)
msg = ('The newton algorithm should find the zero crossing at -5.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(-5.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-4, valmax=-2, val0=-3)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the lower boundary of -4.0.')
eq_(-4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-20, valmax=-10, val0=-10)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the upper boundary of -10.0.')
eq_(-10.0, result, msg)
| [
"tespy.tools.helpers.newton",
"nose.tools.eq_"
]
| [((919, 976), 'tespy.tools.helpers.newton', 'newton', (['func', 'deriv', '[]', '(0)'], {'valmin': '(-10)', 'valmax': '(10)', 'val0': '(0)'}), '(func, deriv, [], 0, valmin=-10, valmax=10, val0=0)\n', (925, 976), False, 'from tespy.tools.helpers import newton\n'), ((1114, 1135), 'nose.tools.eq_', 'eq_', (['(4.0)', 'result', 'msg'], {}), '(4.0, result, msg)\n', (1117, 1135), False, 'from nose.tools import eq_\n'), ((1150, 1209), 'tespy.tools.helpers.newton', 'newton', (['func', 'deriv', '[]', '(0)'], {'valmin': '(-10)', 'valmax': '(10)', 'val0': '(-10)'}), '(func, deriv, [], 0, valmin=-10, valmax=10, val0=-10)\n', (1156, 1209), False, 'from tespy.tools.helpers import newton\n'), ((1348, 1370), 'nose.tools.eq_', 'eq_', (['(-5.0)', 'result', 'msg'], {}), '(-5.0, result, msg)\n', (1351, 1370), False, 'from nose.tools import eq_\n'), ((1385, 1442), 'tespy.tools.helpers.newton', 'newton', (['func', 'deriv', '[]', '(0)'], {'valmin': '(-4)', 'valmax': '(-2)', 'val0': '(-3)'}), '(func, deriv, [], 0, valmin=-4, valmax=-2, val0=-3)\n', (1391, 1442), False, 'from tespy.tools.helpers import newton\n'), ((1669, 1691), 'nose.tools.eq_', 'eq_', (['(-4.0)', 'result', 'msg'], {}), '(-4.0, result, msg)\n', (1672, 1691), False, 'from nose.tools import eq_\n'), ((1706, 1766), 'tespy.tools.helpers.newton', 'newton', (['func', 'deriv', '[]', '(0)'], {'valmin': '(-20)', 'valmax': '(-10)', 'val0': '(-10)'}), '(func, deriv, [], 0, valmin=-20, valmax=-10, val0=-10)\n', (1712, 1766), False, 'from tespy.tools.helpers import newton\n'), ((1994, 2017), 'nose.tools.eq_', 'eq_', (['(-10.0)', 'result', 'msg'], {}), '(-10.0, result, msg)\n', (1997, 2017), False, 'from nose.tools import eq_\n')] |
"""
Dynamo Utils
============
All utility functions for interactions with DynamoDB
Functions
- ensure_json
- create_user_table
- create_or_update_record
- list_tables
- list_records
- get_record
- delete_table
- delete_record
- check_active
"""
import boto3
from decimal import Decimal
from constants import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, DYNAMO_URL
ddb = boto3.resource(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
client = boto3.client(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
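# Illustrative usage sketch of the helpers listed in the module docstring. The
# helper names are the functions defined below; the concrete record values are
# made-up examples.
#
# if "users" not in (list_tables() or []):
#     create_user_table()
# if check_active("users"):
#     create_or_update_record("users", {"username": "alice", "index": "0", "role": "admin"})
#     print(get_record("users", {"username": "alice", "index": "0"}))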
def ensure_json(obj):
"""
Function to ensure that a python object is JSON serializable
Params:
obj::dict|[dict]
Object to be JSON serializable
Returns:
obj::dict|[dict]
Returns the JSON serializable object
"""
if isinstance(obj, list):
for i in range(len(obj)):
obj[i] = ensure_json(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.keys():
obj[k] = ensure_json(obj[k])
return obj
elif isinstance(obj, Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
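# Minimal usage sketch for ensure_json; the sample values are illustrative only:
#   ensure_json({"count": Decimal("3"), "price": Decimal("2.5")})
# returns {"count": 3, "price": 2.5}, which the standard json module can serialize.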
def create_user_table():
"""
Function to create the "users" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "users",
KeySchema = [
{
"AttributeName": "username",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "index",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "username",
"AttributeType": "S"
},
{
"AttributeName": "index",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_user_table\n{}".format(e))
return None
def create_train_table():
"""
Function to create the "trains" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "trains",
KeySchema = [
{
"AttributeName": "train_name",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "train_type",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "train_name",
"AttributeType": "N"
},
{
"AttributeName": "train_type",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_user_table\n{}".format(e))
return None
def create_or_update_record(tableName, record):
"""
Function to create or update a record in DynamoDB
Params:
tableName::str
The table name to get the record
record::dict
The object to store
Returns:
bool
If the record was inserted or not
"""
if not tableName or not record:
return False
if not {'username', 'index'}.issubset(record):
return False
try:
res = ddb.Table(tableName).get_item(
Key = {
"username": record['username'],
"index": record['index']
}
)
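        # shallow upsert: keep any attributes already stored on the item and let
        # the fields of the incoming record overwrite them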
record = { **res['Item'], **record } if 'Item' in res else record
ddb.Table(tableName).put_item(
Item = record
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_or_update_record\n{}".format(e))
return None
def list_tables():
"""
Function to list all tables in DynamoDB
Returns:
tables::[str]
The list of tables
"""
try:
return client.list_tables()['TableNames']
except client.exceptions.ResourceNotFoundException:
print("Tables do not exist")
return False
except Exception as e:
print("Exception @ list_tables\n{}".format(e))
return None
def list_records(tableName):
"""
Function to list all records from a DynamoDB table
Params:
tableName::str
The table name to get the records
Returns:
records::[dict]
The list of records stored in the table
"""
if not tableName:
return False
try:
table = ddb.Table(tableName)
res = table.scan()
docs = ensure_json(res['Items'])
while 'LastEvaluatedKey' in res:
res = table.scan(ExclusiveStartKey = res['LastEvaluatedKey'])
docs.extend(ensure_json(res['Items']))
return docs
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ list_records\n{}".format(e))
return None
def get_record(tableName, query):
"""
Function to retrieve one record from DynamoDB table
Params:
tableName::str
The table name to get the record
query::dict
The query to fetch the record
Returns:
doc::dict
The record retrieved from the table
"""
if not tableName or not query or not isinstance(query, dict):
return False
try:
res = ddb.Table(tableName).get_item(
Key = query
)
doc = ensure_json(res['Item']) if 'Item' in res else None
return doc
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ get_record\n{}".format(e))
return None
def delete_table(tableName):
"""
Function to delete a DynamoDB table
Params:
tableName::str
The table name to delete
Returns:
bool
If the table was deleted or not
"""
if not tableName:
return False
try:
ddb.Table(tableName).delete()
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_table\n{}".format(e))
return None
def delete_record(tableName, query):
"""
    Function to delete a record from a DynamoDB table
    Params:
        tableName::str
            The table name to delete the record from
        query::dict
            The key identifying the record to delete
Returns:
bool
If the record was deleted or not
"""
    if not tableName or not query:
return False
try:
res = ddb.Table(tableName).delete_item(
Key = query
)
print(res)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_record\n{}".format(e))
return None
def check_active(tableName):
"""
Function to check if a table is ACTIVE
Params:
tableName::str
The table name to check
Returns:
bool
If the table is active or not
"""
if not tableName:
return False
try:
if ddb.Table(tableName).table_status == "ACTIVE":
return True
return False
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ check_status\n{}".format(e))
return None | [
"boto3.resource",
"boto3.client"
]
| [((409, 564), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'aws_access_key_id': 'AWS_ACCESS_KEY', 'aws_secret_access_key': 'AWS_SECRET_KEY', 'endpoint_url': 'DYNAMO_URL', 'region_name': 'AWS_REGION'}), "('dynamodb', aws_access_key_id=AWS_ACCESS_KEY,\n aws_secret_access_key=AWS_SECRET_KEY, endpoint_url=DYNAMO_URL,\n region_name=AWS_REGION)\n", (423, 564), False, 'import boto3\n'), ((596, 749), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {'aws_access_key_id': 'AWS_ACCESS_KEY', 'aws_secret_access_key': 'AWS_SECRET_KEY', 'endpoint_url': 'DYNAMO_URL', 'region_name': 'AWS_REGION'}), "('dynamodb', aws_access_key_id=AWS_ACCESS_KEY,\n aws_secret_access_key=AWS_SECRET_KEY, endpoint_url=DYNAMO_URL,\n region_name=AWS_REGION)\n", (608, 749), False, 'import boto3\n')] |
# -*- coding: utf-8 -*-
import unittest
from uuid import uuid4
from copy import deepcopy
from openprocurement.api.models import get_now
from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX
try:
import openprocurement.auctions.core as auctions_core
except ImportError:
auctions_core = None
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionResourceTest(AuctionBaseWebTest):
def test_empty_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?opt_jsonp=callback')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions?opt_jsonp=callback&opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?offset=2015-01-01T00:00:00+02:00&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?feed=changes&offset=0', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Offset expired/invalid', u'location': u'params', u'name': u'offset'}
])
response = self.app.get('/auctions?feed=changes&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
def test_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
offset = get_now().isoformat()
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
while True:
response = self.app.get('/auctions?offset={}'.format(offset))
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_changes(self):
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions?feed=changes')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
response = self.app.get('/auctions?feed=changes&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes&descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?feed=changes&descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?feed=changes&mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?feed=changes&mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_draft(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
data = test_auction_data.copy()
data.update({'status': 'draft'})
for i in range(3):
auctions.append(self.create_auction(data))
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
def test_get_auction(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], auction)
response = self.app.get('/auctions/{}?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_auction_not_found(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
response = self.app.patch_json(
'/auctions/some_id', {'data': {}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
# put custom document object into database to check auction construction on non-Auction data
data = {'contract': 'test', '_id': uuid4().hex}
self.db.save(data)
response = self.app.get('/auctions/{}'.format(data['_id']), status=404)
self.assertEqual(response.status, '404 Not Found')
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], auction['awards'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback&opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
awards = data['awards']
for i in range(3):
award = deepcopy(test_award)
award['date'] = get_now().isoformat()
award['id'] = uuid4().hex
awards.append(award)
self.db.save(data)
ids = ','.join([i['id'] for i in awards])
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(awards))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in awards]))
self.assertEqual(set([i['date'] for i in response.json['data']]), set([i['date'] for i in awards]))
self.assertEqual([i['date'] for i in response.json['data']], sorted([i['date'] for i in awards]))
def test_get_award(self):
auction = self.create_auction()
award = auction['awards'][0]
response = self.app.get('/auctions/{}/awards/{}'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award)
response = self.app.get('/auctions/{}/awards/{}?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/some_id'.format(auction['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'award_id'}
])
def test_get_document_with_versions(self):
auction = self.create_auction()
data = self.db[auction['id']]
documents = data['documents']
for i in range(3):
document = deepcopy(test_document)
document['id'] = data['documents'][0]['id']
document['url'] += str(i)
document['dateModified'] = get_now().isoformat()
documents.append(document)
self.db.save(data)
versions = [{'dateModified': i['dateModified'], 'url': i['url']} for i in documents[:-1]]
response = self.app.get('/auctions/{}/documents/{}'.format(auction['id'], document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['previousVersions']), len(versions))
self.assertEqual(response.json['data']['previousVersions'], versions)
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardDocumentResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
award = auction['awards'][0]
document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], award['documents'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback&opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
award = data['awards'][0]
award_documents = award['documents']
for i in range(3):
document = deepcopy(test_document)
document['dateModified'] = get_now().isoformat()
document['id'] = uuid4().hex
award_documents.append(document)
self.db.save(data)
ids = ','.join([i['id'] for i in award_documents])
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(award_documents))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in award_documents]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in award_documents]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in award_documents]))
def test_get_award_document(self):
auction = self.create_auction()
award = auction['awards'][0]
award_document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award_document)
response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_jsonp=callback'.format(auction['id'], award['id'],award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_pretty=1'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_document_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/{}/documents/some_id'.format(auction['id'], auction['awards'][0]['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AuctionResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardDocumentResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [
"unittest.TestSuite",
"openprocurement.edge.tests.base.test_auction_data.copy",
"unittest.makeSuite",
"unittest.skipUnless",
"uuid.uuid4",
"copy.deepcopy",
"unittest.main",
"openprocurement.api.models.get_now"
]
| [((371, 434), 'unittest.skipUnless', 'unittest.skipUnless', (['auctions_core', '"""Auctions is not reachable"""'], {}), "(auctions_core, 'Auctions is not reachable')\n", (390, 434), False, 'import unittest\n'), ((16751, 16814), 'unittest.skipUnless', 'unittest.skipUnless', (['auctions_core', '"""Auctions is not reachable"""'], {}), "(auctions_core, 'Auctions is not reachable')\n", (16770, 16814), False, 'import unittest\n'), ((21745, 21808), 'unittest.skipUnless', 'unittest.skipUnless', (['auctions_core', '"""Auctions is not reachable"""'], {}), "(auctions_core, 'Auctions is not reachable')\n", (21764, 21808), False, 'import unittest\n'), ((26413, 26433), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (26431, 26433), False, 'import unittest\n'), ((26679, 26713), 'unittest.main', 'unittest.main', ([], {'defaultTest': '"""suite"""'}), "(defaultTest='suite')\n", (26692, 26713), False, 'import unittest\n'), ((8064, 8088), 'openprocurement.edge.tests.base.test_auction_data.copy', 'test_auction_data.copy', ([], {}), '()\n', (8086, 8088), False, 'from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX\n'), ((12783, 12807), 'openprocurement.edge.tests.base.test_auction_data.copy', 'test_auction_data.copy', ([], {}), '()\n', (12805, 12807), False, 'from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX\n'), ((13579, 13603), 'openprocurement.edge.tests.base.test_auction_data.copy', 'test_auction_data.copy', ([], {}), '()\n', (13601, 13603), False, 'from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX\n'), ((26452, 26491), 'unittest.makeSuite', 'unittest.makeSuite', (['AuctionResourceTest'], {}), '(AuctionResourceTest)\n', (26470, 26491), False, 'import unittest\n'), ((26511, 26555), 'unittest.makeSuite', 'unittest.makeSuite', (['AuctionAwardResourceTest'], {}), '(AuctionAwardResourceTest)\n', (26529, 26555), False, 'import unittest\n'), ((26575, 26627), 'unittest.makeSuite', 'unittest.makeSuite', (['AuctionAwardDocumentResourceTest'], {}), '(AuctionAwardDocumentResourceTest)\n', (26593, 26627), False, 'import unittest\n'), ((18481, 18501), 'copy.deepcopy', 'deepcopy', (['test_award'], {}), '(test_award)\n', (18489, 18501), False, 'from copy import deepcopy\n'), ((21012, 21035), 'copy.deepcopy', 'deepcopy', (['test_document'], {}), '(test_document)\n', (21020, 21035), False, 'from copy import deepcopy\n'), ((23716, 23739), 'copy.deepcopy', 'deepcopy', (['test_document'], {}), '(test_document)\n', (23724, 23739), False, 'from copy import deepcopy\n'), ((16568, 16575), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (16573, 16575), False, 'from uuid import uuid4\n'), ((18578, 18585), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18583, 18585), False, 'from uuid import uuid4\n'), ((23830, 23837), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (23835, 23837), False, 'from uuid import uuid4\n'), ((3974, 3983), 'openprocurement.api.models.get_now', 'get_now', ([], {}), '()\n', (3981, 3983), False, 'from openprocurement.api.models import get_now\n'), ((18530, 18539), 'openprocurement.api.models.get_now', 'get_now', ([], {}), '()\n', (18537, 18539), False, 'from openprocurement.api.models import get_now\n'), ((21169, 21178), 'openprocurement.api.models.get_now', 'get_now', ([], {}), '()\n', (21176, 21178), False, 'from openprocurement.api.models import get_now\n'), ((23779, 23788), 
'openprocurement.api.models.get_now', 'get_now', ([], {}), '()\n', (23786, 23788), False, 'from openprocurement.api.models import get_now\n')] |
#!/usr/bin/env python3
import os
import sys
import logging
import subprocess
logging.basicConfig(level=logging.INFO)
root_dir = 'submitted_data'
submitted_file_split = set()
for dir_, _, files in os.walk(root_dir):
for file_name in files:
rel_dir = os.path.relpath(dir_, root_dir)
rel_file = os.path.join(root_dir, rel_dir, file_name)
submitted_file_split.add(rel_file)
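# Process each submitted file: mirror its directory layout under processed_data/ and,
# unless it is 'tad' data or has already been processed, run it through cimr's Infiler
# (eqtl files additionally have their gene list sent to Querier).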
for submitted_file in submitted_file_split:
if submitted_file.startswith('submitted_data'):
dir_name, data_type, file_name = submitted_file.split('/')
out_dir_name = 'processed_data'
if not os.path.isdir(out_dir_name):
os.makedirs(out_dir_name, exist_ok=True)
if not os.path.isdir(out_dir_name + '/' + data_type):
os.makedirs(out_dir_name + '/' + data_type, exist_ok=True)
outfile = submitted_file.replace(dir_name, out_dir_name)
if not os.path.isfile(outfile):
if not data_type == 'tad':
from cimr.processor.utils import Infiler
infile = Infiler(
data_type,
submitted_file,
genome_build='b38',
update_rsid=False,
outfile=str(outfile),
chunksize=700000
)
infile.read_file()
if data_type == 'eqtl':
from cimr.processor.query import Querier
genes = list(infile.list_genes())
queried = Querier(genes)
queried.form_query()
else:
logging.info(f' processed file already exists for {submitted_file}')
logging.info(f' if reprocessing, delete {outfile} and file a new pull request')
| [
"logging.basicConfig",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"cimr.processor.query.Querier",
"logging.info",
"os.walk",
"os.path.relpath"
]
| [((80, 119), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (99, 119), False, 'import logging\n'), ((202, 219), 'os.walk', 'os.walk', (['root_dir'], {}), '(root_dir)\n', (209, 219), False, 'import os\n'), ((267, 298), 'os.path.relpath', 'os.path.relpath', (['dir_', 'root_dir'], {}), '(dir_, root_dir)\n', (282, 298), False, 'import os\n'), ((318, 360), 'os.path.join', 'os.path.join', (['root_dir', 'rel_dir', 'file_name'], {}), '(root_dir, rel_dir, file_name)\n', (330, 360), False, 'import os\n'), ((626, 653), 'os.path.isdir', 'os.path.isdir', (['out_dir_name'], {}), '(out_dir_name)\n', (639, 653), False, 'import os\n'), ((667, 707), 'os.makedirs', 'os.makedirs', (['out_dir_name'], {'exist_ok': '(True)'}), '(out_dir_name, exist_ok=True)\n', (678, 707), False, 'import os\n'), ((723, 768), 'os.path.isdir', 'os.path.isdir', (["(out_dir_name + '/' + data_type)"], {}), "(out_dir_name + '/' + data_type)\n", (736, 768), False, 'import os\n'), ((782, 840), 'os.makedirs', 'os.makedirs', (["(out_dir_name + '/' + data_type)"], {'exist_ok': '(True)'}), "(out_dir_name + '/' + data_type, exist_ok=True)\n", (793, 840), False, 'import os\n'), ((923, 946), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (937, 946), False, 'import os\n'), ((1637, 1705), 'logging.info', 'logging.info', (['f""" processed file already exists for {submitted_file}"""'], {}), "(f' processed file already exists for {submitted_file}')\n", (1649, 1705), False, 'import logging\n'), ((1722, 1801), 'logging.info', 'logging.info', (['f""" if reprocessing, delete {outfile} and file a new pull request"""'], {}), "(f' if reprocessing, delete {outfile} and file a new pull request')\n", (1734, 1801), False, 'import logging\n'), ((1546, 1560), 'cimr.processor.query.Querier', 'Querier', (['genes'], {}), '(genes)\n', (1553, 1560), False, 'from cimr.processor.query import Querier\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 13:38:35 2021
GROUND INVASION! The Game
@author: <NAME> (<EMAIL>)
"""
# Packages used:
import numpy as np
import pandas as pd
import random as rng
from termcolor import colored
# Defining starting forces
## Defenders:
def_force = 1250
def_reserves = 400
defenders = def_force + def_reserves
def_strength = def_force
def_guard = def_force
## Attackers:
att_force = 900
att_reserves = 1000
attackers = att_force + att_reserves
att_strength = att_force
att_guard = att_force
# Defining strategies:
## Defenders:
def_strat = ["draft", "turtle"]
### Draft
def draft(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("You hear news that a draft decree was issued...")
print("Intelligence suggests that there will be more enemy combatants.")
print("You expect the drafted soldiers to have decreased combat effectiveness.")
# Defender Strategy Effects
if def_reserves >= 100:
def_danger = def_force + 100
def_safe = def_reserves - 100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force + def_reserves
def_safe = 0
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.980
def_protection = def_danger * 0.95
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
### Turtle
def turtle(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("The defenders appear to bolster their defenses in preparation.")
print("Intelligence suggests that their defenses will be difficult to penetrate.")
print("It is likely that the defenders will try to keep soldiers out of harm's way.")
# Defender Strategy Effects
if def_force > 1100:
def_danger = def_force
def_safe = def_reserves + (def_danger - 1100)
def_danger = 1100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force
def_safe = def_reserves
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.975
def_protection = def_danger * 1.15
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
## Attackers:
att_strat = ["blitz", "guerilla"]
### Blitz
def blitz(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers grimly accept your orders...")
print("There is an air of apprehension as the troops prepare to deploy.")
print("While offensive effectiveness will improve, heavier losses are expected.")
# Attacker Strategy Effects
if att_reserves >= 200:
att_danger = att_force + 200
att_safe = att_reserves - 200
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
else:
att_danger = att_force + att_reserves
att_safe = 0
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_reserves)
att_power = att_danger * 1.10
att_protection = att_danger * 0.90
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
### Guerilla
def guerilla(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers immediately begin plans to target strategic weak points.")
print("Soldiers move out in small forces and keep the enemy guessing.")
print("While not as effective offensively, troop survival rates should be higher.")
# Attacker Strategy Effects
if att_force > 750:
att_danger = att_force
att_safe = att_reserves + (att_force - 750)
att_danger = 750
else:
att_danger = att_force
att_safe = att_reserves
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
att_power = att_danger * 0.95
att_protection = att_danger * 1.25
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
# Ground Battle Event (Player == Attacker)
wave = 0
player = input("Attacker or Defender? [A/D]:")
while (attackers > 0) and (defenders > 0):
# Wave Information
wave = wave + 1
if wave == 1:
print("############################################################")
print("PREPARE FOR BATTLE! THE FIRST WAVE OF THE BATTLE BEGINS NOW.")
print("############################################################")
else:
print("########## WAVE:", wave, "##########")
print("#############################")
print("Defending force strength:", def_force)
print("Defending forces in reserve:", def_reserves)
print("Attacking force strength:", att_force)
print("Attacking forces in reserve:", att_reserves)
if player =="A":
# Active Player (Attacker)
att_strat_chosen = input(colored("How should we proceed, commander? [blitz/guerilla]:", "yellow"))
elif player == "D":
# CPU Attacker
att_strat_chosen = rng.choice(att_strat)
# Defender Setup
if player == "A":
# CPU Defender
if def_reserves > 0:
def_strat = ["none",
"draft", "draft", "draft", "draft", "draft", "draft",
"turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
else:
def_strat = ["none", "none",
"turtle", "turtle", "turtle" ,"turtle", "turtle", "turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
elif player == "D":
# Active Player (defender)
def_strat_chosen = input(colored("How should we proceed, commander? [draft/turtle]:", "yellow"))
if def_strat_chosen == "draft":
draft_results = draft(def_force, def_reserves)
def_force = draft_results[0]
def_reserves = draft_results[1]
def_strength = draft_results[2]
def_guard = draft_results[3]
elif def_strat_chosen == "turtle":
turtle_results = turtle(def_force, def_reserves)
def_force = turtle_results[0]
def_reserves = turtle_results[1]
def_strength = turtle_results[2]
def_guard = turtle_results[3]
elif def_strat_chosen == "none":
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("It appears that the enemy will employ standard tactics...")
def_force = def_force
def_reserves = def_reserves
def_strength = def_force
def_guard = def_force
print("Defending force strength:", def_force)
print("Forces kept in reserve:", def_reserves)
# Attacker Setup
if att_strat_chosen == "blitz":
blitz_results = blitz(att_force, att_reserves)
att_force = blitz_results[0]
att_reserves = blitz_results[1]
att_strength = blitz_results[2]
att_guard = blitz_results[3]
elif att_strat_chosen == "guerilla":
guerilla_results = guerilla(att_force, att_reserves)
att_force = guerilla_results[0]
att_reserves = guerilla_results[1]
att_strength = guerilla_results[2]
att_guard = guerilla_results[3]
# Combat
# Attacker damage
def_guard = np.random.normal(def_guard, def_guard/10) * 0.50
att_strength = att_strength - def_guard
if att_strength < 0:
att_strength = 0
def_force = def_force - np.random.normal(att_strength, att_strength/10)//2 - (0.1*att_strength)//1
if def_force < 0:
def_force = 0
# Defender damage
att_guard = np.random.normal(att_guard, att_guard/10) * 0.50 - 0.1
def_strength = def_strength - att_guard
if def_strength < 0:
def_strength = 0
att_force = att_force - np.random.normal(def_strength, def_strength/10)//2 - (0.1*def_strength)//1
if att_force < 0:
att_force = 0
# Post-wave results:
print(colored("########## POST-WAVE RESULTS ##########", on_color = "on_cyan"))
print(colored("Defenders:", on_color = "on_blue"))
print("Surviving defensive forces:", def_force)
print("Defenseive forces kept in reserve:", def_reserves)
print("Defender strength estimate:", def_strength)
print("Defender guard estimate:", def_guard)
print(colored("Attackers:", on_color = "on_red"))
print("Surviving attacker forces:", att_force)
print("Attacker forces kept in reserve:", att_reserves)
print("Attacker strength estimate:", att_strength)
print("Attacker guard estimate:", att_guard)
# Reset allocations
# Defender reallocations:
def_reserves = def_reserves + def_force
def_force = 0
if def_reserves >= 1250:
def_reserves = def_reserves - 1250
def_force = 1250
def_guard = def_force
else:
def_force = def_reserves
def_reserves = 0
def_guard = def_force
# Attacker reallocations:
att_reserves = att_reserves + att_force
att_force = 0
if att_reserves >= 900:
att_reserves = att_reserves - 900
att_force = 900
att_guard = att_force
else:
att_force = att_reserves
att_reserves = 0
att_guard = att_force
defenders = def_force + def_reserves
attackers = att_force + att_reserves
# End of wave conditionals
if (attackers > 0) and (defenders > 0) and (player == "A"):
fightflight = input(colored("Continue or retreat?: [continue/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif attackers <= 0 and player == "A":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your assault has been repelled!")
print("You return home, wondering what punishment for your failure awaits...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif defenders <= 0 and player == "A":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The defenders have been routed!")
print("You may now decide the fate of the defending population...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif (attackers > 0) and (defenders > 0) and (player == "D"):
fightflight = input(colored("Defend or retreat?: [defend/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops from the region...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1900 - defenders))
print("Survival rate:", (defenders)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif defenders <= 0 and player == "D":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your defense has been broken!")
print("Enemy troops now occupy your lands and have claimed dominion...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
elif attackers <= 0 and player == "D":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The attackers have been repelled!")
print("The storm has passed, and your people live another day...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
print("#############################")
| [
"numpy.random.normal",
"termcolor.colored",
"random.choice"
]
| [((799, 871), 'termcolor.colored', 'colored', (['"""########## INTELLIGENCE REPORT ##########"""'], {'on_color': '"""on_cyan"""'}), "('########## INTELLIGENCE REPORT ##########', on_color='on_cyan')\n", (806, 871), False, 'from termcolor import colored\n'), ((1906, 1978), 'termcolor.colored', 'colored', (['"""########## INTELLIGENCE REPORT ##########"""'], {'on_color': '"""on_cyan"""'}), "('########## INTELLIGENCE REPORT ##########', on_color='on_cyan')\n", (1913, 1978), False, 'from termcolor import colored\n'), ((3130, 3200), 'termcolor.colored', 'colored', (['"""########## OFFICERS\' REPORTS #########"""'], {'on_color': '"""on_cyan"""'}), '("########## OFFICERS\' REPORTS #########", on_color=\'on_cyan\')\n', (3137, 3200), False, 'from termcolor import colored\n'), ((4239, 4309), 'termcolor.colored', 'colored', (['"""########## OFFICERS\' REPORTS #########"""'], {'on_color': '"""on_cyan"""'}), '("########## OFFICERS\' REPORTS #########", on_color=\'on_cyan\')\n', (4246, 4309), False, 'from termcolor import colored\n'), ((8411, 8454), 'numpy.random.normal', 'np.random.normal', (['def_guard', '(def_guard / 10)'], {}), '(def_guard, def_guard / 10)\n', (8427, 8454), True, 'import numpy as np\n'), ((9087, 9157), 'termcolor.colored', 'colored', (['"""########## POST-WAVE RESULTS ##########"""'], {'on_color': '"""on_cyan"""'}), "('########## POST-WAVE RESULTS ##########', on_color='on_cyan')\n", (9094, 9157), False, 'from termcolor import colored\n'), ((9172, 9213), 'termcolor.colored', 'colored', (['"""Defenders:"""'], {'on_color': '"""on_blue"""'}), "('Defenders:', on_color='on_blue')\n", (9179, 9213), False, 'from termcolor import colored\n'), ((9450, 9490), 'termcolor.colored', 'colored', (['"""Attackers:"""'], {'on_color': '"""on_red"""'}), "('Attackers:', on_color='on_red')\n", (9457, 9490), False, 'from termcolor import colored\n'), ((5968, 6040), 'termcolor.colored', 'colored', (['"""How should we proceed, commander? [blitz/guerilla]:"""', '"""yellow"""'], {}), "('How should we proceed, commander? [blitz/guerilla]:', 'yellow')\n", (5975, 6040), False, 'from termcolor import colored\n'), ((6119, 6140), 'random.choice', 'rng.choice', (['att_strat'], {}), '(att_strat)\n', (6129, 6140), True, 'import random as rng\n'), ((6444, 6465), 'random.choice', 'rng.choice', (['def_strat'], {}), '(def_strat)\n', (6454, 6465), True, 'import random as rng\n'), ((6661, 6682), 'random.choice', 'rng.choice', (['def_strat'], {}), '(def_strat)\n', (6671, 6682), True, 'import random as rng\n'), ((8748, 8791), 'numpy.random.normal', 'np.random.normal', (['att_guard', '(att_guard / 10)'], {}), '(att_guard, att_guard / 10)\n', (8764, 8791), True, 'import numpy as np\n'), ((10601, 10663), 'termcolor.colored', 'colored', (['"""Continue or retreat?: [continue/retreat]:"""', '"""yellow"""'], {}), "('Continue or retreat?: [continue/retreat]:', 'yellow')\n", (10608, 10663), False, 'from termcolor import colored\n'), ((6778, 6848), 'termcolor.colored', 'colored', (['"""How should we proceed, commander? [draft/turtle]:"""', '"""yellow"""'], {}), "('How should we proceed, commander? 
[draft/turtle]:', 'yellow')\n", (6785, 6848), False, 'from termcolor import colored\n'), ((8586, 8635), 'numpy.random.normal', 'np.random.normal', (['att_strength', '(att_strength / 10)'], {}), '(att_strength, att_strength / 10)\n', (8602, 8635), True, 'import numpy as np\n'), ((8929, 8978), 'numpy.random.normal', 'np.random.normal', (['def_strength', '(def_strength / 10)'], {}), '(def_strength, def_strength / 10)\n', (8945, 8978), True, 'import numpy as np\n'), ((10722, 10785), 'termcolor.colored', 'colored', (['"""########## WITHDRAWAL ##########"""'], {'on_color': '"""on_blue"""'}), "('########## WITHDRAWAL ##########', on_color='on_blue')\n", (10729, 10785), False, 'from termcolor import colored\n'), ((10868, 10939), 'termcolor.colored', 'colored', (['"""######### INVASION STATISTICS ##########"""'], {'on_color': '"""on_cyan"""'}), "('######### INVASION STATISTICS ##########', on_color='on_cyan')\n", (10875, 10939), False, 'from termcolor import colored\n'), ((11307, 11367), 'termcolor.colored', 'colored', (['"""########## FAILURE! ##########"""'], {'on_color': '"""on_red"""'}), "('########## FAILURE! ##########', on_color='on_red')\n", (11314, 11367), False, 'from termcolor import colored\n'), ((11524, 11595), 'termcolor.colored', 'colored', (['"""######### INVASION STATISTICS ##########"""'], {'on_color': '"""on_cyan"""'}), "('######### INVASION STATISTICS ##########', on_color='on_cyan')\n", (11531, 11595), False, 'from termcolor import colored\n'), ((7414, 7486), 'termcolor.colored', 'colored', (['"""########## INTELLIGENCE REPORT ##########"""'], {'on_color': '"""on_cyan"""'}), "('########## INTELLIGENCE REPORT ##########', on_color='on_cyan')\n", (7421, 7486), False, 'from termcolor import colored\n'), ((11853, 11915), 'termcolor.colored', 'colored', (['"""########## SUCCESS! ##########"""'], {'on_color': '"""on_green"""'}), "('########## SUCCESS! ##########', on_color='on_green')\n", (11860, 11915), False, 'from termcolor import colored\n'), ((12061, 12132), 'termcolor.colored', 'colored', (['"""######### INVASION STATISTICS ##########"""'], {'on_color': '"""on_cyan"""'}), "('######### INVASION STATISTICS ##########', on_color='on_cyan')\n", (12068, 12132), False, 'from termcolor import colored\n'), ((12427, 12485), 'termcolor.colored', 'colored', (['"""Defend or retreat?: [defend/retreat]:"""', '"""yellow"""'], {}), "('Defend or retreat?: [defend/retreat]:', 'yellow')\n", (12434, 12485), False, 'from termcolor import colored\n'), ((12544, 12607), 'termcolor.colored', 'colored', (['"""########## WITHDRAWAL ##########"""'], {'on_color': '"""on_blue"""'}), "('########## WITHDRAWAL ##########', on_color='on_blue')\n", (12551, 12607), False, 'from termcolor import colored\n'), ((12706, 12777), 'termcolor.colored', 'colored', (['"""######### INVASION STATISTICS ##########"""'], {'on_color': '"""on_cyan"""'}), "('######### INVASION STATISTICS ##########', on_color='on_cyan')\n", (12713, 12777), False, 'from termcolor import colored\n'), ((13145, 13205), 'termcolor.colored', 'colored', (['"""########## FAILURE! ##########"""'], {'on_color': '"""on_red"""'}), "('########## FAILURE! 
##########', on_color='on_red')\n", (13152, 13205), False, 'from termcolor import colored\n'), ((13354, 13425), 'termcolor.colored', 'colored', (['"""######### INVASION STATISTICS ##########"""'], {'on_color': '"""on_cyan"""'}), "('######### INVASION STATISTICS ##########', on_color='on_cyan')\n", (13361, 13425), False, 'from termcolor import colored\n'), ((13683, 13745), 'termcolor.colored', 'colored', (['"""########## SUCCESS! ##########"""'], {'on_color': '"""on_green"""'}), "('########## SUCCESS! ##########', on_color='on_green')\n", (13690, 13745), False, 'from termcolor import colored\n'), ((13892, 13963), 'termcolor.colored', 'colored', (['"""######### INVASION STATISTICS ##########"""'], {'on_color': '"""on_cyan"""'}), "('######### INVASION STATISTICS ##########', on_color='on_cyan')\n", (13899, 13963), False, 'from termcolor import colored\n')] |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2021 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
import dateutil.parser
import pytz
from flask import flash, request, session
from flask_pluginengine import render_plugin_template, url_for_plugin
from indico.core import signals
from indico.core.config import config
from indico.core.plugins import IndicoPlugin
from indico.core.settings.converters import ModelListConverter
from indico.modules.events.requests.models.requests import Request, RequestState
from indico.modules.events.requests.views import WPRequestsEventManagement
from indico.modules.rb.models.rooms import Room
from indico.modules.users import User
from indico.util.string import natural_sort_key
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import EmailListField, IndicoQuerySelectMultipleField, PrincipalListField
from indico.web.menu import TopMenuItem
from indico_room_assistance import _
from indico_room_assistance.blueprint import blueprint
from indico_room_assistance.definition import RoomAssistanceRequest
from indico_room_assistance.util import (can_request_assistance_for_event, event_has_room_with_support_attached,
is_room_assistance_support)
def _order_func(object_list):
return sorted(object_list, key=lambda r: natural_sort_key(r[1].full_name))
class RoomAssistanceForm(IndicoForm):
_fieldsets = [
('Startup assistance emails', ['room_assistance_recipients', 'rooms_with_assistance',
'room_assistance_support']),
]
room_assistance_recipients = EmailListField(_('Recipients'),
description=_('Notifications about room assistance requests are sent '
'to these email addresses (one per line)'))
rooms_with_assistance = IndicoQuerySelectMultipleField('Rooms',
query_factory=lambda: Room.query,
description=_('Rooms for which users can request startup '
'assistance'),
get_label='full_name', collection_class=set,
render_kw={'size': 20}, modify_object_list=_order_func)
room_assistance_support = PrincipalListField(_('Room assistance support'), allow_groups=True,
description=_('List of users who can view the list of events with '
'room startup assistance.'))
class RoomAssistancePlugin(IndicoPlugin):
"""Room assistance request
This plugin lets users request assistance for meeting rooms.
"""
configurable = True
settings_form = RoomAssistanceForm
settings_converters = {
'rooms_with_assistance': ModelListConverter(Room)
}
acl_settings = {'room_assistance_support'}
default_settings = {
'room_assistance_recipients': [],
'rooms_with_assistance': [],
}
def init(self):
super().init()
self.inject_bundle('main.css', WPRequestsEventManagement, subclasses=False,
condition=lambda: request.view_args.get('type') == RoomAssistanceRequest.name)
self.template_hook('event-actions', self._room_assistance_action)
self.connect(signals.menu.items, self._extend_services_menu, sender='top-menu')
self.connect(signals.plugin.get_event_request_definitions, self._get_room_assistance_request)
self.connect(signals.event.updated, self._on_event_update)
def get_blueprints(self):
return blueprint
def _room_assistance_action(self, event, **kwargs):
return render_plugin_template('room_assistance_action.html', event=event,
can_request_assistance=can_request_assistance_for_event(event))
def _extend_services_menu(self, reservation, **kwargs):
if not session.user or not is_room_assistance_support(session.user):
return
return TopMenuItem('services-cern-room-assistance', _('Room assistance'),
url_for_plugin('room_assistance.request_list'), section='services')
def _get_room_assistance_request(self, sender, **kwargs):
return RoomAssistanceRequest
def _on_event_update(self, event, **kwargs):
changes = kwargs['changes']
if not changes.keys() & {'location_data', 'start_dt', 'end_dt'}:
return
request = Request.find_latest_for_event(event, RoomAssistanceRequest.name)
if not request or request.state != RequestState.accepted:
return
if 'location_data' in changes and not event_has_room_with_support_attached(event):
request.definition.reject(request, {'comment': render_plugin_template('auto_reject_no_supported_room.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event location is not in the list of the rooms supported by the room assistance team. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
if changes.keys() & {'start_dt', 'end_dt'}:
tz = pytz.timezone(config.DEFAULT_TIMEZONE)
occurrences = {dateutil.parser.parse(occ).astimezone(tz) for occ in request.data['occurrences']}
req_dates = {occ.date() for occ in occurrences}
event_dates = set(event.iter_days())
old_dates = req_dates - event_dates
has_overlapping_dates = req_dates & event_dates
if not has_overlapping_dates:
request.definition.reject(request,
{'comment': render_plugin_template('auto_reject_no_overlapping_dates.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event dates don't overlap with the existing room assistance request for this event. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
elif old_dates and has_overlapping_dates:
new_data = dict(request.data)
new_data['occurrences'] = [occ.astimezone(pytz.utc).isoformat() for occ in occurrences
if occ.date() in req_dates & event_dates]
request.data = new_data
flash(_("Room assistance had been requested for days that are not between the updated start/end "
"dates. Support will not be provided on these days anymore."), 'warning')
| [
"indico.modules.users.User.get_system_user",
"pytz.timezone",
"indico_room_assistance.util.event_has_room_with_support_attached",
"flask.request.view_args.get",
"indico.util.string.natural_sort_key",
"indico_room_assistance.util.can_request_assistance_for_event",
"flask_pluginengine.render_plugin_template",
"indico_room_assistance._",
"indico.core.settings.converters.ModelListConverter",
"flask_pluginengine.url_for_plugin",
"indico_room_assistance.util.is_room_assistance_support",
"indico.modules.events.requests.models.requests.Request.find_latest_for_event"
]
| [((1787, 1802), 'indico_room_assistance._', '_', (['"""Recipients"""'], {}), "('Recipients')\n", (1788, 1802), False, 'from indico_room_assistance import _\n'), ((2664, 2692), 'indico_room_assistance._', '_', (['"""Room assistance support"""'], {}), "('Room assistance support')\n", (2665, 2692), False, 'from indico_room_assistance import _\n'), ((3196, 3220), 'indico.core.settings.converters.ModelListConverter', 'ModelListConverter', (['Room'], {}), '(Room)\n', (3214, 3220), False, 'from indico.core.settings.converters import ModelListConverter\n'), ((4878, 4942), 'indico.modules.events.requests.models.requests.Request.find_latest_for_event', 'Request.find_latest_for_event', (['event', 'RoomAssistanceRequest.name'], {}), '(event, RoomAssistanceRequest.name)\n', (4907, 4942), False, 'from indico.modules.events.requests.models.requests import Request, RequestState\n'), ((1864, 1967), 'indico_room_assistance._', '_', (['"""Notifications about room assistance requests are sent to these email addresses (one per line)"""'], {}), "('Notifications about room assistance requests are sent to these email addresses (one per line)'\n )\n", (1865, 1967), False, 'from indico_room_assistance import _\n'), ((2261, 2318), 'indico_room_assistance._', '_', (['"""Rooms for which users can request startup assistance"""'], {}), "('Rooms for which users can request startup assistance')\n", (2262, 2318), False, 'from indico_room_assistance import _\n'), ((2774, 2859), 'indico_room_assistance._', '_', (['"""List of users who can view the list of events with room startup assistance."""'], {}), "('List of users who can view the list of events with room startup assistance.'\n )\n", (2775, 2859), False, 'from indico_room_assistance import _\n'), ((4464, 4484), 'indico_room_assistance._', '_', (['"""Room assistance"""'], {}), "('Room assistance')\n", (4465, 4484), False, 'from indico_room_assistance import _\n'), ((4513, 4559), 'flask_pluginengine.url_for_plugin', 'url_for_plugin', (['"""room_assistance.request_list"""'], {}), "('room_assistance.request_list')\n", (4527, 4559), False, 'from flask_pluginengine import render_plugin_template, url_for_plugin\n'), ((5662, 5700), 'pytz.timezone', 'pytz.timezone', (['config.DEFAULT_TIMEZONE'], {}), '(config.DEFAULT_TIMEZONE)\n', (5675, 5700), False, 'import pytz\n'), ((1477, 1509), 'indico.util.string.natural_sort_key', 'natural_sort_key', (['r[1].full_name'], {}), '(r[1].full_name)\n', (1493, 1509), False, 'from indico.util.string import natural_sort_key\n'), ((4205, 4244), 'indico_room_assistance.util.can_request_assistance_for_event', 'can_request_assistance_for_event', (['event'], {}), '(event)\n', (4237, 4244), False, 'from indico_room_assistance.util import can_request_assistance_for_event, event_has_room_with_support_attached, is_room_assistance_support\n'), ((4342, 4382), 'indico_room_assistance.util.is_room_assistance_support', 'is_room_assistance_support', (['session.user'], {}), '(session.user)\n', (4368, 4382), False, 'from indico_room_assistance.util import can_request_assistance_for_event, event_has_room_with_support_attached, is_room_assistance_support\n'), ((5075, 5118), 'indico_room_assistance.util.event_has_room_with_support_attached', 'event_has_room_with_support_attached', (['event'], {}), '(event)\n', (5111, 5118), False, 'from indico_room_assistance.util import can_request_assistance_for_event, event_has_room_with_support_attached, is_room_assistance_support\n'), ((5279, 5301), 'indico.modules.users.User.get_system_user', 'User.get_system_user', ([], 
{}), '()\n', (5299, 5301), False, 'from indico.modules.users import User\n'), ((5383, 5562), 'indico_room_assistance._', '_', (['"""The new event location is not in the list of the rooms supported by the room assistance team. Room assistance request has been rejected and support will not be provided."""'], {}), "('The new event location is not in the list of the rooms supported by the room assistance team. Room assistance request has been rejected and support will not be provided.'\n )\n", (5384, 5562), False, 'from indico_room_assistance import _\n'), ((5179, 5238), 'flask_pluginengine.render_plugin_template', 'render_plugin_template', (['"""auto_reject_no_supported_room.txt"""'], {}), "('auto_reject_no_supported_room.txt')\n", (5201, 5238), False, 'from flask_pluginengine import render_plugin_template, url_for_plugin\n'), ((6282, 6304), 'indico.modules.users.User.get_system_user', 'User.get_system_user', ([], {}), '()\n', (6302, 6304), False, 'from indico.modules.users import User\n'), ((6394, 6571), 'indico_room_assistance._', '_', (['"""The new event dates don\'t overlap with the existing room assistance request for this event. Room assistance request has been rejected and support will not be provided."""'], {}), '("The new event dates don\'t overlap with the existing room assistance request for this event. Room assistance request has been rejected and support will not be provided."\n )\n', (6395, 6571), False, 'from indico_room_assistance import _\n'), ((3557, 3586), 'flask.request.view_args.get', 'request.view_args.get', (['"""type"""'], {}), "('type')\n", (3578, 3586), False, 'from flask import flash, request, session\n'), ((6175, 6237), 'flask_pluginengine.render_plugin_template', 'render_plugin_template', (['"""auto_reject_no_overlapping_dates.txt"""'], {}), "('auto_reject_no_overlapping_dates.txt')\n", (6197, 6237), False, 'from flask_pluginengine import render_plugin_template, url_for_plugin\n'), ((6956, 7111), 'indico_room_assistance._', '_', (['"""Room assistance had been requested for days that are not between the updated start/end dates. Support will not be provided on these days anymore."""'], {}), "('Room assistance had been requested for days that are not between the updated start/end dates. Support will not be provided on these days anymore.'\n )\n", (6957, 7111), False, 'from indico_room_assistance import _\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# REST
from rest_framework.viewsets import ViewSetMixin
from rest_framework import routers, serializers, viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated, BasePermission
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.views import APIView
from rest_framework import mixins, generics
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
# our models
from blockchain.models import Block, Prescription, Transaction, Address
from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri
from .exceptions import NonValidPubKey
# Define router
router = routers.DefaultRouter()
class PrescriptionSerializer(serializers.ModelSerializer):
""" Prescription serializer """
timestamp = serializers.DateTimeField(read_only=False)
data = serializers.JSONField(binary=False, read_only=False, required=False)
files = serializers.JSONField(binary=False, read_only=False, required=False)
previous_hash = serializers.CharField(read_only=False, required=False, default="0")
class Meta:
model = Prescription
fields = (
'id',
'public_key',
'data',
"files",
'timestamp',
'signature',
'previous_hash',
'raw_size',
'hash_id',
'is_valid',
'transaction',
'readable',
)
        read_only_fields = ('id', 'hash_id', 'is_valid', 'transaction',)
def validate(self, data):
''' Method to control Extra Keys on Payload!'''
extra_keys = set(self.initial_data.keys()) - set(self.fields.keys())
if extra_keys:
print(extra_keys)
return data
def create(self, validated_data):
return Transaction.objects.create_tx(data=validated_data)
class PrescriptionViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
# Temporally without auth
# authentication_classes = (TokenAuthentication, BasicAuthentication, )
# permission_classes = (IsAuthenticated, )
serializer_class = PrescriptionSerializer
lookup_field = "hash_id"
http_method_names = ['get', 'post', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key = pubkey_string_to_rsa(raw_public_key)
except:
pub_key , raw_public_key = pubkey_base64_to_rsa(raw_public_key)
hex_raw_pub_key = savify_key(pub_key)
return Prescription.objects.filter(public_key=hex_raw_pub_key).order_by('-id')
else:
return Prescription.objects.all().order_by('-id')
# TODO: add a patient filter by email; other filters could be added later
router.register(r'rx-endpoint', PrescriptionViewSet, 'prescription-endpoint')
class BlockSerializer(serializers.ModelSerializer):
""" Prescription serializer """
class Meta:
model = Block
fields = (
'id',
'hash_block',
'previous_hash',
'raw_size',
'data',
'timestamp',
'merkleroot',
'hashcash',
'nonce',
)
read_only_fields = ('id', 'hash_block','timestamp','previous_hash', 'raw_size', 'data', 'merkleroot','hashcash','nonce',)
class BlockViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = BlockSerializer
def get_queryset(self):
return Block.objects.all().order_by('-timestamp')
# TODO: add a patient filter by email; other filters could be added later
router.register(r'block', BlockViewSet, 'block-endpoint')
class AddressSerializer(serializers.ModelSerializer):
""" Address serializer """
    pub_key = serializers.CharField(read_only=True, allow_null=True, source="get_pub_key")
class Meta:
model = Address
fields = (
'public_key_b64',
'address',
'is_valid',
'pub_key',
)
read_only_fields = ('address','pub_key', )
class AddressViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = AddressSerializer
lookup_field = "address"
http_method_names = ['get', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key_b64 = pubkey_base64_from_uri(raw_public_key)
except Exception as e:
raise NonValidPubKey
else:
_address = Address.objects.get_or_create_rsa_address(pub_key_b64)
return Address.objects.filter(address=_address)
else:
return Address.objects.all()
# TODO: add a patient filter by email; other filters could be added later
router.register(r'address', AddressViewSet, 'address_endpoint')
| [
"rest_framework.serializers.DateTimeField",
"blockchain.models.Address.objects.all",
"rest_framework.serializers.JSONField",
"blockchain.models.Address.objects.get_or_create_rsa_address",
"blockchain.models.Prescription.objects.filter",
"blockchain.models.Address.objects.filter",
"blockchain.utils.savify_key",
"rest_framework.serializers.CharField",
"blockchain.models.Prescription.objects.all",
"blockchain.utils.pubkey_string_to_rsa",
"blockchain.models.Transaction.objects.create_tx",
"blockchain.utils.pubkey_base64_from_uri",
"blockchain.utils.pubkey_base64_to_rsa",
"blockchain.models.Block.objects.all",
"rest_framework.routers.DefaultRouter"
]
| [((885, 908), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (906, 908), False, 'from rest_framework import routers, serializers, viewsets\n'), ((1022, 1064), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'read_only': '(False)'}), '(read_only=False)\n', (1047, 1064), False, 'from rest_framework import routers, serializers, viewsets\n'), ((1076, 1144), 'rest_framework.serializers.JSONField', 'serializers.JSONField', ([], {'binary': '(False)', 'read_only': '(False)', 'required': '(False)'}), '(binary=False, read_only=False, required=False)\n', (1097, 1144), False, 'from rest_framework import routers, serializers, viewsets\n'), ((1157, 1225), 'rest_framework.serializers.JSONField', 'serializers.JSONField', ([], {'binary': '(False)', 'read_only': '(False)', 'required': '(False)'}), '(binary=False, read_only=False, required=False)\n', (1178, 1225), False, 'from rest_framework import routers, serializers, viewsets\n'), ((1246, 1313), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(False)', 'required': '(False)', 'default': '"""0"""'}), "(read_only=False, required=False, default='0')\n", (1267, 1313), False, 'from rest_framework import routers, serializers, viewsets\n'), ((4084, 4160), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)', 'allow_null': '(True)', 'source': '"""get_pub_key"""'}), "(read_only=True, allow_null=True, source='get_pub_key')\n", (4105, 4160), False, 'from rest_framework import routers, serializers, viewsets\n'), ((2039, 2089), 'blockchain.models.Transaction.objects.create_tx', 'Transaction.objects.create_tx', ([], {'data': 'validated_data'}), '(data=validated_data)\n', (2068, 2089), False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((2831, 2850), 'blockchain.utils.savify_key', 'savify_key', (['pub_key'], {}), '(pub_key)\n', (2841, 2850), False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((5099, 5120), 'blockchain.models.Address.objects.all', 'Address.objects.all', ([], {}), '()\n', (5118, 5120), False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((2664, 2700), 'blockchain.utils.pubkey_string_to_rsa', 'pubkey_string_to_rsa', (['raw_public_key'], {}), '(raw_public_key)\n', (2684, 2700), False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((3819, 3838), 'blockchain.models.Block.objects.all', 'Block.objects.all', ([], {}), '()\n', (3836, 3838), False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((4789, 4827), 'blockchain.utils.pubkey_base64_from_uri', 'pubkey_base64_from_uri', (['raw_public_key'], {}), '(raw_public_key)\n', (4811, 4827), False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((4946, 5000), 'blockchain.models.Address.objects.get_or_create_rsa_address', 'Address.objects.get_or_create_rsa_address', (['pub_key_b64'], {}), '(pub_key_b64)\n', (4987, 5000), False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((5024, 5064), 'blockchain.models.Address.objects.filter', 'Address.objects.filter', ([], {'address': '_address'}), '(address=_address)\n', (5046, 5064), False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((2764, 2800), 
'blockchain.utils.pubkey_base64_to_rsa', 'pubkey_base64_to_rsa', (['raw_public_key'], {}), '(raw_public_key)\n', (2784, 2800), False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((2870, 2925), 'blockchain.models.Prescription.objects.filter', 'Prescription.objects.filter', ([], {'public_key': 'hex_raw_pub_key'}), '(public_key=hex_raw_pub_key)\n', (2897, 2925), False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((2975, 3001), 'blockchain.models.Prescription.objects.all', 'Prescription.objects.all', ([], {}), '()\n', (2999, 3001), False, 'from blockchain.models import Block, Prescription, Transaction, Address\n')] |
from collections import deque, defaultdict
import os
import sys
import logging
import time
import json
import gym
import torch.nn as nn
import torch
import numpy as np
import matplotlib.pyplot as plt
from model import RL_Policy, Semantic_Mapping
from utils.storage import GlobalRolloutStorage
from envs import make_vec_envs
from arguments import get_args
import algo
os.environ["OMP_NUM_THREADS"] = "1"
def main():
args = get_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Setup Logging
log_dir = "{}/models/{}/".format(args.dump_location, args.exp_name)
dump_dir = "{}/dump/{}/".format(args.dump_location, args.exp_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
logging.basicConfig(
filename=log_dir + 'train.log',
level=logging.INFO)
print("Dumping at {}".format(log_dir))
print(args)
logging.info(args)
# Logging and loss variables
num_scenes = args.num_processes
num_episodes = int(args.num_eval_episodes)
device = args.device = torch.device("cuda:0" if args.cuda else "cpu")
g_masks = torch.ones(num_scenes).float().to(device)
best_g_reward = -np.inf
# one episode per process for both train and eval
# for eval, one scene per process
if args.eval:
episode_success = []
episode_spl = []
episode_dist = []
for _ in range(args.num_processes):
episode_success.append(deque(maxlen=num_episodes))
episode_spl.append(deque(maxlen=num_episodes))
episode_dist.append(deque(maxlen=num_episodes))
# for train, different episodes of same scene per process
else:
episode_success = deque(maxlen=1000)
episode_spl = deque(maxlen=1000)
episode_dist = deque(maxlen=1000)
finished = np.zeros((args.num_processes))
wait_env = np.zeros((args.num_processes))
g_episode_rewards = deque(maxlen=1000)
g_value_losses = deque(maxlen=1000)
g_action_losses = deque(maxlen=1000)
g_dist_entropies = deque(maxlen=1000)
per_step_g_rewards = deque(maxlen=1000)
g_process_rewards = np.zeros((num_scenes))
# Starting environments
torch.set_num_threads(1)
envs = make_vec_envs(args)
obs, infos = envs.reset()
full_episode_data = []
episode_data = [None] * num_scenes
for e, info in enumerate(infos):
cInfo = info.copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
torch.set_grad_enabled(False)
# Initialize map variables:
# Full map consists of multiple channels containing the following:
# 1. Obstacle Map
    # 2. Explored Area (places that are known to be free or occupied)
# 3. Current Agent Location
# 4. Past Agent Locations
# 5,6,7,.. : Semantic Categories
nc = args.num_sem_categories + 4 # num channels
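    # Channel index sketch (a reference reading of the comment block above and of
    # how the maps are indexed later; illustrative, not exhaustive):
    #   map[e, 0] -> obstacles            map[e, 1] -> explored area
    #   map[e, 2] -> current agent loc    map[e, 3] -> past agent locations
    #   map[e, 4:4 + args.num_sem_categories] -> one channel per semantic category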
# Calculating full and local map sizes
map_size = args.map_size_cm // args.map_resolution
full_w, full_h = map_size, map_size
local_w = int(full_w / args.global_downscaling)
local_h = int(full_h / args.global_downscaling)
# Initializing full and local map
full_map = torch.zeros(num_scenes, nc, full_w, full_h).float().to(device)
local_map = torch.zeros(num_scenes, nc, local_w,
local_h).float().to(device)
# Initial full and local pose
full_pose = torch.zeros(num_scenes, 3).float().to(device)
local_pose = torch.zeros(num_scenes, 3).float().to(device)
# Origin of local map
origins = np.zeros((num_scenes, 3))
# Local Map Boundaries
lmb = np.zeros((num_scenes, 4)).astype(int)
# Planner pose inputs has 7 dimensions
# 1-3 store continuous global agent location
# 4-7 store local map boundaries
planner_pose_inputs = np.zeros((num_scenes, 7))
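    # Layout sketch for one row of planner_pose_inputs (follows from the comments
    # above and the assignments below):
    #   planner_pose_inputs[e] = [x, y, orientation, gx1, gx2, gy1, gy2]
    # where (x, y, orientation) is the global agent pose and [gx1:gx2, gy1:gy2]
    # are the local map boundaries also stored in lmb[e].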
# get local boundary (x1, x2, y1, y2) given local agent position (x, y) and map size
def get_local_map_boundaries(agent_loc, local_sizes, full_sizes):
loc_r, loc_c = agent_loc
local_w, local_h = local_sizes
full_w, full_h = full_sizes
if args.global_downscaling > 1:
gx1, gy1 = loc_r - local_w // 2, loc_c - local_h // 2
gx2, gy2 = gx1 + local_w, gy1 + local_h
if gx1 < 0:
gx1, gx2 = 0, local_w
if gx2 > full_w:
gx1, gx2 = full_w - local_w, full_w
if gy1 < 0:
gy1, gy2 = 0, local_h
if gy2 > full_h:
gy1, gy2 = full_h - local_h, full_h
else:
gx1, gx2, gy1, gy2 = 0, full_w, 0, full_h
return [gx1, gx2, gy1, gy2]
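    # Worked example for get_local_map_boundaries (illustrative values, assuming a
    # 480x480 full map, a 240x240 local map and global_downscaling > 1):
    #   get_local_map_boundaries((100, 300), (240, 240), (480, 480))
    #   -> gx1 = -20 clips to 0, so the result is [0, 240, 180, 420]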
# initialize global and local maps and poses given that initial position
# is at map center with 0 orientation
def init_map_and_pose():
full_map.fill_(0.)
full_pose.fill_(0.)
full_pose[:, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
# 3x3 grid around agent location is considered explored
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
for e in range(num_scenes):
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
    # identical to init_map_and_pose above, but restricted to a single environment e
def init_map_and_pose_for_env(e):
full_map[e].fill_(0.)
full_pose[e].fill_(0.)
full_pose[e, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose[e].cpu().numpy()
planner_pose_inputs[e, :3] = locs
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
# reward is the newly explored area in a given step (in m^2)
def update_intrinsic_rew(e):
prev_explored_area = full_map[e, 1].sum(1).sum(0)
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
curr_explored_area = full_map[e, 1].sum(1).sum(0)
intrinsic_rews[e] = curr_explored_area - prev_explored_area
intrinsic_rews[e] *= (args.map_resolution / 100.)**2 # to m^2
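    # Example of the area conversion above (assuming map_resolution = 5 cm/cell):
    # each newly explored cell adds (5 / 100)**2 = 0.0025 m^2, so 400 newly
    # explored cells correspond to 1 m^2 of intrinsic reward.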
def get_random_goal(e):
for _ in range(20):
goal = np.random.rand(2)
goal = [int(goal[0] * local_w), int(goal[1] * local_w)]
goal = [min(goal[0], int(local_w-1)), min(goal[1], int(local_w-1))]
if not local_map[e, 1, goal[0], goal[1]]: break
return goal
init_map_and_pose()
# Global policy observation space
ngc = 8 + args.num_sem_categories
es = 2
g_observation_space = gym.spaces.Box(0, 1, # binary local map
(ngc,
local_w,
local_h), dtype='uint8')
# Semantic Mapping
sem_map_module = Semantic_Mapping(args).to(device)
sem_map_module.eval()
intrinsic_rews = torch.zeros(num_scenes).to(device)
# Predict semantic map from frame 1
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx in range(num_scenes)])
).float().to(device)
# args (obs, pose_obs, maps_last, poses_last)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
global_goals = [get_random_goal(e) for e in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
episode_data[e]["used_policy"].append(True)
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy() # obstacles
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy() # explored
p_input['pose_pred'] = planner_pose_inputs[e] # global location+local map bounds
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = 1
p_input['found_goal'] = 0
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5 # TODO: what is this?
# single channel where each grid loc is cat ID
p_input['sem_map_pred'] = local_map[e, 4:, :, :
].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
start = time.time()
g_reward = 0
torch.set_grad_enabled(False)
spl_per_category = defaultdict(list)
success_per_category = defaultdict(list)
for step in range(args.num_training_frames // args.num_processes + 1):
if finished.sum() == args.num_processes:
break
g_step = (step // args.num_local_steps) % args.num_global_steps # global step num in PPO
l_step = step % args.num_local_steps # local step num in global step
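        # Step bookkeeping example (assuming num_local_steps = 25 and
        # num_global_steps = 20): at step = 60, l_step = 60 % 25 = 10 and
        # g_step = (60 // 25) % 20 = 2, i.e. the 11th local step of the 3rd
        # global step in the current PPO window.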
# ------------------------------------------------------------------
# Reinitialize variables when episode ends
l_masks = torch.FloatTensor([0 if x else 1
for x in done]).to(device)
g_masks *= l_masks
for e, x in enumerate(done):
if x:
spl = infos[e]['spl']
success = infos[e]['success']
dist = infos[e]['distance_to_goal']
spl_per_category[infos[e]['goal_name']].append(spl)
success_per_category[infos[e]['goal_name']].append(success)
if args.eval:
episode_success[e].append(success)
episode_spl[e].append(spl)
episode_dist[e].append(dist)
if len(episode_success[e]) == num_episodes:
finished[e] = 1
episode_data[e]["success"] = success
episode_data[e]["spl"] = spl
episode_data[e]["distance_to_goal"] = dist
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = local_map[e]
episode_data[e]["explored_area"] = full_map[e, 1].sum(1).sum(0).item()
scene = episode_data[e]["scene_id"][16:-4]
if args.save_maps:
np.save('{}/maparr_{}_{}'.format(dump_dir, scene, episode_data[e]['episode_id']), full_map[e].cpu().numpy())
full_episode_data.append(episode_data[e])
cInfo = infos[e].copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
else:
episode_success.append(success)
episode_spl.append(spl)
episode_dist.append(dist)
wait_env[e] = 1.
update_intrinsic_rew(e)
init_map_and_pose_for_env(e)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Semantic Mapping Module
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx
in range(num_scenes)])
).float().to(device)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs + origins
local_map[:, 2, :, :].fill_(0.) # Resetting current location channel
# update current location
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 2:loc_r + 3, loc_c - 2:loc_c + 3] = 1.
if args.eval and not wait_env[e]:
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Global Policy
if l_step == args.num_local_steps - 1:
# For every global step, update the full and local maps
for e in range(num_scenes):
if wait_env[e] == 1: # New episode
wait_env[e] = 0.
else:
update_intrinsic_rew(e)
# update global map and pose based on new position in old local frame
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
full_pose[e] = local_pose[e] + \
torch.from_numpy(origins[e]).to(device).float()
# center the local frame based on new position
locs = full_pose[e].cpu().numpy()
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
# compute new local map and pose based on new local frame
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
locs = local_pose.cpu().numpy()
# Get exploration reward and metrics
g_reward = torch.from_numpy(np.asarray(
[infos[env_idx]['g_reward'] for env_idx in range(num_scenes)])
).float().to(device)
g_reward += args.intrinsic_rew_coeff * intrinsic_rews.detach()
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["goal_rewards"].append(infos[e]["g_reward"])
episode_data[e]["explore_rewards"].append(intrinsic_rews[e].item())
g_process_rewards += g_reward.cpu().numpy()
g_total_rewards = g_process_rewards * \
(1 - g_masks.cpu().numpy())
g_process_rewards *= g_masks.cpu().numpy()
per_step_g_rewards.append(np.mean(g_reward.cpu().numpy()))
if np.sum(g_total_rewards) != 0:
for total_rew in g_total_rewards:
if total_rew != 0:
g_episode_rewards.append(total_rew)
global_goals = [get_random_goal(e) for e in range(num_scenes)]
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
g_reward = 0
g_masks = torch.ones(num_scenes).float().to(device)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Update long-term goal if target object is found
found_goal = [0 for _ in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
# If goal category not found in map, goal is the location sampled by
# policy
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"].append(True)
# Else if goal category found in map, use all locations where prob of goal
# obj existing is > 0 as the goal map for planner
for e in range(num_scenes):
cn = infos[e]['goal_cat_id'] + 4
if local_map[e, cn, :, :].sum() != 0.:
cat_semantic_map = local_map[e, cn, :, :].cpu().numpy()
cat_semantic_scores = cat_semantic_map
cat_semantic_scores[cat_semantic_scores > 0] = 1.
goal_maps[e] = cat_semantic_scores
found_goal[e] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"][-1] = False
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Take action and get next observation
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy()
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy()
p_input['pose_pred'] = planner_pose_inputs[e]
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = l_step == args.num_local_steps - 1
p_input['found_goal'] = found_goal[e]
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5
p_input['sem_map_pred'] = local_map[e, 4:, :,
:].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
# ------------------------------------------------------------------
# Logging
if len(full_episode_data) % args.episode_save_interval == 0:
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if step % args.log_interval == 0:
end = time.time()
time_elapsed = time.gmtime(end - start)
log = " ".join([
"Time: {0:0=2d}d".format(time_elapsed.tm_mday - 1),
"{},".format(time.strftime("%Hh %Mm %Ss", time_elapsed)),
"num timesteps {},".format(step * num_scenes),
"FPS {},".format(int(step * num_scenes / (end - start)))
])
log += "\n\tRewards:"
if len(g_episode_rewards) > 0:
log += " ".join([
" Global step mean/med rew:",
"{:.4f}/{:.4f},".format(
np.mean(per_step_g_rewards),
np.median(per_step_g_rewards)),
" Global eps mean/med/min/max eps rew:",
"{:.3f}/{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_episode_rewards),
np.median(g_episode_rewards),
np.min(g_episode_rewards),
np.max(g_episode_rewards))
])
if args.eval:
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
else:
if len(episode_success) > 100:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(episode_success),
np.mean(episode_spl),
np.mean(episode_dist),
len(episode_spl))
log += "\n\tLosses:"
if len(g_value_losses) > 0 and not args.eval:
log += " ".join([
" Policy Loss value/action/dist:",
"{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_value_losses),
np.mean(g_action_losses),
np.mean(g_dist_entropies))
])
print(log)
logging.info(log)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Save best models
if (step * num_scenes) % args.save_interval < \
num_scenes:
if len(g_episode_rewards) >= 1000 and \
(np.mean(g_episode_rewards) >= best_g_reward) \
and not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(log_dir, "model_best.pth"))
best_g_reward = np.mean(g_episode_rewards)
# Save periodic models
if (step * num_scenes) % args.save_periodic < \
num_scenes:
total_steps = step * num_scenes
if not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(dump_dir,
"periodic_{}.pth".format(total_steps)))
# ------------------------------------------------------------------
# Print and save model performance numbers during evaluation
if args.eval:
print("Dumping eval details...")
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log = "Final ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
print(log)
logging.info(log)
# Save the spl per category
log = "Success | SPL per category\n"
for key in success_per_category:
log += "{}: {} | {}\n".format(key,
sum(success_per_category[key]) /
len(success_per_category[key]),
sum(spl_per_category[key]) /
len(spl_per_category[key]))
print(log)
logging.info(log)
with open('{}/{}_spl_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(spl_per_category, f)
with open('{}/{}_success_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(success_per_category, f)
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if __name__ == "__main__":
main()
| [
"numpy.random.rand",
"torch.from_numpy",
"logging.info",
"os.path.exists",
"numpy.mean",
"collections.deque",
"torch.set_num_threads",
"numpy.max",
"numpy.random.seed",
"numpy.min",
"arguments.get_args",
"model.Semantic_Mapping",
"envs.make_vec_envs",
"time.time",
"time.gmtime",
"torch.device",
"logging.basicConfig",
"torch.manual_seed",
"numpy.median",
"os.makedirs",
"time.strftime",
"os.path.join",
"torch.FloatTensor",
"gym.spaces.Box",
"numpy.sum",
"numpy.zeros",
"collections.defaultdict",
"torch.set_grad_enabled",
"torch.cuda.manual_seed",
"torch.zeros",
"json.dump",
"torch.ones"
]
| [((430, 440), 'arguments.get_args', 'get_args', ([], {}), '()\n', (438, 440), False, 'from arguments import get_args\n'), ((446, 471), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (460, 471), True, 'import numpy as np\n'), ((476, 504), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (493, 504), False, 'import torch\n'), ((868, 939), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(log_dir + 'train.log')", 'level': 'logging.INFO'}), "(filename=log_dir + 'train.log', level=logging.INFO)\n", (887, 939), False, 'import logging\n'), ((1020, 1038), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (1032, 1038), False, 'import logging\n'), ((1183, 1229), 'torch.device', 'torch.device', (["('cuda:0' if args.cuda else 'cpu')"], {}), "('cuda:0' if args.cuda else 'cpu')\n", (1195, 1229), False, 'import torch\n'), ((1949, 1977), 'numpy.zeros', 'np.zeros', (['args.num_processes'], {}), '(args.num_processes)\n', (1957, 1977), True, 'import numpy as np\n'), ((1995, 2023), 'numpy.zeros', 'np.zeros', (['args.num_processes'], {}), '(args.num_processes)\n', (2003, 2023), True, 'import numpy as np\n'), ((2051, 2069), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (2056, 2069), False, 'from collections import deque, defaultdict\n'), ((2092, 2110), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (2097, 2110), False, 'from collections import deque, defaultdict\n'), ((2133, 2151), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (2138, 2151), False, 'from collections import deque, defaultdict\n'), ((2175, 2193), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (2180, 2193), False, 'from collections import deque, defaultdict\n'), ((2220, 2238), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (2225, 2238), False, 'from collections import deque, defaultdict\n'), ((2264, 2284), 'numpy.zeros', 'np.zeros', (['num_scenes'], {}), '(num_scenes)\n', (2272, 2284), True, 'import numpy as np\n'), ((2320, 2344), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (2341, 2344), False, 'import torch\n'), ((2356, 2375), 'envs.make_vec_envs', 'make_vec_envs', (['args'], {}), '(args)\n', (2369, 2375), False, 'from envs import make_vec_envs\n'), ((2896, 2925), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2918, 2925), False, 'import torch\n'), ((3945, 3970), 'numpy.zeros', 'np.zeros', (['(num_scenes, 3)'], {}), '((num_scenes, 3))\n', (3953, 3970), True, 'import numpy as np\n'), ((4203, 4228), 'numpy.zeros', 'np.zeros', (['(num_scenes, 7)'], {}), '((num_scenes, 7))\n', (4211, 4228), True, 'import numpy as np\n'), ((8410, 8470), 'gym.spaces.Box', 'gym.spaces.Box', (['(0)', '(1)', '(ngc, local_w, local_h)'], {'dtype': '"""uint8"""'}), "(0, 1, (ngc, local_w, local_h), dtype='uint8')\n", (8424, 8470), False, 'import gym\n'), ((10905, 10916), 'time.time', 'time.time', ([], {}), '()\n', (10914, 10916), False, 'import time\n'), ((10939, 10968), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (10961, 10968), False, 'import torch\n'), ((10992, 11009), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (11003, 11009), False, 'from collections import deque, defaultdict\n'), ((11037, 11054), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (11048, 11054), False, 'from 
collections import deque, defaultdict\n'), ((532, 565), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (554, 565), False, 'import torch\n'), ((742, 765), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (756, 765), False, 'import os\n'), ((775, 795), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (786, 795), False, 'import os\n'), ((807, 831), 'os.path.exists', 'os.path.exists', (['dump_dir'], {}), '(dump_dir)\n', (821, 831), False, 'import os\n'), ((841, 862), 'os.makedirs', 'os.makedirs', (['dump_dir'], {}), '(dump_dir)\n', (852, 862), False, 'import os\n'), ((1831, 1849), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (1836, 1849), False, 'from collections import deque, defaultdict\n'), ((1872, 1890), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (1877, 1890), False, 'from collections import deque, defaultdict\n'), ((1914, 1932), 'collections.deque', 'deque', ([], {'maxlen': '(1000)'}), '(maxlen=1000)\n', (1919, 1932), False, 'from collections import deque, defaultdict\n'), ((9688, 9716), 'numpy.zeros', 'np.zeros', (['(local_w, local_h)'], {}), '((local_w, local_h))\n', (9696, 9716), True, 'import numpy as np\n'), ((25493, 25510), 'logging.info', 'logging.info', (['log'], {}), '(log)\n', (25505, 25510), False, 'import logging\n'), ((26011, 26028), 'logging.info', 'logging.info', (['log'], {}), '(log)\n', (26023, 26028), False, 'import logging\n'), ((4009, 4034), 'numpy.zeros', 'np.zeros', (['(num_scenes, 4)'], {}), '((num_scenes, 4))\n', (4017, 4034), True, 'import numpy as np\n'), ((8025, 8042), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8039, 8042), True, 'import numpy as np\n'), ((8660, 8682), 'model.Semantic_Mapping', 'Semantic_Mapping', (['args'], {}), '(args)\n', (8676, 8682), False, 'from model import RL_Policy, Semantic_Mapping\n'), ((8742, 8765), 'torch.zeros', 'torch.zeros', (['num_scenes'], {}), '(num_scenes)\n', (8753, 8765), False, 'import torch\n'), ((18474, 18502), 'numpy.zeros', 'np.zeros', (['(local_w, local_h)'], {}), '((local_w, local_h))\n', (18482, 18502), True, 'import numpy as np\n'), ((20949, 20960), 'time.time', 'time.time', ([], {}), '()\n', (20958, 20960), False, 'import time\n'), ((20988, 21012), 'time.gmtime', 'time.gmtime', (['(end - start)'], {}), '(end - start)\n', (20999, 21012), False, 'import time\n'), ((23616, 23633), 'logging.info', 'logging.info', (['log'], {}), '(log)\n', (23628, 23633), False, 'import logging\n'), ((26152, 26182), 'json.dump', 'json.dump', (['spl_per_category', 'f'], {}), '(spl_per_category, f)\n', (26161, 26182), False, 'import json\n'), ((26310, 26344), 'json.dump', 'json.dump', (['success_per_category', 'f'], {}), '(success_per_category, f)\n', (26319, 26344), False, 'import json\n'), ((26468, 26499), 'json.dump', 'json.dump', (['full_episode_data', 'f'], {}), '(full_episode_data, f)\n', (26477, 26499), False, 'import json\n'), ((1586, 1612), 'collections.deque', 'deque', ([], {'maxlen': 'num_episodes'}), '(maxlen=num_episodes)\n', (1591, 1612), False, 'from collections import deque, defaultdict\n'), ((1645, 1671), 'collections.deque', 'deque', ([], {'maxlen': 'num_episodes'}), '(maxlen=num_episodes)\n', (1650, 1671), False, 'from collections import deque, defaultdict\n'), ((1705, 1731), 'collections.deque', 'deque', ([], {'maxlen': 'num_episodes'}), '(maxlen=num_episodes)\n', (1710, 1731), False, 'from collections import deque, defaultdict\n'), ((11520, 11570), 
'torch.FloatTensor', 'torch.FloatTensor', (['[(0 if x else 1) for x in done]'], {}), '([(0 if x else 1) for x in done])\n', (11537, 11570), False, 'import torch\n'), ((17643, 17666), 'numpy.sum', 'np.sum', (['g_total_rewards'], {}), '(g_total_rewards)\n', (17649, 17666), True, 'import numpy as np\n'), ((20856, 20887), 'json.dump', 'json.dump', (['full_episode_data', 'f'], {}), '(full_episode_data, f)\n', (20865, 20887), False, 'import json\n'), ((24209, 24235), 'numpy.mean', 'np.mean', (['g_episode_rewards'], {}), '(g_episode_rewards)\n', (24216, 24235), True, 'import numpy as np\n'), ((25336, 25358), 'numpy.mean', 'np.mean', (['total_success'], {}), '(total_success)\n', (25343, 25358), True, 'import numpy as np\n'), ((25376, 25394), 'numpy.mean', 'np.mean', (['total_spl'], {}), '(total_spl)\n', (25383, 25394), True, 'import numpy as np\n'), ((25412, 25431), 'numpy.mean', 'np.mean', (['total_dist'], {}), '(total_dist)\n', (25419, 25431), True, 'import numpy as np\n'), ((1245, 1267), 'torch.ones', 'torch.ones', (['num_scenes'], {}), '(num_scenes)\n', (1255, 1267), False, 'import torch\n'), ((3572, 3615), 'torch.zeros', 'torch.zeros', (['num_scenes', 'nc', 'full_w', 'full_h'], {}), '(num_scenes, nc, full_w, full_h)\n', (3583, 3615), False, 'import torch\n'), ((3651, 3696), 'torch.zeros', 'torch.zeros', (['num_scenes', 'nc', 'local_w', 'local_h'], {}), '(num_scenes, nc, local_w, local_h)\n', (3662, 3696), False, 'import torch\n'), ((3795, 3821), 'torch.zeros', 'torch.zeros', (['num_scenes', '(3)'], {}), '(num_scenes, 3)\n', (3806, 3821), False, 'import torch\n'), ((3858, 3884), 'torch.zeros', 'torch.zeros', (['num_scenes', '(3)'], {}), '(num_scenes, 3)\n', (3869, 3884), False, 'import torch\n'), ((23973, 23999), 'numpy.mean', 'np.mean', (['g_episode_rewards'], {}), '(g_episode_rewards)\n', (23980, 23999), True, 'import numpy as np\n'), ((24136, 24175), 'os.path.join', 'os.path.join', (['log_dir', '"""model_best.pth"""'], {}), "(log_dir, 'model_best.pth')\n", (24148, 24175), False, 'import os\n'), ((21139, 21181), 'time.strftime', 'time.strftime', (['"""%Hh %Mm %Ss"""', 'time_elapsed'], {}), "('%Hh %Mm %Ss', time_elapsed)\n", (21152, 21181), False, 'import time\n'), ((22653, 22675), 'numpy.mean', 'np.mean', (['total_success'], {}), '(total_success)\n', (22660, 22675), True, 'import numpy as np\n'), ((22701, 22719), 'numpy.mean', 'np.mean', (['total_spl'], {}), '(total_spl)\n', (22708, 22719), True, 'import numpy as np\n'), ((22745, 22764), 'numpy.mean', 'np.mean', (['total_dist'], {}), '(total_dist)\n', (22752, 22764), True, 'import numpy as np\n'), ((23017, 23041), 'numpy.mean', 'np.mean', (['episode_success'], {}), '(episode_success)\n', (23024, 23041), True, 'import numpy as np\n'), ((23067, 23087), 'numpy.mean', 'np.mean', (['episode_spl'], {}), '(episode_spl)\n', (23074, 23087), True, 'import numpy as np\n'), ((23113, 23134), 'numpy.mean', 'np.mean', (['episode_dist'], {}), '(episode_dist)\n', (23120, 23134), True, 'import numpy as np\n'), ((7453, 7481), 'torch.from_numpy', 'torch.from_numpy', (['origins[e]'], {}), '(origins[e])\n', (7469, 7481), False, 'import torch\n'), ((18145, 18167), 'torch.ones', 'torch.ones', (['num_scenes'], {}), '(num_scenes)\n', (18155, 18167), False, 'import torch\n'), ((21567, 21594), 'numpy.mean', 'np.mean', (['per_step_g_rewards'], {}), '(per_step_g_rewards)\n', (21574, 21594), True, 'import numpy as np\n'), ((21620, 21649), 'numpy.median', 'np.median', (['per_step_g_rewards'], {}), '(per_step_g_rewards)\n', (21629, 21649), True, 'import numpy as np\n'), 
((21796, 21822), 'numpy.mean', 'np.mean', (['g_episode_rewards'], {}), '(g_episode_rewards)\n', (21803, 21822), True, 'import numpy as np\n'), ((21848, 21876), 'numpy.median', 'np.median', (['g_episode_rewards'], {}), '(g_episode_rewards)\n', (21857, 21876), True, 'import numpy as np\n'), ((21902, 21927), 'numpy.min', 'np.min', (['g_episode_rewards'], {}), '(g_episode_rewards)\n', (21908, 21927), True, 'import numpy as np\n'), ((21953, 21978), 'numpy.max', 'np.max', (['g_episode_rewards'], {}), '(g_episode_rewards)\n', (21959, 21978), True, 'import numpy as np\n'), ((23435, 23458), 'numpy.mean', 'np.mean', (['g_value_losses'], {}), '(g_value_losses)\n', (23442, 23458), True, 'import numpy as np\n'), ((23484, 23508), 'numpy.mean', 'np.mean', (['g_action_losses'], {}), '(g_action_losses)\n', (23491, 23508), True, 'import numpy as np\n'), ((23534, 23559), 'numpy.mean', 'np.mean', (['g_dist_entropies'], {}), '(g_dist_entropies)\n', (23541, 23559), True, 'import numpy as np\n'), ((6384, 6412), 'torch.from_numpy', 'torch.from_numpy', (['origins[e]'], {}), '(origins[e])\n', (6400, 6412), False, 'import torch\n'), ((15649, 15677), 'torch.from_numpy', 'torch.from_numpy', (['origins[e]'], {}), '(origins[e])\n', (15665, 15677), False, 'import torch\n'), ((16707, 16735), 'torch.from_numpy', 'torch.from_numpy', (['origins[e]'], {}), '(origins[e])\n', (16723, 16735), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v0449gRpc.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fv0449gRpc.proto\x12\tv0449gRpc\"\x1b\n\x0b\x64\x61taRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\x08\x64\x61ta2Plc\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1f\n\x0cslaveReq2Plc\x12\x0f\n\x07request\x18\x01 \x01(\x05\"\x1a\n\x08\x64\x61ta2Hmi\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1b\n\ndata2PlcJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1b\n\ndata2HmiJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1c\n\ndata2PlcPb\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1d\n\ndataAnswer\x12\x0f\n\x07message\x18\x01 \x01(\t2\x93\x01\n\x0cv0449gRpcSvc\x12=\n\x0bxchRtDataJs\x12\x15.v0449gRpc.data2PlcJs\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x12\x44\n\x10xchRtDataJsSlave\x12\x17.v0449gRpc.slaveReq2Plc\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x62\x06proto3')
_DATAREQUEST = DESCRIPTOR.message_types_by_name['dataRequest']
_DATA2PLC = DESCRIPTOR.message_types_by_name['data2Plc']
_SLAVEREQ2PLC = DESCRIPTOR.message_types_by_name['slaveReq2Plc']
_DATA2HMI = DESCRIPTOR.message_types_by_name['data2Hmi']
_DATA2PLCJS = DESCRIPTOR.message_types_by_name['data2PlcJs']
_DATA2HMIJS = DESCRIPTOR.message_types_by_name['data2HmiJs']
_DATA2PLCPB = DESCRIPTOR.message_types_by_name['data2PlcPb']
_DATAANSWER = DESCRIPTOR.message_types_by_name['dataAnswer']
dataRequest = _reflection.GeneratedProtocolMessageType('dataRequest', (_message.Message,), {
'DESCRIPTOR' : _DATAREQUEST,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataRequest)
})
_sym_db.RegisterMessage(dataRequest)
data2Plc = _reflection.GeneratedProtocolMessageType('data2Plc', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Plc)
})
_sym_db.RegisterMessage(data2Plc)
slaveReq2Plc = _reflection.GeneratedProtocolMessageType('slaveReq2Plc', (_message.Message,), {
'DESCRIPTOR' : _SLAVEREQ2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.slaveReq2Plc)
})
_sym_db.RegisterMessage(slaveReq2Plc)
data2Hmi = _reflection.GeneratedProtocolMessageType('data2Hmi', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMI,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Hmi)
})
_sym_db.RegisterMessage(data2Hmi)
data2PlcJs = _reflection.GeneratedProtocolMessageType('data2PlcJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcJs)
})
_sym_db.RegisterMessage(data2PlcJs)
data2HmiJs = _reflection.GeneratedProtocolMessageType('data2HmiJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMIJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2HmiJs)
})
_sym_db.RegisterMessage(data2HmiJs)
data2PlcPb = _reflection.GeneratedProtocolMessageType('data2PlcPb', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCPB,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcPb)
})
_sym_db.RegisterMessage(data2PlcPb)
dataAnswer = _reflection.GeneratedProtocolMessageType('dataAnswer', (_message.Message,), {
'DESCRIPTOR' : _DATAANSWER,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataAnswer)
})
_sym_db.RegisterMessage(dataAnswer)
_V0449GRPCSVC = DESCRIPTOR.services_by_name['v0449gRpcSvc']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DATAREQUEST._serialized_start=30
_DATAREQUEST._serialized_end=57
_DATA2PLC._serialized_start=59
_DATA2PLC._serialized_end=85
_SLAVEREQ2PLC._serialized_start=87
_SLAVEREQ2PLC._serialized_end=118
_DATA2HMI._serialized_start=120
_DATA2HMI._serialized_end=146
_DATA2PLCJS._serialized_start=148
_DATA2PLCJS._serialized_end=175
_DATA2HMIJS._serialized_start=177
_DATA2HMIJS._serialized_end=204
_DATA2PLCPB._serialized_start=206
_DATA2PLCPB._serialized_end=234
_DATAANSWER._serialized_start=236
_DATAANSWER._serialized_end=265
_V0449GRPCSVC._serialized_start=268
_V0449GRPCSVC._serialized_end=415
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.descriptor_pool.Default",
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.symbol_database.Default"
]
| [((478, 504), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (502, 504), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1803, 1944), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""dataRequest"""', '(_message.Message,)', "{'DESCRIPTOR': _DATAREQUEST, '__module__': 'v0449gRpc_pb2'}"], {}), "('dataRequest', (_message.Message,),\n {'DESCRIPTOR': _DATAREQUEST, '__module__': 'v0449gRpc_pb2'})\n", (1843, 1944), True, 'from google.protobuf import reflection as _reflection\n'), ((2064, 2200), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""data2Plc"""', '(_message.Message,)', "{'DESCRIPTOR': _DATA2PLC, '__module__': 'v0449gRpc_pb2'}"], {}), "('data2Plc', (_message.Message,), {\n 'DESCRIPTOR': _DATA2PLC, '__module__': 'v0449gRpc_pb2'})\n", (2104, 2200), True, 'from google.protobuf import reflection as _reflection\n'), ((2317, 2461), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""slaveReq2Plc"""', '(_message.Message,)', "{'DESCRIPTOR': _SLAVEREQ2PLC, '__module__': 'v0449gRpc_pb2'}"], {}), "('slaveReq2Plc', (_message.Message,\n ), {'DESCRIPTOR': _SLAVEREQ2PLC, '__module__': 'v0449gRpc_pb2'})\n", (2357, 2461), True, 'from google.protobuf import reflection as _reflection\n'), ((2582, 2718), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""data2Hmi"""', '(_message.Message,)', "{'DESCRIPTOR': _DATA2HMI, '__module__': 'v0449gRpc_pb2'}"], {}), "('data2Hmi', (_message.Message,), {\n 'DESCRIPTOR': _DATA2HMI, '__module__': 'v0449gRpc_pb2'})\n", (2622, 2718), True, 'from google.protobuf import reflection as _reflection\n'), ((2833, 2972), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""data2PlcJs"""', '(_message.Message,)', "{'DESCRIPTOR': _DATA2PLCJS, '__module__': 'v0449gRpc_pb2'}"], {}), "('data2PlcJs', (_message.Message,),\n {'DESCRIPTOR': _DATA2PLCJS, '__module__': 'v0449gRpc_pb2'})\n", (2873, 2972), True, 'from google.protobuf import reflection as _reflection\n'), ((3092, 3231), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""data2HmiJs"""', '(_message.Message,)', "{'DESCRIPTOR': _DATA2HMIJS, '__module__': 'v0449gRpc_pb2'}"], {}), "('data2HmiJs', (_message.Message,),\n {'DESCRIPTOR': _DATA2HMIJS, '__module__': 'v0449gRpc_pb2'})\n", (3132, 3231), True, 'from google.protobuf import reflection as _reflection\n'), ((3351, 3490), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""data2PlcPb"""', '(_message.Message,)', "{'DESCRIPTOR': _DATA2PLCPB, '__module__': 'v0449gRpc_pb2'}"], {}), "('data2PlcPb', (_message.Message,),\n {'DESCRIPTOR': _DATA2PLCPB, '__module__': 'v0449gRpc_pb2'})\n", (3391, 3490), True, 'from google.protobuf import reflection as _reflection\n'), ((3610, 3749), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""dataAnswer"""', '(_message.Message,)', "{'DESCRIPTOR': _DATAANSWER, '__module__': 'v0449gRpc_pb2'}"], {}), "('dataAnswer', (_message.Message,),\n {'DESCRIPTOR': _DATAANSWER, '__module__': 'v0449gRpc_pb2'})\n", (3650, 3749), True, 'from google.protobuf import reflection as _reflection\n'), ((522, 548), 
'google.protobuf.descriptor_pool.Default', '_descriptor_pool.Default', ([], {}), '()\n', (546, 548), True, 'from google.protobuf import descriptor_pool as _descriptor_pool\n')] |
import datetime
from django.core.management import call_command
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from resources_portal.management.commands.populate_dev_database import populate_dev_database
from resources_portal.models import Material, Organization, User
class SearchMaterialsEndpointTestCase(APITestCase):
"""
Tests /search/materials operations.
"""
@classmethod
def setUpClass(cls):
super(SearchMaterialsEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
cls.secondary_prof = User.objects.get(username="SecondaryProf")
cls.post_doc = User.objects.get(username="PostDoc")
cls.primary_lab = Organization.objects.get(name="PrimaryLab")
cls.material1 = Material.objects.get(title="Melanoma Reduction Plasmid")
cls.material2 = Material.objects.get(title="Allele Extraction Protocol")
@classmethod
def tearDownClass(cls):
super(SearchMaterialsEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_title_finds_a_given_material(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?search=" + self.material1.title
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = int(response.json()["results"][0]["id"])
self.assertEqual(first_result_id, self.material1.id)
def test_filter_on_organization_retrieves_all_organization_materials(self):
# Archive one material to make sure it goes to the bottom of the list.
archived_material = Material.objects.first()
archived_material.is_archived = True
archived_material.save()
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-materials-list")
+ "?organization="
+ self.primary_lab.name
+ "&limit=25"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_json = response.json()
material_count = int(response_json["count"])
# Make sure archived materials are last:
self.assertEqual(response_json["results"][-1]["id"], archived_material.id)
material_titles = []
for material in response_json["results"]:
material_titles.append(material["title"])
self.assertEqual(material_count, len(self.primary_lab.materials.all()))
for title in material_titles:
self.assertTrue(
Material.objects.filter(title=title, organization=self.primary_lab).exists()
)
def test_filter_on_category_retrieves_all_materials_of_a_given_category(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?category=" + "MODEL_ORGANISM"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
self.assertEqual(material_count, len(Material.objects.filter(category="MODEL_ORGANISM")))
for title in material_titles:
self.assertTrue(
Material.objects.filter(title=title, category="MODEL_ORGANISM").exists()
)
def test_filter_on_organisms_retrieves_all_materials_with_one_organism(self):
self.client.force_authenticate(user=self.primary_prof)
# Search with one organism name
search_url = reverse("search-materials-list") + "?organisms=" + "danio rerio"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organism_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
database_organism_count = 0
database_titles = []
for material in Material.objects.all():
if material.organisms:
if "Danio rerio" in material.organisms:
database_organism_count += 1
database_titles.append(material.title)
self.assertEqual(organism_count, database_organism_count)
for title in material_titles:
self.assertTrue(title in database_titles)
def test_filter_on_organisms_retrieves_all_materials_with_multiple_organisms(self):
self.client.force_authenticate(user=self.primary_prof)
        # Search with two organism names
search_url = (
reverse("search-materials-list")
+ "?organisms="
+ "danio rerio"
+ "&organisms="
+ "mus musculus"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organism_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
database_organism_count = 0
database_titles = []
for material in Material.objects.all():
if material.organisms:
if ("Danio rerio" in material.organisms) or ("Mus musculus" in material.organisms):
database_organism_count += 1
database_titles.append(material.title)
self.assertEqual(organism_count, database_organism_count)
for title in material_titles:
self.assertTrue(title in database_titles)
def test_ordering_on_updated_at_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?ordering=" + "updated_at"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_dates = []
for material in response.json()["results"]:
date = datetime.datetime.strptime(
material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z"
).date()
material_dates.append(date)
self.assertEqual(material_dates, sorted(material_dates))
def test_combine_search_and_filter_and_ordering_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-materials-list")
+ "?search=MODEL_ORGANISM"
+ "ordering=updated_at"
+ "has_pre_print=true"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_dates = []
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
date = datetime.datetime.strptime(
material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z"
).date()
material_dates.append(date)
self.assertEqual(material_dates, sorted(material_dates))
for title in material_titles:
self.assertTrue(
Material.objects.filter(
title=title, category="MODEL_ORGANISM", has_pre_print=True
).exists()
)
def test_facets_return_number_of_materials(self):
self.client.force_authenticate(user=self.primary_prof)
# Search with no params
search_url = reverse("search-materials-list")
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"])
self.assertEqual(
model_organism_count, len(Material.objects.filter(category="MODEL_ORGANISM"))
)
# Search for only danio rerio organisms
search_url = reverse("search-materials-list") + "?search=danio rerio"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"])
database_count = 0
for material in Material.objects.all():
if material.organisms:
if ("Danio rerio" in material.organisms) and (
material.category == "MODEL_ORGANISM"
):
database_count += 1
self.assertEqual(model_organism_count, database_count)
def test_empty_search_returns_no_results(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?search="
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_count = int(response.json()["count"])
self.assertEqual(material_count, 0)
class SearchUsersEndpointTestCase(APITestCase):
"""
Tests /search/users operations.
"""
@classmethod
def setUpClass(cls):
super(SearchUsersEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
@classmethod
def tearDownClass(cls):
super(SearchUsersEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_name_returns_given_user(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-users-list")
+ "?search="
+ self.primary_prof.first_name
+ " "
+ self.primary_prof.last_name
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = response.json()["results"][0]["id"]
self.assertEqual(first_result_id, str(self.primary_prof.id))
def test_order_by_published_name_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-users-list") + "?ordering=published_name"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user_published_names = []
for user in response.json()["results"]:
if user["published_name"]:
user_published_names.append(user["published_name"])
self.assertEqual(user_published_names, sorted(user_published_names))
def test_empty_search_returns_no_results(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-users-list") + "?search="
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user_count = int(response.json()["count"])
self.assertEqual(user_count, 0)
class SearchOrganizationsEndpointTestCase(APITestCase):
"""
Tests /search/organizations operations.
"""
@classmethod
def setUpClass(cls):
super(SearchOrganizationsEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
cls.primary_lab = Organization.objects.get(name="PrimaryLab")
@classmethod
def tearDownClass(cls):
super(SearchOrganizationsEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_organization_name_returns_given_organization(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?search=" + self.primary_lab.name
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = int(response.json()["results"][0]["id"])
self.assertEqual(first_result_id, self.primary_lab.id)
def test_search_for_owner_attribute_returns_related_organizations(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?search=" + self.primary_prof.email
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organization_count = int(response.json()["count"])
organization_names = []
for org in response.json()["results"]:
organization_names.append(org["name"])
self.assertEqual(
organization_count, len(Organization.objects.filter(owner=self.primary_prof))
)
for name in organization_names:
self.assertTrue(
Organization.objects.filter(name=name, owner=self.primary_prof).exists()
)
def test_ordering_on_updated_at_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?ordering=" + "updated_at"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organization_dates = []
for org in response.json()["results"]:
date = datetime.datetime.strptime(org["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z").date()
organization_dates.append(date)
self.assertEqual(organization_dates, sorted(organization_dates))
| [
"resources_portal.models.Material.objects.filter",
"django.core.management.call_command",
"datetime.datetime.strptime",
"resources_portal.models.Organization.objects.filter",
"resources_portal.models.Organization.objects.get",
"resources_portal.models.Material.objects.get",
"resources_portal.models.Material.objects.all",
"django.urls.reverse",
"resources_portal.management.commands.populate_dev_database.populate_dev_database",
"resources_portal.models.Material.objects.first",
"resources_portal.models.User.objects.get"
]
| [((561, 584), 'resources_portal.management.commands.populate_dev_database.populate_dev_database', 'populate_dev_database', ([], {}), '()\n', (582, 584), False, 'from resources_portal.management.commands.populate_dev_database import populate_dev_database\n'), ((652, 699), 'django.core.management.call_command', 'call_command', (['"""search_index"""', '"""-f"""', '"""--rebuild"""'], {}), "('search_index', '-f', '--rebuild')\n", (664, 699), False, 'from django.core.management import call_command\n'), ((728, 768), 'resources_portal.models.User.objects.get', 'User.objects.get', ([], {'username': '"""PrimaryProf"""'}), "(username='PrimaryProf')\n", (744, 768), False, 'from resources_portal.models import Material, Organization, User\n'), ((798, 840), 'resources_portal.models.User.objects.get', 'User.objects.get', ([], {'username': '"""SecondaryProf"""'}), "(username='SecondaryProf')\n", (814, 840), False, 'from resources_portal.models import Material, Organization, User\n'), ((864, 900), 'resources_portal.models.User.objects.get', 'User.objects.get', ([], {'username': '"""PostDoc"""'}), "(username='PostDoc')\n", (880, 900), False, 'from resources_portal.models import Material, Organization, User\n'), ((928, 971), 'resources_portal.models.Organization.objects.get', 'Organization.objects.get', ([], {'name': '"""PrimaryLab"""'}), "(name='PrimaryLab')\n", (952, 971), False, 'from resources_portal.models import Material, Organization, User\n'), ((997, 1053), 'resources_portal.models.Material.objects.get', 'Material.objects.get', ([], {'title': '"""Melanoma Reduction Plasmid"""'}), "(title='Melanoma Reduction Plasmid')\n", (1017, 1053), False, 'from resources_portal.models import Material, Organization, User\n'), ((1078, 1134), 'resources_portal.models.Material.objects.get', 'Material.objects.get', ([], {'title': '"""Allele Extraction Protocol"""'}), "(title='Allele Extraction Protocol')\n", (1098, 1134), False, 'from resources_portal.models import Material, Organization, User\n'), ((1333, 1380), 'django.core.management.call_command', 'call_command', (['"""search_index"""', '"""-f"""', '"""--rebuild"""'], {}), "('search_index', '-f', '--rebuild')\n", (1345, 1380), False, 'from django.core.management import call_command\n'), ((2029, 2053), 'resources_portal.models.Material.objects.first', 'Material.objects.first', ([], {}), '()\n', (2051, 2053), False, 'from resources_portal.models import Material, Organization, User\n'), ((4583, 4605), 'resources_portal.models.Material.objects.all', 'Material.objects.all', ([], {}), '()\n', (4603, 4605), False, 'from resources_portal.models import Material, Organization, User\n'), ((5747, 5769), 'resources_portal.models.Material.objects.all', 'Material.objects.all', ([], {}), '()\n', (5767, 5769), False, 'from resources_portal.models import Material, Organization, User\n'), ((8051, 8083), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (8058, 8083), False, 'from django.urls import reverse\n'), ((8806, 8828), 'resources_portal.models.Material.objects.all', 'Material.objects.all', ([], {}), '()\n', (8826, 8828), False, 'from resources_portal.models import Material, Organization, User\n'), ((9724, 9747), 'resources_portal.management.commands.populate_dev_database.populate_dev_database', 'populate_dev_database', ([], {}), '()\n', (9745, 9747), False, 'from resources_portal.management.commands.populate_dev_database import populate_dev_database\n'), ((9815, 9862), 'django.core.management.call_command', 'call_command', 
(['"""search_index"""', '"""-f"""', '"""--rebuild"""'], {}), "('search_index', '-f', '--rebuild')\n", (9827, 9862), False, 'from django.core.management import call_command\n'), ((9891, 9931), 'resources_portal.models.User.objects.get', 'User.objects.get', ([], {'username': '"""PrimaryProf"""'}), "(username='PrimaryProf')\n", (9907, 9931), False, 'from resources_portal.models import Material, Organization, User\n'), ((10126, 10173), 'django.core.management.call_command', 'call_command', (['"""search_index"""', '"""-f"""', '"""--rebuild"""'], {}), "('search_index', '-f', '--rebuild')\n", (10138, 10173), False, 'from django.core.management import call_command\n'), ((11951, 11974), 'resources_portal.management.commands.populate_dev_database.populate_dev_database', 'populate_dev_database', ([], {}), '()\n', (11972, 11974), False, 'from resources_portal.management.commands.populate_dev_database import populate_dev_database\n'), ((12042, 12089), 'django.core.management.call_command', 'call_command', (['"""search_index"""', '"""-f"""', '"""--rebuild"""'], {}), "('search_index', '-f', '--rebuild')\n", (12054, 12089), False, 'from django.core.management import call_command\n'), ((12118, 12158), 'resources_portal.models.User.objects.get', 'User.objects.get', ([], {'username': '"""PrimaryProf"""'}), "(username='PrimaryProf')\n", (12134, 12158), False, 'from resources_portal.models import Material, Organization, User\n'), ((12185, 12228), 'resources_portal.models.Organization.objects.get', 'Organization.objects.get', ([], {'name': '"""PrimaryLab"""'}), "(name='PrimaryLab')\n", (12209, 12228), False, 'from resources_portal.models import Material, Organization, User\n'), ((12431, 12478), 'django.core.management.call_command', 'call_command', (['"""search_index"""', '"""-f"""', '"""--rebuild"""'], {}), "('search_index', '-f', '--rebuild')\n", (12443, 12478), False, 'from django.core.management import call_command\n'), ((8489, 8521), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (8496, 8521), False, 'from django.urls import reverse\n'), ((9247, 9279), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (9254, 9279), False, 'from django.urls import reverse\n'), ((10883, 10911), 'django.urls.reverse', 'reverse', (['"""search-users-list"""'], {}), "('search-users-list')\n", (10890, 10911), False, 'from django.urls import reverse\n'), ((11462, 11490), 'django.urls.reverse', 'reverse', (['"""search-users-list"""'], {}), "('search-users-list')\n", (11469, 11490), False, 'from django.urls import reverse\n'), ((1527, 1559), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (1534, 1559), False, 'from django.urls import reverse\n'), ((3269, 3301), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (3276, 3301), False, 'from django.urls import reverse\n'), ((3689, 3739), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', ([], {'category': '"""MODEL_ORGANISM"""'}), "(category='MODEL_ORGANISM')\n", (3712, 3739), False, 'from resources_portal.models import Material, Organization, User\n'), ((4121, 4153), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (4128, 4153), False, 'from django.urls import reverse\n'), ((6312, 6344), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", 
(6319, 6344), False, 'from django.urls import reverse\n'), ((8357, 8407), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', ([], {'category': '"""MODEL_ORGANISM"""'}), "(category='MODEL_ORGANISM')\n", (8380, 8407), False, 'from resources_portal.models import Material, Organization, User\n'), ((12641, 12677), 'django.urls.reverse', 'reverse', (['"""search-organizations-list"""'], {}), "('search-organizations-list')\n", (12648, 12677), False, 'from django.urls import reverse\n'), ((13125, 13161), 'django.urls.reverse', 'reverse', (['"""search-organizations-list"""'], {}), "('search-organizations-list')\n", (13132, 13161), False, 'from django.urls import reverse\n'), ((13570, 13622), 'resources_portal.models.Organization.objects.filter', 'Organization.objects.filter', ([], {'owner': 'self.primary_prof'}), '(owner=self.primary_prof)\n', (13597, 13622), False, 'from resources_portal.models import Material, Organization, User\n'), ((13945, 13981), 'django.urls.reverse', 'reverse', (['"""search-organizations-list"""'], {}), "('search-organizations-list')\n", (13952, 13981), False, 'from django.urls import reverse\n'), ((2232, 2264), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (2239, 2264), False, 'from django.urls import reverse\n'), ((6590, 6666), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["material['updated_at']", '"""%Y-%m-%dT%H:%M:%S.%f%z"""'], {}), "(material['updated_at'], '%Y-%m-%dT%H:%M:%S.%f%z')\n", (6616, 6666), False, 'import datetime\n'), ((6978, 7010), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (6985, 7010), False, 'from django.urls import reverse\n'), ((7430, 7506), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["material['updated_at']", '"""%Y-%m-%dT%H:%M:%S.%f%z"""'], {}), "(material['updated_at'], '%Y-%m-%dT%H:%M:%S.%f%z')\n", (7456, 7506), False, 'import datetime\n'), ((14226, 14297), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["org['updated_at']", '"""%Y-%m-%dT%H:%M:%S.%f%z"""'], {}), "(org['updated_at'], '%Y-%m-%dT%H:%M:%S.%f%z')\n", (14252, 14297), False, 'import datetime\n'), ((3009, 3076), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', ([], {'title': 'title', 'organization': 'self.primary_lab'}), '(title=title, organization=self.primary_lab)\n', (3032, 3076), False, 'from resources_portal.models import Material, Organization, User\n'), ((3826, 3889), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', ([], {'title': 'title', 'category': '"""MODEL_ORGANISM"""'}), "(title=title, category='MODEL_ORGANISM')\n", (3849, 3889), False, 'from resources_portal.models import Material, Organization, User\n'), ((5194, 5226), 'django.urls.reverse', 'reverse', (['"""search-materials-list"""'], {}), "('search-materials-list')\n", (5201, 5226), False, 'from django.urls import reverse\n'), ((7734, 7821), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', ([], {'title': 'title', 'category': '"""MODEL_ORGANISM"""', 'has_pre_print': '(True)'}), "(title=title, category='MODEL_ORGANISM',\n has_pre_print=True)\n", (7757, 7821), False, 'from resources_portal.models import Material, Organization, User\n'), ((10329, 10357), 'django.urls.reverse', 'reverse', (['"""search-users-list"""'], {}), "('search-users-list')\n", (10336, 10357), False, 'from django.urls import reverse\n'), ((13720, 13783), 
'resources_portal.models.Organization.objects.filter', 'Organization.objects.filter', ([], {'name': 'name', 'owner': 'self.primary_prof'}), '(name=name, owner=self.primary_prof)\n', (13747, 13783), False, 'from resources_portal.models import Material, Organization, User\n')] |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Util for log module. """
import logging
_LOGGER = logging.getLogger('MA')
def _find_caller():
"""
Bind findCaller() method, which is used to find the stack frame of the
caller so that we can note the source file name, line number and
function name.
"""
return _LOGGER.findCaller()
class LogUtil:
"""
Logging module.
Raises:
SyntaxError: If create this class.
"""
_instance = None
_logger = None
_extra_fmt = ' [%s] [%s] '
def __init__(self):
raise SyntaxError('can not instance, please use get_instance.')
@staticmethod
def get_instance():
"""
Get instance of class `LogUtil`.
Returns:
Object, instance of class `LogUtil`.
"""
if LogUtil._instance is None:
LogUtil._instance = object.__new__(LogUtil)
LogUtil._logger = _LOGGER
LogUtil._init_logger()
return LogUtil._instance
@staticmethod
def _init_logger():
"""
Initialize logger.
"""
LogUtil._logger.setLevel(logging.WARNING)
log_fmt = '[%(levelname)s] %(name)s(%(process)d:%(thread)d,' \
'%(processName)s):%(asctime)s%(message)s'
log_fmt = logging.Formatter(log_fmt)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_fmt)
# add the handlers to the logger
LogUtil._logger.handlers = []
LogUtil._logger.addHandler(console_handler)
LogUtil._logger.propagate = False
def set_level(self, level):
"""
Set the logging level of this logger, level must be an integer or a
string. Supported levels are 'NOTSET'(integer: 0), 'ERROR'(integer: 1-40),
'WARNING'('WARN', integer: 1-30), 'INFO'(integer: 1-20) and 'DEBUG'(integer: 1-10).
For example, if logger.set_level('WARNING') or logger.set_level(21), then
logger.warn() and logger.error() in scripts would be printed while running,
while logger.info() or logger.debug() would not be printed.
Args:
level (Union[int, str]): Level of logger.
"""
self._logger.setLevel(level)
def add_handler(self, handler):
"""
Add other handler supported by logging module.
Args:
handler (logging.Handler): Other handler supported by logging module.
Raises:
ValueError: If handler is not an instance of logging.Handler.
"""
if isinstance(handler, logging.Handler):
self._logger.addHandler(handler)
else:
raise ValueError('handler must be an instance of logging.Handler,'
' but got {}'.format(type(handler)))
def debug(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'DEBUG'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.debug(self._extra_fmt + msg, file_info, tag, *args)
def info(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'INFO'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.info(self._extra_fmt + msg, file_info, tag, *args)
def warn(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'WARNING'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.warning(self._extra_fmt + msg, file_info, tag, *args)
def error(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'ERROR'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.error(self._extra_fmt + msg, file_info, tag, *args)
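if __name__ == '__main__':
    # Minimal usage sketch; the tag, message and level below are illustrative only.
    demo_logger = LogUtil.get_instance()
    demo_logger.set_level('INFO')
    demo_logger.info('demo_tag', 'logger initialized with %d handler(s)', 1)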
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler"
]
| [((643, 666), 'logging.getLogger', 'logging.getLogger', (['"""MA"""'], {}), "('MA')\n", (660, 666), False, 'import logging\n'), ((1846, 1872), 'logging.Formatter', 'logging.Formatter', (['log_fmt'], {}), '(log_fmt)\n', (1863, 1872), False, 'import logging\n'), ((1957, 1980), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1978, 1980), False, 'import logging\n')] |
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from datetime import datetime
from typing import Dict, Generator
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
from airbyte_cdk.sources import Source
class SourceScaffoldSourcePython(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the integration
e.g: if a provided Stripe API token can be used to connect to the Stripe API.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
# Not Implemented
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration.
For example, given valid credentials to a Postgres database,
returns an Airbyte catalog where each postgres table is a stream, and each table column is a field.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteCatalog is an object describing a list of all available streams in this source.
A stream is an AirbyteStream object that includes:
- its stream name (or table name in the case of Postgres)
- json_schema providing the specifications of expected schema for this stream (a list of columns described
by their names and types)
"""
streams = []
stream_name = "TableName" # Example
json_schema = { # Example
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {"columnName": {"type": "string"}},
}
# Not Implemented
streams.append(AirbyteStream(name=stream_name, json_schema=json_schema))
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration,
catalog, and state.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:param catalog: The input catalog is a ConfiguredAirbyteCatalog which is almost the same as AirbyteCatalog
returned by discover(), but
in addition, it's been configured in the UI! For each particular stream and field, there may have been provided
with extra modifications such as: filtering streams and/or columns out, renaming some entities, etc
:param state: When a Airbyte reads data from a source, it might need to keep a checkpoint cursor to resume
replication in the future from that saved checkpoint.
This is the object that is provided with state from previous runs and avoid replicating the entire set of
data everytime.
:return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object.
"""
stream_name = "TableName" # Example
data = {"columnName": "Hello World"} # Example
# Not Implemented
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=stream_name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
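if __name__ == "__main__":
    # Minimal usage sketch; the empty config is illustrative only, and the scaffold's
    # check()/discover() stubs do not inspect it yet.
    demo_source = SourceScaffoldSourcePython()
    demo_logger = AirbyteLogger()
    print(demo_source.check(demo_logger, {}))
    print(demo_source.discover(demo_logger, {}))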
| [
"datetime.datetime.now",
"airbyte_cdk.models.AirbyteCatalog",
"airbyte_cdk.models.AirbyteStream",
"airbyte_cdk.models.AirbyteConnectionStatus"
]
| [((3981, 4012), 'airbyte_cdk.models.AirbyteCatalog', 'AirbyteCatalog', ([], {'streams': 'streams'}), '(streams=streams)\n', (3995, 4012), False, 'from airbyte_cdk.models import AirbyteCatalog, AirbyteConnectionStatus, AirbyteMessage, AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, Status, Type\n'), ((2282, 2330), 'airbyte_cdk.models.AirbyteConnectionStatus', 'AirbyteConnectionStatus', ([], {'status': 'Status.SUCCEEDED'}), '(status=Status.SUCCEEDED)\n', (2305, 2330), False, 'from airbyte_cdk.models import AirbyteCatalog, AirbyteConnectionStatus, AirbyteMessage, AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, Status, Type\n'), ((3908, 3964), 'airbyte_cdk.models.AirbyteStream', 'AirbyteStream', ([], {'name': 'stream_name', 'json_schema': 'json_schema'}), '(name=stream_name, json_schema=json_schema)\n', (3921, 3964), False, 'from airbyte_cdk.models import AirbyteCatalog, AirbyteConnectionStatus, AirbyteMessage, AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, Status, Type\n'), ((5790, 5804), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5802, 5804), False, 'from datetime import datetime\n')] |
from datetime import datetime
from typing import Optional, Dict, List, Union
from schema import Schema, Or
from src.monitorables.nodes.node import Node
from src.utils.exceptions import InvalidDictSchemaException
class ChainlinkNode(Node):
def __init__(self, node_name: str, node_id: str, parent_id: str) -> None:
super().__init__(node_name, node_id, parent_id)
# Metrics
self._went_down_at_prometheus = None
self._current_height = None
self._total_block_headers_received = None
self._max_pending_tx_delay = None
self._process_start_time_seconds = None
self._total_gas_bumps = None
self._total_gas_bumps_exceeds_limit = None
self._no_of_unconfirmed_txs = None
self._total_errored_job_runs = None
self._current_gas_price_info = {
'percentile': None,
'price': None,
}
self._eth_balance_info = {}
# This variable stores the url of the source used to get prometheus node
# data. Note that this had to be done because multiple prometheus
# sources can be associated with the same node, where at the same time
# only one source is available, and sources switch from time to time.
self._last_prometheus_source_used = None
# This stores the timestamp of the last successful monitoring round.
self._last_monitored_prometheus = None
@property
def is_down_prometheus(self) -> bool:
return self._went_down_at_prometheus is not None
@property
def went_down_at_prometheus(self) -> Optional[float]:
return self._went_down_at_prometheus
@property
def current_height(self) -> Optional[int]:
return self._current_height
@property
def total_block_headers_received(self) -> Optional[int]:
return self._total_block_headers_received
@property
def max_pending_tx_delay(self) -> Optional[int]:
return self._max_pending_tx_delay
@property
def process_start_time_seconds(self) -> Optional[float]:
return self._process_start_time_seconds
@property
def total_gas_bumps(self) -> Optional[int]:
return self._total_gas_bumps
@property
def total_gas_bumps_exceeds_limit(self) -> Optional[int]:
return self._total_gas_bumps_exceeds_limit
@property
def no_of_unconfirmed_txs(self) -> Optional[int]:
return self._no_of_unconfirmed_txs
@property
def total_errored_job_runs(self) -> Optional[int]:
return self._total_errored_job_runs
@property
def current_gas_price_info(self) -> Dict[str, Optional[float]]:
return self._current_gas_price_info
@property
def eth_balance_info(self) -> Dict[str, Union[str, float]]:
return self._eth_balance_info
@property
def last_prometheus_source_used(self) -> Optional[str]:
return self._last_prometheus_source_used
@property
def last_monitored_prometheus(self) -> Optional[float]:
return self._last_monitored_prometheus
@staticmethod
def get_int_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing integer prometheus
: metrics.
"""
return [
'current_height',
'total_block_headers_received',
'max_pending_tx_delay', 'total_gas_bumps',
'total_gas_bumps_exceeds_limit', 'no_of_unconfirmed_txs',
'total_errored_job_runs'
]
@staticmethod
def get_float_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing float prometheus
: metrics.
"""
return [
'went_down_at_prometheus', 'process_start_time_seconds',
'last_monitored_prometheus'
]
@staticmethod
def get_dict_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing dict prometheus
: metrics.
"""
return ['current_gas_price_info', 'eth_balance_info']
@staticmethod
def get_str_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing string prometheus
: metrics.
"""
return ['last_prometheus_source_used']
def get_all_prometheus_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing prometheus metrics
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [
*str_prometheus_metric_attributes,
*int_prometheus_metric_attributes,
*float_prometheus_metric_attributes,
*dict_prometheus_metric_attributes
]
def get_int_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing int metrics.
"""
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
return [*int_prometheus_metric_attributes]
def get_float_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing float metrics.
"""
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
return [*float_prometheus_metric_attributes]
def get_dict_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing dict metrics.
"""
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [*dict_prometheus_metric_attributes]
def get_str_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing str metrics.
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
return [*str_prometheus_metric_attributes]
def get_all_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing metrics
"""
prometheus_metric_attributes = \
self.get_all_prometheus_metric_attributes()
return [*prometheus_metric_attributes]
def set_went_down_at_prometheus(
self, went_down_at_prometheus: Optional[float]) -> None:
self._went_down_at_prometheus = went_down_at_prometheus
def set_prometheus_as_down(self, downtime: Optional[float]) -> None:
"""
This function sets the node's prometheus interface as down. It sets the
time that the interface was initially down to the parameter 'downtime'
if it is not None, otherwise it sets it to the current timestamp.
:param downtime:
:return:
"""
if downtime is None:
self.set_went_down_at_prometheus(datetime.now().timestamp())
else:
self.set_went_down_at_prometheus(downtime)
def set_prometheus_as_up(self) -> None:
"""
This function sets a node's prometheus interface as up. A node's
interface is said to be up if went_down_at_prometheus is None.
:return: None
"""
self.set_went_down_at_prometheus(None)
def set_current_height(self, new_height: Optional[int]) -> None:
self._current_height = new_height
def set_total_block_headers_received(
self, new_total_block_headers_received: Optional[int]) -> None:
self._total_block_headers_received = new_total_block_headers_received
def set_max_pending_tx_delay(
self, new_max_pending_tx_delay: Optional[int]) -> None:
self._max_pending_tx_delay = new_max_pending_tx_delay
def set_process_start_time_seconds(
self, new_process_start_time_seconds: Optional[float]) -> None:
self._process_start_time_seconds = new_process_start_time_seconds
def set_total_gas_bumps(self, new_total_gas_bumps: Optional[int]) -> None:
self._total_gas_bumps = new_total_gas_bumps
def set_total_gas_bumps_exceeds_limit(
self, new_total_gas_bumps_exceeds_limit: Optional[int]) -> None:
self._total_gas_bumps_exceeds_limit = new_total_gas_bumps_exceeds_limit
def set_no_of_unconfirmed_txs(
self, new_no_of_unconfirmed_txs: Optional[int]) -> None:
self._no_of_unconfirmed_txs = new_no_of_unconfirmed_txs
def set_total_errored_job_runs(
self, new_total_errored_job_runs: Optional[int]) -> None:
self._total_errored_job_runs = new_total_errored_job_runs
def set_current_gas_price_info(self, new_percentile: Optional[float],
new_price: Optional[float]) -> None:
"""
This method sets the current_gas_price_info dict based on the new
percentile and price. This is done in this way to protect the Dict
schema.
:param new_percentile: The new percentile to be stored
:param new_price: The new gas to be stored
:return: None
"""
self._current_gas_price_info['percentile'] = new_percentile
self._current_gas_price_info['price'] = new_price
@staticmethod
def _new_eth_balance_info_valid(new_eth_balance_info: Dict) -> bool:
"""
This method checks that the new eth_balance_info dict obeys the required
schema.
:param new_eth_balance_info: The dict to check
:return: True if the dict obeys the required schema
: False otherwise
"""
schema = Schema(Or({
'address': str,
'balance': float,
'latest_usage': float,
}, {}))
return schema.is_valid(new_eth_balance_info)
def set_eth_balance_info(
self, new_eth_balance_info: Dict[str, Union[str, float]]) -> None:
"""
This method sets the new_eth_balance_info. It first checks that the new
dict obeys the required schema. If not, an InvalidDictSchemaException is
raised.
        :param new_eth_balance_info: The new eth_balance_info to store.
        :return: None
        """
if self._new_eth_balance_info_valid(new_eth_balance_info):
self._eth_balance_info = new_eth_balance_info
else:
raise InvalidDictSchemaException('new_eth_balance_info')
def set_last_prometheus_source_used(
self, new_last_prometheus_source_used: Optional[str]) -> None:
self._last_prometheus_source_used = new_last_prometheus_source_used
def set_last_monitored_prometheus(
self, new_last_monitored_prometheus: Optional[float]) -> None:
self._last_monitored_prometheus = new_last_monitored_prometheus
def reset(self) -> None:
"""
This method resets all metrics to their initial state
:return: None
"""
self.set_went_down_at_prometheus(None)
self.set_current_height(None)
self.set_total_block_headers_received(None)
self.set_max_pending_tx_delay(None)
self.set_process_start_time_seconds(None)
self.set_total_gas_bumps(None)
self.set_total_gas_bumps_exceeds_limit(None)
self.set_no_of_unconfirmed_txs(None)
self.set_total_errored_job_runs(None)
self.set_current_gas_price_info(None, None)
self.set_eth_balance_info({})
self.set_last_prometheus_source_used(None)
self.set_last_monitored_prometheus(None)
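if __name__ == '__main__':
    # Minimal usage sketch; the identifiers and metric values are illustrative only
    # (assumes the src.monitorables and src.utils packages are importable).
    demo_node = ChainlinkNode('chainlink_node_1', 'node_id_1', 'parent_id_1')
    demo_node.set_current_height(1000)
    demo_node.set_eth_balance_info({'address': '0x0', 'balance': 10.0, 'latest_usage': 0.0})
    demo_node.set_prometheus_as_down(None)
    print(demo_node.is_down_prometheus, demo_node.current_height)
    demo_node.reset()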
| [
"src.utils.exceptions.InvalidDictSchemaException",
"datetime.datetime.now",
"schema.Or"
]
| [((10028, 10093), 'schema.Or', 'Or', (["{'address': str, 'balance': float, 'latest_usage': float}", '{}'], {}), "({'address': str, 'balance': float, 'latest_usage': float}, {})\n", (10030, 10093), False, 'from schema import Schema, Or\n'), ((10760, 10810), 'src.utils.exceptions.InvalidDictSchemaException', 'InvalidDictSchemaException', (['"""new_eth_balance_info"""'], {}), "('new_eth_balance_info')\n", (10786, 10810), False, 'from src.utils.exceptions import InvalidDictSchemaException\n'), ((7326, 7340), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7338, 7340), False, 'from datetime import datetime\n')] |
import pickle
import numpy as np
import os
def _analyze_query_point_assignment(
query_data_dict: dict,
init_Rdata_dict: dict,
init_Edata_dict: dict,
num_R: int,
query_point_assignment_array: np.ndarray,
root: str,
n_points_to_copy=50,
):
"""
Analyzes and visualizes qDCA results.
:param query_data_dict: raw query data.
:param init_Rdata_dict: raw R data.
:param init_Edata_dict: raw E data.
:param num_R: total number of R points.
:param query_point_assignment_array: query point assignments results.
:param root: root directory of the experiment.
:param n_points_to_copy: number of images to save.
:return: accuracy of qDCA assignments; list of (R, query) points with same label;
list of (R, query) points with different label
"""
true_query_data_labels = query_data_dict["labels"]
assigned_R = query_point_assignment_array[
query_point_assignment_array[:, 1] < num_R, 1
]
assigned_E = query_point_assignment_array[
query_point_assignment_array[:, 1] >= num_R, 1
]
assigned_R_labels = init_Rdata_dict["labels"][assigned_R]
assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R]
assigned_query_data_labels = np.empty(
shape=query_point_assignment_array.shape[0]
).astype(np.int32)
assigned_query_data_labels[
query_point_assignment_array[:, 1] < num_R
] = assigned_R_labels
assigned_query_data_labels[
query_point_assignment_array[:, 1] >= num_R
] = assigned_E_labels
accuracy = (
true_query_data_labels == assigned_query_data_labels
).sum() / assigned_query_data_labels.shape[0]
same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0]
wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0]
correct_pairs = []
for i in query_point_assignment_array[same_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
wrong_pairs = []
for i in query_point_assignment_array[wrong_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
with open(
os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb"
) as f:
pickle.dump(
{
"accuracy": accuracy,
"same_label_idx": same_label_idx,
"wrong_label_idx": wrong_label_idx,
"correct_pairs": correct_pairs,
"wrong_pairs": wrong_pairs,
"query_point_assignment_array": query_point_assignment_array,
},
f,
)
same_label_image_path = os.path.join(root, "visualization", "same_label_images")
wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images")
if not os.path.exists(wrong_label_image_path):
os.mkdir(wrong_label_image_path)
if not os.path.exists(same_label_image_path):
os.mkdir(same_label_image_path)
for i in range(n_points_to_copy):
query_image_path, init_image_path, query_label, init_label = correct_pairs[i]
path_to_copy = os.path.join(
same_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(init_image_path, path_to_copy))
path_to_copy2 = os.path.join(
same_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(query_image_path, path_to_copy2))
(
w_query_image_path,
w_init_image_path,
w_query_label,
w_init_label,
) = wrong_pairs[i]
path_to_copy_w = os.path.join(
wrong_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(w_query_label), str(w_init_label)
),
)
os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w))
path_to_copy_w2 = os.path.join(
wrong_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
i, w_query_label, w_init_label
),
)
os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2))
return accuracy, correct_pairs, wrong_pairs
def _generate_query_sets(version: str, N: int = 5000):
"""
Generates query sets for qDCA experiment in Section 4.3.
:param version: either version1 (dogs vs kitchen utils) or version2 (random).
:param N: number of points to sample for R used in DCA.
"""
with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f:
Rdata_v1 = pickle.load(f)
with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f:
Edata_v1 = pickle.load(f)
init_Ridxs = np.random.choice(
np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False
)
query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs)
init_Eidxs = np.random.choice(
np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False
)
query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs)
with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs],
"feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs],
"labels": Rdata_v1["labels"][init_Ridxs],
"paths": np.array(Rdata_v1["paths"])[init_Ridxs],
"init_Ridx": init_Ridxs,
"query_Ridx": query_Ridxs,
},
f,
)
with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Edata_v1["feat_lin1"][init_Eidxs],
"feat_lin2": Edata_v1["feat_lin2"][init_Eidxs],
"labels": Edata_v1["labels"][init_Eidxs],
"paths": np.array(Edata_v1["paths"])[init_Eidxs],
"init_Eidx": init_Eidxs,
"query_Eidx": query_Eidxs,
},
f,
)
with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": np.concatenate(
[
Rdata_v1["feat_lin1"][query_Ridxs],
Edata_v1["feat_lin1"][query_Eidxs],
]
),
"feat_lin2": np.concatenate(
[
Rdata_v1["feat_lin2"][query_Ridxs],
Edata_v1["feat_lin2"][query_Eidxs],
]
),
"labels": np.concatenate(
[Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]]
),
"paths": np.concatenate(
[
np.array(Rdata_v1["paths"])[query_Ridxs],
np.array(Edata_v1["paths"])[query_Eidxs],
]
),
"init_Eidxs": init_Eidxs,
"query_Eidxs": query_Eidxs,
"init_Ridxs": init_Ridxs,
"query_Ridxs": query_Ridxs,
},
f,
)
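# Usage sketch (hedged): the version string below comes from the docstring's options and
# N is the default sample size; the call expects the corresponding
# representations/vgg16/<version>/Rfeatures.pkl and Efeatures.pkl files to exist.
#   _generate_query_sets(version="version1", N=5000)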
| [
"os.path.exists",
"pickle.dump",
"numpy.where",
"os.path.join",
"pickle.load",
"numpy.array",
"numpy.empty",
"os.mkdir",
"numpy.concatenate"
]
| [((4090, 4146), 'os.path.join', 'os.path.join', (['root', '"""visualization"""', '"""same_label_images"""'], {}), "(root, 'visualization', 'same_label_images')\n", (4102, 4146), False, 'import os\n'), ((4176, 4233), 'os.path.join', 'os.path.join', (['root', '"""visualization"""', '"""wrong_label_images"""'], {}), "(root, 'visualization', 'wrong_label_images')\n", (4188, 4233), False, 'import os\n'), ((1702, 1764), 'numpy.where', 'np.where', (['(true_query_data_labels == assigned_query_data_labels)'], {}), '(true_query_data_labels == assigned_query_data_labels)\n', (1710, 1764), True, 'import numpy as np\n'), ((1790, 1852), 'numpy.where', 'np.where', (['(true_query_data_labels != assigned_query_data_labels)'], {}), '(true_query_data_labels != assigned_query_data_labels)\n', (1798, 1852), True, 'import numpy as np\n'), ((3684, 3926), 'pickle.dump', 'pickle.dump', (["{'accuracy': accuracy, 'same_label_idx': same_label_idx, 'wrong_label_idx':\n wrong_label_idx, 'correct_pairs': correct_pairs, 'wrong_pairs':\n wrong_pairs, 'query_point_assignment_array': query_point_assignment_array}", 'f'], {}), "({'accuracy': accuracy, 'same_label_idx': same_label_idx,\n 'wrong_label_idx': wrong_label_idx, 'correct_pairs': correct_pairs,\n 'wrong_pairs': wrong_pairs, 'query_point_assignment_array':\n query_point_assignment_array}, f)\n", (3695, 3926), False, 'import pickle\n'), ((4245, 4283), 'os.path.exists', 'os.path.exists', (['wrong_label_image_path'], {}), '(wrong_label_image_path)\n', (4259, 4283), False, 'import os\n'), ((4293, 4325), 'os.mkdir', 'os.mkdir', (['wrong_label_image_path'], {}), '(wrong_label_image_path)\n', (4301, 4325), False, 'import os\n'), ((4338, 4375), 'os.path.exists', 'os.path.exists', (['same_label_image_path'], {}), '(same_label_image_path)\n', (4352, 4375), False, 'import os\n'), ((4385, 4416), 'os.mkdir', 'os.mkdir', (['same_label_image_path'], {}), '(same_label_image_path)\n', (4393, 4416), False, 'import os\n'), ((6310, 6324), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6321, 6324), False, 'import pickle\n'), ((6421, 6435), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6432, 6435), False, 'import pickle\n'), ((1247, 1300), 'numpy.empty', 'np.empty', ([], {'shape': 'query_point_assignment_array.shape[0]'}), '(shape=query_point_assignment_array.shape[0])\n', (1255, 1300), True, 'import numpy as np\n'), ((3591, 3657), 'os.path.join', 'os.path.join', (['root', '"""logs"""', '"""analyzed_query_point_assignments.pkl"""'], {}), "(root, 'logs', 'analyzed_query_point_assignments.pkl')\n", (3603, 3657), False, 'import os\n'), ((7960, 8053), 'numpy.concatenate', 'np.concatenate', (["[Rdata_v1['feat_lin1'][query_Ridxs], Edata_v1['feat_lin1'][query_Eidxs]]"], {}), "([Rdata_v1['feat_lin1'][query_Ridxs], Edata_v1['feat_lin1'][\n query_Eidxs]])\n", (7974, 8053), True, 'import numpy as np\n'), ((8188, 8281), 'numpy.concatenate', 'np.concatenate', (["[Rdata_v1['feat_lin2'][query_Ridxs], Edata_v1['feat_lin2'][query_Eidxs]]"], {}), "([Rdata_v1['feat_lin2'][query_Ridxs], Edata_v1['feat_lin2'][\n query_Eidxs]])\n", (8202, 8281), True, 'import numpy as np\n'), ((8413, 8500), 'numpy.concatenate', 'np.concatenate', (["[Rdata_v1['labels'][query_Ridxs], Edata_v1['labels'][query_Eidxs]]"], {}), "([Rdata_v1['labels'][query_Ridxs], Edata_v1['labels'][\n query_Eidxs]])\n", (8427, 8500), True, 'import numpy as np\n'), ((7153, 7180), 'numpy.array', 'np.array', (["Rdata_v1['paths']"], {}), "(Rdata_v1['paths'])\n", (7161, 7180), True, 'import numpy as np\n'), ((7649, 7676), 'numpy.array', 
'np.array', (["Edata_v1['paths']"], {}), "(Edata_v1['paths'])\n", (7657, 7676), True, 'import numpy as np\n'), ((8622, 8649), 'numpy.array', 'np.array', (["Rdata_v1['paths']"], {}), "(Rdata_v1['paths'])\n", (8630, 8649), True, 'import numpy as np\n'), ((8688, 8715), 'numpy.array', 'np.array', (["Edata_v1['paths']"], {}), "(Edata_v1['paths'])\n", (8696, 8715), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
###############################################################################
#
# Category Summaries
#
#
###############################################################################
import datetime
import io
import json
import logging
import pprint
import sys
from typing import Dict, Any
from dateutil import tz
# set logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create handler
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s"
c_format = logging.Formatter(LOG_FORMAT)
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger.addHandler(c_handler)
DATE_FORMAT = "%Y%m%dT%H%M%SZ"
# TODO: Convert to defaultdict
# https://www.accelebrate.com/blog/using-defaultdict-python
# https://stackoverflow.com/questions/9358983/dictionaries-and-default-values
# https://docs.python.org/2/library/collections.html#collections.defaultdict
CATEGORIES: dict = {
"PT": "Personal Time",
"PW": "Planned Work",
"UW": "Unplanned Work",
"OW": "Other Work",
}
def main():
print("~" * 100)
totals = calculate_totals(sys.stdin)
# print(totals)
if not totals:
sys.exit(0)
categories_total = extract_categories(totals)
# All Categories Statistics
category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_category_breakdown = format_category_breakdown(category_percent_breakdown)
display_category_breakdown(formatted_category_breakdown)
# remove personal category
categories_total.pop("Personal Time", None)
work_category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_work_category_breakdown = format_category_breakdown(work_category_percent_breakdown)
display_category_breakdown(formatted_work_category_breakdown)
# formatted_category_breakdown.pop("Personal Time", None)
# formatted
# print(type(formatted_category_breakdown))
# print(formatted_category_breakdown.keys())
def format_seconds(seconds: int) -> str:
"""
Convert seconds to a formatted string
    Convert seconds: 3661
    To formatted: "   1:01:01" (hours padded to a width of 4)
"""
# print(seconds, type(seconds))
hours = seconds // 3600
minutes = seconds % 3600 // 60
seconds = seconds % 60
return f"{hours:4d}:{minutes:02d}:{seconds:02d}"
def calculate_totals(input_stream: io.TextIOWrapper) -> Dict[str, datetime.timedelta]:
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
# Extract the configuration settings.
header = 1
configuration = dict()
body = ""
for line in input_stream:
if header:
if line == "\n":
header = 0
else:
fields = line.strip().split(": ", 2)
if len(fields) == 2:
configuration[fields[0]] = fields[1]
else:
configuration[fields[0]] = ""
else:
body += line
# Sum the seconds tracked by tag
totals = dict()
untagged = None
j = json.loads(body)
for object in j:
start = datetime.datetime.strptime(object["start"], DATE_FORMAT)
if "end" in object:
end = datetime.datetime.strptime(object["end"], DATE_FORMAT)
else:
end = datetime.datetime.utcnow()
tracked = end - start
if "tags" not in object or object["tags"] == []:
if untagged is None:
untagged = tracked
else:
untagged += tracked
else:
for tag in object["tags"]:
if tag in totals:
totals[tag] += tracked
else:
totals[tag] = tracked
if "temp.report.start" not in configuration:
print("There is no data in the database")
return totals
start_utc = datetime.datetime.strptime(configuration["temp.report.start"], DATE_FORMAT)
start_utc = start_utc.replace(tzinfo=from_zone)
start = start_utc.astimezone(to_zone)
if "temp.report.end" in configuration:
end_utc = datetime.datetime.strptime(configuration["temp.report.end"], DATE_FORMAT)
end_utc = end_utc.replace(tzinfo=from_zone)
end = end_utc.astimezone(to_zone)
else:
end = datetime.datetime.now()
if len(totals) == 0 and untagged is None:
print(f"No data in the range {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
print(f"\nCategory Summary Data for {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
def extract_categories(totals: Dict[str, datetime.timedelta]) -> Dict[str, datetime.timedelta]:
categories_total = {}
for category, category_full_name in CATEGORIES.items():
categories_total[category_full_name] = totals.get(category, datetime.timedelta(0))
return categories_total
def get_category_percent_breakdown(
category_run_times: Dict[str, datetime.timedelta]
) -> Dict[str, Any]:
logger.debug("Getting category percentage breakdown...")
total_time = sum([run_time.total_seconds() for run_time in category_run_times.values()])
logger.debug(f"Total Time:{total_time}")
category_percentage_breakdown: dict = {}
for category, run_time in category_run_times.items():
category_percent = run_time.total_seconds() / total_time
category_percentage_breakdown[category] = {
"percent": category_percent,
"duration": run_time.total_seconds() / 60,
"run_time": format_seconds(int(run_time.total_seconds())),
}
# add total time statistics
category_percentage_breakdown["Total"] = {
"percent": total_time / total_time,
"duration": total_time / 60,
"run_time": format_seconds(int(total_time)),
}
logger.debug(pprint.pformat(category_percentage_breakdown))
return category_percentage_breakdown
def format_category_breakdown(category_breakdown: dict) -> Dict[str, Any]:
# print(type(category_breakdown))
# pprint.pprint(category_breakdown)
formatted_category_breakdown = {}
for category, category_statistics in category_breakdown.items():
formatted_category_breakdown[category] = {
# convert duration to mins
"duration": round(category_statistics["duration"], 2),
"percent": round(category_statistics["percent"] * 100, 2),
"run_time": category_statistics["run_time"],
}
return formatted_category_breakdown
def display_category_breakdown(category_breakdown: dict, title: str = "Category Breakdown"):
# Determine largest width
max_width = len("Category")
    for category in category_breakdown:
        if len(category) > max_width:
            max_width = len(category)
print_dotted_line()
print(f"\t\t{title.capitalize():>{max_width}}")
print(
f"{'Category':{max_width}}\t"
f"{'Duration':{max_width}}\t"
f"{'Run_Time':>{max_width + 2}}\t"
f"{'Percent':{max_width + 1}}"
)
for category, category_statistics in category_breakdown.items():
print(
f"{category:{max_width}}\t"
f"{category_statistics['duration']:{max_width}}\t"
f"{category_statistics['run_time']:}\t"
f"{category_statistics['percent']}%"
)
print_dotted_line()
def print_dotted_line(width: int = 72):
"""Print a dotted (rather 'dashed') line"""
print("-" * width)
if __name__ == "__main__":
main()
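# Usage sketch (hedged): Timewarrior feeds report extensions their data on stdin in the
# "header, blank line, JSON body" format parsed above, so this script is intended to be
# dropped into ~/.timewarrior/extensions/ and run via `timew report <extension_name>`,
# where the extension name is whatever filename it is installed under.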
| [
"logging.getLogger",
"json.loads",
"logging.StreamHandler",
"dateutil.tz.tzlocal",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"logging.Formatter",
"dateutil.tz.tzutc",
"pprint.pformat",
"datetime.datetime.now",
"sys.exit",
"datetime.timedelta"
]
| [((369, 396), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (386, 396), False, 'import logging\n'), ((457, 480), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (478, 480), False, 'import logging\n'), ((651, 680), 'logging.Formatter', 'logging.Formatter', (['LOG_FORMAT'], {}), '(LOG_FORMAT)\n', (668, 680), False, 'import logging\n'), ((2583, 2593), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (2591, 2593), False, 'from dateutil import tz\n'), ((2608, 2620), 'dateutil.tz.tzlocal', 'tz.tzlocal', ([], {}), '()\n', (2618, 2620), False, 'from dateutil import tz\n'), ((3187, 3203), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (3197, 3203), False, 'import json\n'), ((4003, 4078), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["configuration['temp.report.start']", 'DATE_FORMAT'], {}), "(configuration['temp.report.start'], DATE_FORMAT)\n", (4029, 4078), False, 'import datetime\n'), ((1304, 1315), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1312, 1315), False, 'import sys\n'), ((3241, 3297), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["object['start']", 'DATE_FORMAT'], {}), "(object['start'], DATE_FORMAT)\n", (3267, 3297), False, 'import datetime\n'), ((4235, 4308), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["configuration['temp.report.end']", 'DATE_FORMAT'], {}), "(configuration['temp.report.end'], DATE_FORMAT)\n", (4261, 4308), False, 'import datetime\n'), ((4427, 4450), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4448, 4450), False, 'import datetime\n'), ((5976, 6021), 'pprint.pformat', 'pprint.pformat', (['category_percentage_breakdown'], {}), '(category_percentage_breakdown)\n', (5990, 6021), False, 'import pprint\n'), ((3345, 3399), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["object['end']", 'DATE_FORMAT'], {}), "(object['end'], DATE_FORMAT)\n", (3371, 3399), False, 'import datetime\n'), ((3432, 3458), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3456, 3458), False, 'import datetime\n'), ((4977, 4998), 'datetime.timedelta', 'datetime.timedelta', (['(0)'], {}), '(0)\n', (4995, 4998), False, 'import datetime\n')] |
from flask_restful import Resource, reqparse
from models.hotel import HotelModel
from flask_jwt_extended import jwt_required
from models.site import SiteModel
from resources.filtros import *
import sqlite3
path_params = reqparse.RequestParser()
path_params.add_argument('cidade', type=str)
path_params.add_argument('estrelas_min', type=float)
path_params.add_argument('estrelas_max', type=float)
path_params.add_argument('diaria_min', type=float)
path_params.add_argument('diaria_max', type=float)
path_params.add_argument('limit', type=float)
path_params.add_argument('offset', type=float)
class Hoteis(Resource):
def get(self):
connection = sqlite3.connect('banco.db')
cursor = connection.cursor()
dados = path_params.parse_args()
dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None}
parametros = normalize_path_params(**dados_validos)
if not parametros.get('cidade'):
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_sem_cidade, tupla)
else:
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_com_cidade, tupla)
hoteis = []
for linha in resultado:
hoteis.append({
'hotel_id': linha[0],
'nome': linha[1],
'estrelas': linha[2],
'diaria': linha[3],
'cidade': linha[4],
'site_id': linha[5]
})
return {'hoteis': hoteis}
class Hotel(Resource):
argumentos = reqparse.RequestParser()
argumentos.add_argument('nome', type=str, required=True, help="The field 'nome' cannot be left blank")
argumentos.add_argument('estrelas', type=float, required=True, help="The field 'estrelas' cannot be left blank")
argumentos.add_argument('diaria')
argumentos.add_argument('cidade')
argumentos.add_argument('site_id', type=int, required=True, help="Every hotel needs to be linked with site")
def get(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
return hotel.json()
return {'message': 'Hotel not found.'}, 404
@jwt_required()
def post(self, hotel_id):
if HotelModel.find_hotel(hotel_id):
return {"message": "Hotel id '{}' already exists.".format(hotel_id)}, 400
dados = Hotel.argumentos.parse_args()
hotel = HotelModel(hotel_id, **dados)
if not SiteModel.find_by_id(dados.get('site_id')):
return {'message': 'The hotel must be associated to a valid site id'}, 400
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json()
@jwt_required()
def put(self, hotel_id):
dados = Hotel.argumentos.parse_args()
hotel_encontrado = HotelModel.find_hotel(hotel_id)
if hotel_encontrado:
hotel_encontrado.update_hotel(**dados)
hotel_encontrado.save_hotel()
return hotel_encontrado.json(), 200
hotel = HotelModel(hotel_id, **dados)
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json(), 201 #created
@jwt_required()
def delete(self, hotel_id):
global hoteis
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
try:
hotel.delete_hotel()
except:
return {'message': 'An error occurred trying to delete hotel.'}, 500
return {'message': 'Hotel deleted.'}
return {'message': 'Hotel not found.'}, 404
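# Minimal wiring sketch (hedged): these resources are normally registered on a
# flask_restful Api instance elsewhere in the project; the app setup below is illustrative.
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(Hoteis, '/hoteis')
#   api.add_resource(Hotel, '/hoteis/<string:hotel_id>')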
| [
"sqlite3.connect",
"flask_restful.reqparse.RequestParser",
"models.hotel.HotelModel.find_hotel",
"flask_jwt_extended.jwt_required",
"models.hotel.HotelModel"
]
| [((254, 278), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (276, 278), False, 'from flask_restful import Resource, reqparse\n'), ((1671, 1695), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (1693, 1695), False, 'from flask_restful import Resource, reqparse\n'), ((2295, 2309), 'flask_jwt_extended.jwt_required', 'jwt_required', ([], {}), '()\n', (2307, 2309), False, 'from flask_jwt_extended import jwt_required\n'), ((2910, 2924), 'flask_jwt_extended.jwt_required', 'jwt_required', ([], {}), '()\n', (2922, 2924), False, 'from flask_jwt_extended import jwt_required\n'), ((3483, 3497), 'flask_jwt_extended.jwt_required', 'jwt_required', ([], {}), '()\n', (3495, 3497), False, 'from flask_jwt_extended import jwt_required\n'), ((699, 726), 'sqlite3.connect', 'sqlite3.connect', (['"""banco.db"""'], {}), "('banco.db')\n", (714, 726), False, 'import sqlite3\n'), ((2155, 2186), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', (['hotel_id'], {}), '(hotel_id)\n', (2176, 2186), False, 'from models.hotel import HotelModel\n'), ((2352, 2383), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', (['hotel_id'], {}), '(hotel_id)\n', (2373, 2383), False, 'from models.hotel import HotelModel\n'), ((2550, 2579), 'models.hotel.HotelModel', 'HotelModel', (['hotel_id'], {}), '(hotel_id, **dados)\n', (2560, 2579), False, 'from models.hotel import HotelModel\n'), ((3029, 3060), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', (['hotel_id'], {}), '(hotel_id)\n', (3050, 3060), False, 'from models.hotel import HotelModel\n'), ((3256, 3285), 'models.hotel.HotelModel', 'HotelModel', (['hotel_id'], {}), '(hotel_id, **dados)\n', (3266, 3285), False, 'from models.hotel import HotelModel\n'), ((3568, 3599), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', (['hotel_id'], {}), '(hotel_id)\n', (3589, 3599), False, 'from models.hotel import HotelModel\n')] |
# Generated by Django 3.2.2 on 2021-09-02 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('django_db_logger', '0001_initial'),
('policyengine', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='evaluationlog',
name='community',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='policyengine.community'),
),
migrations.AddField(
model_name='evaluationlog',
name='proposal',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='policyengine.proposal'),
),
]
| [
"django.db.models.ForeignKey"
]
| [((437, 557), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""policyengine.community"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='policyengine.community')\n", (454, 557), False, 'from django.db import migrations, models\n'), ((681, 800), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""policyengine.proposal"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='policyengine.proposal')\n", (698, 800), False, 'from django.db import migrations, models\n')] |
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# TODO: db_uri
# dialect+driver://username:password@host:port/database?charset=utf8
DB_URI = 'mysql+pymysql://root:[email protected]:3300/first_sqlalchemy?charset=utf8'
engine = create_engine(DB_URI)
Base = declarative_base(bind=engine)
session = sessionmaker(bind=engine)()
# TODO: define the User model
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(50), nullable=False)
def __repr__(self):
return '<User(id={id}, name={name})>'.format(id=self.id, name=self.name)
# TODO: define the Article model
class Article(Base):
__tablename__ = 'article'
id = Column(Integer, primary_key=True, autoincrement=True)
title = Column(String(50), nullable=False)
    # TODO: foreign key constraint
uid = Column(Integer, ForeignKey('user.id'), nullable=False)
authors = relationship('User', backref='articles')
# TODO: drop the tables
# Base.metadata.drop_all()
# TODO: create the tables
# Base.metadata.create_all()
#
# user = User(name='zhiliao')
# article1 = Article(title='python')
# article2 = Article(title='flask')
#
# user.articles.append(article1)
# user.articles.append(article2)
# TODO: commit the data
# session.add(user)
# session.commit()
# TODO: 1. delete via session.delete without `nullable=False` on the foreign key
# TODO: 2. delete via session.delete with `nullable=False` set, which blocks the delete
user = session.query(User).first()
print(user)
session.delete(user)
session.commit()
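# Relationship usage sketch (hedged; assumes the tables still contain rows):
#   user = session.query(User).first()
#   print(user.articles)    # backref created by relationship(): this user's Article rows
#   article = session.query(Article).first()
#   print(article.authors)  # forward side: the User that owns the article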
| [
"sqlalchemy.orm.relationship",
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"sqlalchemy.ForeignKey",
"sqlalchemy.String",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column"
]
| [((364, 385), 'sqlalchemy.create_engine', 'create_engine', (['DB_URI'], {}), '(DB_URI)\n', (377, 385), False, 'from sqlalchemy import create_engine, Column, Integer, String, ForeignKey\n'), ((394, 423), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {'bind': 'engine'}), '(bind=engine)\n', (410, 423), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((435, 460), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (447, 460), False, 'from sqlalchemy.orm import sessionmaker, relationship\n'), ((536, 589), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (542, 589), False, 'from sqlalchemy import create_engine, Column, Integer, String, ForeignKey\n'), ((824, 877), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (830, 877), False, 'from sqlalchemy import create_engine, Column, Integer, String, ForeignKey\n'), ((1022, 1062), 'sqlalchemy.orm.relationship', 'relationship', (['"""User"""'], {'backref': '"""articles"""'}), "('User', backref='articles')\n", (1034, 1062), False, 'from sqlalchemy.orm import sessionmaker, relationship\n'), ((608, 618), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (614, 618), False, 'from sqlalchemy import create_engine, Column, Integer, String, ForeignKey\n'), ((897, 907), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (903, 907), False, 'from sqlalchemy import create_engine, Column, Integer, String, ForeignKey\n'), ((968, 989), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (978, 989), False, 'from sqlalchemy import create_engine, Column, Integer, String, ForeignKey\n')] |
# coding: utf-8
from __future__ import unicode_literals
from ...lemmatizer import read_index, read_exc
import pytest
@pytest.mark.models
@pytest.mark.parametrize('text,lemmas', [("aardwolves", ["aardwolf"]),
("aardwolf", ["aardwolf"]),
("planets", ["planet"]),
("ring", ["ring"]),
("axes", ["axis", "axe", "ax"])])
def test_tagger_lemmatizer_noun_lemmas(lemmatizer, text, lemmas):
if lemmatizer is None:
return None
assert lemmatizer.noun(text) == set(lemmas)
@pytest.mark.models
def test_tagger_lemmatizer_base_forms(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.noun('dive', {'number': 'sing'}) == set(['dive'])
assert lemmatizer.noun('dive', {'number': 'plur'}) == set(['diva'])
@pytest.mark.models
def test_tagger_lemmatizer_base_form_verb(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.verb('saw', {'verbform': 'past'}) == set(['see'])
@pytest.mark.models
def test_tagger_lemmatizer_punct(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.punct('“') == set(['"'])
    assert lemmatizer.punct('”') == set(['"'])
@pytest.mark.models
def test_tagger_lemmatizer_read_index(path):
if path is not None:
with (path / 'wordnet' / 'index.noun').open() as file_:
index = read_index(file_)
assert 'man' in index
assert 'plantes' not in index
assert 'plant' in index
@pytest.mark.models
@pytest.mark.parametrize('text,lemma', [("was", "be")])
def test_tagger_lemmatizer_read_exc(path, text, lemma):
if path is not None:
with (path / 'wordnet' / 'verb.exc').open() as file_:
exc = read_exc(file_)
assert exc[text] == (lemma,)
@pytest.mark.models
def test_tagger_lemmatizer_lemma_assignment(EN):
text = "Bananas in pyjamas are geese."
doc = EN.tokenizer(text)
assert all(t.lemma_ == '' for t in doc)
EN.tagger(doc)
assert all(t.lemma_ != '' for t in doc)
| [
"pytest.mark.parametrize"
]
| [((142, 327), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text,lemmas"""', "[('aardwolves', ['aardwolf']), ('aardwolf', ['aardwolf']), ('planets', [\n 'planet']), ('ring', ['ring']), ('axes', ['axis', 'axe', 'ax'])]"], {}), "('text,lemmas', [('aardwolves', ['aardwolf']), (\n 'aardwolf', ['aardwolf']), ('planets', ['planet']), ('ring', ['ring']),\n ('axes', ['axis', 'axe', 'ax'])])\n", (165, 327), False, 'import pytest\n'), ((1630, 1684), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text,lemma"""', "[('was', 'be')]"], {}), "('text,lemma', [('was', 'be')])\n", (1653, 1684), False, 'import pytest\n')] |
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
def visualize_training_results(results):
"""
Plots the loss and accuracy for the training and testing data
"""
history = results.history
plt.figure(figsize=(12,4))
plt.plot(history['val_loss'])
plt.plot(history['loss'])
plt.legend(['val_loss', 'loss'])
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
plt.figure(figsize=(12,4))
plt.plot(history['val_accuracy'])
plt.plot(history['accuracy'])
plt.legend(['val_accuracy', 'accuracy'])
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
def split_sequence(seq, n_steps_in, n_steps_out):
"""
Splits the univariate time sequence
"""
X, y = [], []
for i in range(len(seq)):
end = i + n_steps_in
out_end = end + n_steps_out
if out_end > len(seq):
break
seq_x, seq_y = seq[i:end], seq[end:out_end]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
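# Example sketch (not part of the original file):
#   X, y = split_sequence([1, 2, 3, 4, 5, 6], n_steps_in=3, n_steps_out=2)
#   X -> [[1, 2, 3], [2, 3, 4]]
#   y -> [[4, 5], [5, 6]]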
def layer_maker(n_layers, n_nodes, activation, drop=None, d_rate=.5):
"""
Create a specified number of hidden layers for an RNN
    Optionally adds Dropout layers for regularization, to help prevent overfitting.
"""
model = Sequential()
# Creating the specified number of hidden layers with the specified number of nodes
for x in range(1,n_layers+1):
model.add(LSTM(n_nodes, activation=activation, return_sequences=True))
# Adds a Dropout layer after every Nth hidden layer (the 'drop' variable)
try:
if x % drop == 0:
model.add(Dropout(d_rate))
except:
            pass
    return model
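# Usage sketch (hypothetical values, not part of the original file):
#   model = layer_maker(n_layers=3, n_nodes=50, activation='tanh', drop=2, d_rate=0.3)
#   model.add(Dense(5))   # e.g. a 5-step output layer
#   model.compile(optimizer='adam', loss='mse')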
| [
"numpy.array",
"keras.layers.LSTM",
"keras.layers.Dropout",
"keras.models.Sequential"
]
| [((1429, 1441), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1439, 1441), False, 'from keras.models import Sequential\n'), ((1144, 1155), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1152, 1155), True, 'import numpy as np\n'), ((1157, 1168), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1165, 1168), True, 'import numpy as np\n'), ((1582, 1641), 'keras.layers.LSTM', 'LSTM', (['n_nodes'], {'activation': 'activation', 'return_sequences': '(True)'}), '(n_nodes, activation=activation, return_sequences=True)\n', (1586, 1641), False, 'from keras.layers import LSTM, Dense, Dropout\n'), ((1795, 1810), 'keras.layers.Dropout', 'Dropout', (['d_rate'], {}), '(d_rate)\n', (1802, 1810), False, 'from keras.layers import LSTM, Dense, Dropout\n')] |
import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
is_additive_mask,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
scale_t = torch.tensor([scale])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
if use_biases_t[0]:
input_lin_results = torch.addmm(
input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_results = torch.mm(
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)), input_weights.transpose(0, 1)
)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul1_results = torch.empty(
(queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype, device=torch.device("cuda")
)
matmul1_results = torch.baddbmm(
matmul1_results,
queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results,
beta=0.0,
alpha=scale_t[0],
)
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert len(mask.size()) == 2, "Timing mask is not 2D!"
assert mask.size(0) == mask.size(1), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float("-inf"))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
if is_additive_mask:
matmul1_results = matmul1_results + mask.unsqueeze(1).unsqueeze(2)
else:
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1.0 - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
matmul2_results = torch.empty(
(dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype,
device=torch.device("cuda"),
).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = (
matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
)
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
if use_biases_t[0]:
outputs = torch.addmm(
output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
outputs = torch.mm(
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)), output_weights.transpose(0, 1)
)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t[0]
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Slice out q,k,v from one big set of gradients entering the input linear's bprop (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights
)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)),
)
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
if use_biases_t[0]:
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0
)
else:
output_bias_grads = None
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
        # Matmul2 - DGRAD2
        # Input1: (activations) [seqs*heads, seql_q, seql_k] transpose(1,2)
        # Input2: (data grads)  [seqs*heads, seql_q, head_dim]
        # Output: [seqs*heads, seql_k, head_dim] (written into values_grads.transpose(0,1))
        # GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(
queries_grads.transpose(0, 1),
softmax_grads,
keys.transpose(0, 1),
out=queries_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
keys_grads = torch.baddbmm(
keys_grads.transpose(0, 1),
softmax_grads.transpose(1, 2),
queries.transpose(0, 1),
out=keys_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(
inputs.size(0) * inputs.size(1), heads_t[0] * 3 * head_dim
)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(
input_lin_results_grads.transpose(0, 1), inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2))
)
if use_biases_t[0]:
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_bias_grads = None
return (
None,
None,
None,
None,
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
None,
None,
)
self_attn_func = SelfAttnFunc.apply
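# Minimal usage sketch (an assumption, not part of the original file). The fused
# attention only runs on GPU because the workspace tensors in forward() are
# allocated on torch.device("cuda"); shapes follow the comments above:
# inputs [seql, batch, embed_dim], input_weights [3*embed_dim, embed_dim],
# output_weights [embed_dim, embed_dim].
def _self_attn_usage_sketch():
    if not torch.cuda.is_available():
        return None
    seq_len, batch, embed_dim, heads = 16, 2, 64, 8
    head_dim = embed_dim // heads
    inputs = torch.randn(seq_len, batch, embed_dim, device="cuda")
    input_weights = torch.randn(3 * embed_dim, embed_dim, device="cuda")
    output_weights = torch.randn(embed_dim, embed_dim, device="cuda")
    input_biases = torch.zeros(3 * embed_dim, device="cuda")
    output_biases = torch.zeros(embed_dim, device="cuda")
    out = self_attn_func(
        False,             # use_time_mask
        False,             # is_training (skip the fused dropout path)
        heads,
        head_dim ** -0.5,  # scale
        inputs,
        input_weights,
        output_weights,
        input_biases,
        output_biases,
        None,              # mask
        False,             # is_additive_mask
        0.0,               # dropout_prob
    )
    return out  # shape: (seq_len, batch, embed_dim)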
| [
"torch._fused_dropout",
"torch.empty_like",
"torch.mm",
"torch.tensor",
"torch._softmax_backward_data",
"torch.sum",
"torch._masked_scale",
"torch.nn.functional.softmax",
"torch.device"
]
| [((414, 454), 'torch.tensor', 'torch.tensor', (['[input_biases is not None]'], {}), '([input_biases is not None])\n', (426, 454), False, 'import torch\n'), ((473, 494), 'torch.tensor', 'torch.tensor', (['[heads]'], {}), '([heads])\n', (485, 494), False, 'import torch\n'), ((513, 534), 'torch.tensor', 'torch.tensor', (['[scale]'], {}), '([scale])\n', (525, 534), False, 'import torch\n'), ((560, 588), 'torch.tensor', 'torch.tensor', (['[dropout_prob]'], {}), '([dropout_prob])\n', (572, 588), False, 'import torch\n'), ((611, 627), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (623, 627), False, 'import torch\n'), ((4214, 4248), 'torch.nn.functional.softmax', 'F.softmax', (['matmul1_results'], {'dim': '(-1)'}), '(matmul1_results, dim=-1)\n', (4223, 4248), True, 'import torch.nn.functional as F\n'), ((8252, 8287), 'torch.empty_like', 'torch.empty_like', (['input_lin_results'], {}), '(input_lin_results)\n', (8268, 8287), False, 'import torch\n'), ((11067, 11153), 'torch._masked_scale', 'torch._masked_scale', (['matmul2_dgrad1', 'dropout_mask', '(1.0 / (1.0 - dropout_prob_t[0]))'], {}), '(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 -\n dropout_prob_t[0]))\n', (11086, 11153), False, 'import torch\n'), ((11231, 11316), 'torch._softmax_backward_data', 'torch._softmax_backward_data', (['dropout_grads', 'softmax_results', '(-1)', 'softmax_results'], {}), '(dropout_grads, softmax_results, -1,\n softmax_results)\n', (11259, 11316), False, 'import torch\n'), ((13023, 13071), 'torch.mm', 'torch.mm', (['input_lin_results_grads', 'input_weights'], {}), '(input_lin_results_grads, input_weights)\n', (13031, 13071), False, 'import torch\n'), ((4368, 4432), 'torch._fused_dropout', 'torch._fused_dropout', (['softmax_results'], {'p': '(1.0 - dropout_prob_t[0])'}), '(softmax_results, p=1.0 - dropout_prob_t[0])\n', (4388, 4432), False, 'import torch\n'), ((13707, 13744), 'torch.sum', 'torch.sum', (['input_lin_results_grads', '(0)'], {}), '(input_lin_results_grads, 0)\n', (13716, 13744), False, 'import torch\n'), ((2888, 2908), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2900, 2908), False, 'import torch\n'), ((5293, 5313), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5305, 5313), False, 'import torch\n')] |
"""This module contains helper functions and utilities for nelpy."""
__all__ = ['spatial_information',
'frange',
'swap_cols',
'swap_rows',
'pairwise',
'is_sorted',
'linear_merge',
'PrettyDuration',
'ddt_asa',
'get_contiguous_segments',
'get_events_boundaries',
'get_threshold_crossing_epochs',
'_bst_get_bins']
import numpy as np
import logging
from itertools import tee, repeat
from collections import namedtuple
from math import floor
from scipy.signal import hilbert
import scipy.ndimage.filters #import gaussian_filter1d, gaussian_filter
from numpy import log, ceil
import copy
import sys
import ctypes
from multiprocessing import Array, cpu_count
from multiprocessing.pool import Pool
import pdb
from . import core # so that core.RegularlySampledAnalogSignalArray is exposed
from . import auxiliary # so that auxiliary.TuningCurve1D is epxosed
from . import filtering
from .utils_.decorators import keyword_deprecation
# def sub2ind(array_shape, rows, cols):
# ind = rows*array_shape[1] + cols
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# return ind
# def ind2sub(array_shape, ind):
# # see also np.unravel_index(ind, array.shape)
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# rows = (ind.astype('int') / array_shape[1])
# cols = ind % array_shape[1]
# return (rows, cols)
def ragged_array(arr):
"""Takes a list of arrays, and returns a ragged array.
See https://github.com/numpy/numpy/issues/12468
"""
n_elem = len(arr)
out = np.array(n_elem*[None])
for ii in range(out.shape[0]):
out[ii] = arr[ii]
return out
def asa_indices_within_epochs(asa, intervalarray):
"""Return indices of ASA within epochs.
[[start, stop]
...
[start, stop]]
so that data can be associated with asa._data[:,start:stop] for each epoch.
"""
indices = []
intervalarray = intervalarray[asa.support]
for interval in intervalarray.merge().data:
a_start = interval[0]
a_stop = interval[1]
frm, to = np.searchsorted(asa._abscissa_vals, (a_start, a_stop))
indices.append((frm, to))
indices = np.array(indices, ndmin=2)
return indices
def frange(start, stop, step):
"""arange with floating point step"""
# TODO: this function is not very general; we can extend it to work
# for reverse (stop < start), empty, and default args, etc.
# there are also many edge cases where this is weird.
# see https://stackoverflow.com/questions/7267226/range-for-floats
# for better alternatives.
num_steps = int(np.floor((stop-start)/step))
return np.linspace(start, stop, num=num_steps, endpoint=False)
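# Example sketch (not part of the original module):
#   frange(0, 1, 0.25)  ->  array([0.  , 0.25, 0.5 , 0.75])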
def spatial_information(ratemap):
"""Compute the spatial information and firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location).The spatial information content of cell discharge was
calculated using the formula:
    information content = \sum_i P_i (R_i/R) \log_2 (R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
<NAME>., <NAME>., <NAME>., <NAME>.,
and <NAME>. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
si : array of shape (n_units,)
spatial information (in bits) per unit
"""
ratemap = copy.deepcopy(ratemap)
# ensure that the ratemap always has nonzero firing rates,
# otherwise the spatial information might return NaNs:
bkg_rate = ratemap[ratemap>0].min()
ratemap[ratemap < bkg_rate] = bkg_rate
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = np.transpose(ratemap, (2,1,0))
si = np.sum(np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1), axis=1)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
si = np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1)
else:
raise TypeError("rate map shape not supported / understood!")
return si/number_of_spatial_bins
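# Worked sketch (not part of the original module): a single unit with a 1D ratemap
# of [1, 3] Hz and uniform occupancy has R = 2 Hz, so
# SI = (1/2) * (0.5*log2(0.5) + 1.5*log2(1.5)) ~= 0.19 bits per spike.
def _example_spatial_information():
    ratemap = np.array([[1.0, 3.0]])     # shape (n_units, n_bins)
    return spatial_information(ratemap)  # ~= array([0.1887])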
def spatial_sparsity(ratemap):
"""Compute the firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location).The spatial information content of cell discharge was
calculated using the formula:
    information content = \sum_i P_i (R_i/R) \log_2 (R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
<NAME>., <NAME>., <NAME>., <NAME>.,
and <NAME>. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
occupancy : array of shape (n_bins,)
Occupancy of the animal.
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
si : array of shape (n_units,)
spatial information (in bits) per unit
sparsity: array of shape (n_units,)
sparsity (in percent) for each unit
"""
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = ratemap
sparsity = np.sum(np.sum((Ri*Pi), axis=1), axis=1)/(R**2)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
sparsity = np.sum((Pi*Ri.T), axis=1)/(R**2)
else:
raise TypeError("rate map shape not supported / understood!")
return sparsity/number_of_spatial_bins
def _bst_get_bins_inside_interval(interval, ds, w=1):
"""(np.array) Return bin edges entirely contained inside an interval.
Bin edges always start at interval.start, and continue for as many
bins as would fit entirely inside the interval.
NOTE 1: there are (n+1) bin edges associated with n bins.
WARNING: if an interval is smaller than ds, then no bin will be
associated with the particular interval.
NOTE 2: nelpy uses half-open intervals [a,b), but if the bin
width divides b-a, then the bins will cover the entire
range. For example, if interval = [0,2) and ds = 1, then
bins = [0,1,2], even though [0,2] is not contained in
[0,2). There might be numerical precision deviations from this?
Parameters
----------
interval : EpochArray
EpochArray containing a single interval with a start, and stop
ds : float
Time bin width, in seconds.
w : number of bins to use in a sliding window mode. Default is 1 (no sliding window).
For example, 40 ms bins, with a stride of 5 ms, can be achieved by using (ds=0.005, w=8)
For now, w has to be an integer, and therefore 5 second bins, with a stride of 2 seconds
are not supported within this framework.
Returns
-------
bins : array
Bin edges in an array of shape (n+1,) where n is the number
of bins
centers : array
Bin centers in an array of shape (n,) where n is the number
of bins
"""
if interval.length < ds:
return None, None
n_bins = int(np.floor(interval.length / ds)) # number of bins
# linspace is better than arange for non-integral steps
bins = np.linspace(interval.start, interval.start + n_bins*ds, n_bins+1)
if w > 1:
wn_bins = np.max((1, n_bins - w + 1))
wn_bins = bins[:wn_bins+1] + w/2*ds - ds/2
bins = wn_bins
centers = bins[:-1] + (ds / 2)
return bins, centers
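# Worked sketch (not part of the original module): for an interval [0, 2.5) with
# ds = 1 and w = 1, n_bins = 2, so bins = [0, 1, 2] and centers = [0.5, 1.5].
# With w = 2, only a single 2-bin sliding window [0, 2] fits, centered at 1.0.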
def _bst_get_bins(intervalArray, ds, w=1):
"""
    Return bin edges, bin centers, binned support indices, and the true bin
    support for an IntervalArray, using only bins that are contained
    wholly inside the intervals.
"""
b = [] # bin list
c = [] # centers list
left_edges = []
right_edges = []
counter = 0
for interval in intervalArray:
bins, centers = _bst_get_bins_inside_interval(interval=interval, ds=ds, w=w)
if bins is not None:
left_edges.append(counter)
counter += len(centers) - 1
right_edges.append(counter)
counter += 1
b.extend(bins.tolist())
c.extend(centers.tolist())
bins = np.array(b)
bin_centers = np.array(c)
le = np.array(left_edges)
le = le[:, np.newaxis]
re = np.array(right_edges)
re = re[:, np.newaxis]
binned_support = np.hstack((le, re))
lengths = np.atleast_1d((binned_support[:,1] - binned_support[:,0] + 1).squeeze())
support_starts = bins[np.insert(np.cumsum(lengths+1),0,0)[:-1]]
support_stops = bins[np.insert(np.cumsum(lengths+1)-1,0,0)[1:]]
supportdata = np.vstack([support_starts, support_stops]).T
support = type(intervalArray)(supportdata) # set support to TRUE bin support
return bins, bin_centers, binned_support, support
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_mua(st, ds=None, sigma=None, truncate=None, _fast=True):
"""Compute the multiunit activity (MUA) from a spike train.
Parameters
----------
st : SpikeTrainArray
SpikeTrainArray containing one or more units.
-- OR --
st : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
Returns
-------
mua : AnalogSignalArray
AnalogSignalArray with MUA.
"""
if ds is None:
ds = 0.001 # 1 ms bin size
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(st, core.EventArray):
# bin spikes, so that we can count the spikes
mua_binned = st.bin(ds=ds).flatten()
elif isinstance(st, core.BinnedEventArray):
mua_binned = st.flatten()
ds = mua_binned.ds
else:
raise TypeError('st has to be one of (SpikeTrainArray, BinnedSpikeTrainArray)')
# make sure data type is float, so that smoothing works, and convert to rate
mua_binned._data = mua_binned._data.astype(float) / ds
# TODO: now that we can simply cast from BST to ASA and back, the following logic could be simplified:
# put mua rate inside an AnalogSignalArray
if _fast:
mua = core.AnalogSignalArray([], empty=True)
mua._data = mua_binned.data
mua._abscissa_vals = mua_binned.bin_centers
mua._abscissa.support = mua_binned.support
else:
mua = core.AnalogSignalArray(mua_binned.data, timestamps=mua_binned.bin_centers, fs=1/ds)
mua._fs = 1/ds
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
return mua
def is_odd(n):
"""Returns True if n is odd, and False if n is even.
Assumes integer.
"""
return bool(n & 1)
def swap_cols(arr, frm, to):
"""swap columns of a 2D np.array"""
if arr.ndim > 1:
arr[:,[frm, to]] = arr[:,[to, frm]]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def swap_rows(arr, frm, to):
"""swap rows of a 2D np.array"""
if arr.ndim > 1:
arr[[frm, to],:] = arr[[to, frm],:]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def pairwise(iterable):
"""returns a zip of all neighboring pairs.
This is used as a helper function for is_sorted.
Example
-------
>>> mylist = [2, 3, 6, 8, 7]
>>> list(pairwise(mylist))
[(2, 3), (3, 6), (6, 8), (8, 7)]
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def argsort(seq):
# http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
return sorted(range(len(seq)), key=seq.__getitem__)
def is_sorted_general(iterable, key=lambda a, b: a <= b):
"""Check to see if iterable is monotonic increasing (sorted)."""
return all(key(a, b) for a, b in pairwise(iterable))
def is_sorted(x, chunk_size=None):
"""Returns True if iterable is monotonic increasing (sorted).
NOTE: intended for 1D array, list or tuple. Will not work on
more than 1D
    This function works in-core with memory footprint XXX.
chunk_size = 100000 is probably a good choice.
"""
if not isinstance(x, (tuple, list, np.ndarray)):
raise TypeError("Unsupported type {}".format(type(x)))
x = np.atleast_1d(np.array(x).squeeze())
if x.ndim > 1:
raise ValueError("Input x must be 1-dimensional")
if chunk_size is None:
chunk_size = 500000
stop = x.size
for chunk_start in range(0, stop, chunk_size):
chunk_stop = int(min(stop, chunk_start + chunk_size + 1))
chunk = x[chunk_start:chunk_stop]
if not np.all(chunk[:-1] <= chunk[1:]):
return False
return True
def linear_merge(list1, list2):
"""Merge two SORTED lists in linear time.
UPDATED TO WORK WITH PYTHON 3.7+ (see https://stackoverflow.com/questions/51700960/runtimeerror-generator-raised-stopiteration-every-time-i-try-to-run-app)
Returns a generator of the merged result.
Examples
--------
>>> a = [1, 3, 5, 7]
>>> b = [2, 4, 6, 8]
>>> [i for i in linear_merge(a, b)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> [i for i in linear_merge(b, a)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> a = [1, 2, 2, 3]
>>> b = [2, 2, 4, 4]
>>> [i for i in linear_merge(a, b)]
[1, 2, 2, 2, 2, 3, 4, 4]
"""
# if any of the lists are empty, return the other (possibly also
# empty) list: (this is necessary because having either list1 or
# list2 be empty makes this quite a bit more complicated...)
if isinstance(list1, (list, np.ndarray)):
if len(list1) == 0:
list2 = iter(list2)
while True:
try:
yield next(list2)
except StopIteration:
return
if isinstance(list2, (list, np.ndarray)):
if len(list2) == 0:
list1 = iter(list1)
while True:
try:
yield next(list1)
except StopIteration:
return
list1 = iter(list1)
list2 = iter(list2)
value1 = next(list1)
value2 = next(list2)
# We'll normally exit this loop from a next() call raising
# StopIteration, which is how a generator function exits anyway.
while True:
if value1 <= value2:
# Yield the lower value.
try:
yield value1
except StopIteration:
return
try:
# Grab the next value from list1.
value1 = next(list1)
except StopIteration:
# list1 is empty. Yield the last value we received from list2, then
# yield the rest of list2.
try:
yield value2
except StopIteration:
return
while True:
try:
yield next(list2)
except StopIteration:
return
else:
try:
yield value2
except StopIteration:
return
try:
value2 = next(list2)
except StopIteration:
# list2 is empty.
try:
yield value1
except StopIteration:
return
while True:
try:
yield next(list1)
except StopIteration:
return
def get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None):
"""Determine MUA/PBEs from multiunit activity.
MUA : multiunit activity
PBE : population burst event
Parameters
----------
mua : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred from
mua.fs
minLength : float, optional
maxLength : float, optional
PrimaryThreshold : float, optional
SecondaryThreshold : float, optional
minThresholdLength : float, optional
Returns
-------
mua_epochs : EpochArray
EpochArray containing all the MUA events / PBEs.
Example
-------
mua = get_mua(spiketrain)
mua_epochs = get_mua_events(mua)
PBEs = get_PBEs(spiketrain, min_active=5)
= get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5)
"""
if fs is None:
fs = mua.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in mua!")
if PrimaryThreshold is None:
PrimaryThreshold = mua.mean() + 3*mua.std()
if SecondaryThreshold is None:
SecondaryThreshold = mua.mean()
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# determine MUA event bounds:
mua_bounds_idx, maxes, _ = get_events_boundaries(
x = mua.data,
PrimaryThreshold = PrimaryThreshold,
SecondaryThreshold = SecondaryThreshold,
minThresholdLength = minThresholdLength,
minLength = minLength,
maxLength = maxLength,
ds = 1/fs
)
if len(mua_bounds_idx) == 0:
logging.warning("no mua events detected")
return core.EpochArray(empty=True)
# store MUA bounds in an EpochArray
mua_epochs = core.EpochArray(mua.time[mua_bounds_idx])
return mua_epochs
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_PBEs(data, fs=None, ds=None, sigma=None, truncate=None, unsorted_id=0,
min_active=None, minLength=None, maxLength=None,
PrimaryThreshold=None, minThresholdLength=None,
SecondaryThreshold=None):
"""Determine PBEs from multiunit activity or spike trains.
Definitions
-----------
MUA : multiunit activity
PBE : population burst event
Summary
-------
This function can be used to identify PBE epochs from spike trains, binned
spike trains, or multiunit activity (in the form of an AnalogSignalArray).
It is recommended to either pass in a SpikeTrainArray or a
BinnedSpikeTrainArray, so that a `min_active` number of sorted units can be
set.
It is also recommended that the unsorted units (but not noise artifacts!)
should be included in the spike train that is used to estimate the PBEs. By
default, unit_id=0 is assumed to be unsorted, but this can be changed, or if
no unsorted units are present, you can set unsorted_id=None. Equivalently,
if min_active=0, then no restriction will apply, and the unsorted_id will
have no effect on the final PBE epochs.
Examples
--------
PBE_epochs = get_PBEs(mua_asa)
PBE_epochs = get_PBEs(spiketrain, min_active=5)
PBE_epochs = get_PBEs(binnedspiketrain, min_active=5)
Parameters
----------
data : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
-- OR --
data : SpikeTrainArray
SpikeTrainArray with multiple units, including unsorted unit(s), but
        excluding any noise artifacts.
-- OR --
data : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred
from data.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
unsorted_id : int, optional
unit_id of the unsorted unit. Default is 0. If no unsorted unit is
present, then set unsorted_id = None
min_active : int, optional
Minimum number of active units per event, excluding unsorted unit.
Default is 5.
minLength : float, optional
Minimum event duration in seconds. Default is 50 ms.
maxLength : float, optional
Maximum event duration in seconds. Default is 750 ms.
PrimaryThreshold : float, optional
Primary threshold to exceed. Default is mean() + 3*std()
SecondaryThreshold : float, optional
Secondary threshold to fall back to. Default is mean().
minThresholdLength : float, optional
Minimum duration to stay above PrimaryThreshold. Default is 0 ms.
Returns
-------
PBE_epochs : EpochArray
EpochArray containing all the PBEs.
Future improvements
-------------------
As of now, it is possible, but not easy to specify the Primary and Secondary
thresholds for event detection. A slight change in API might be needed to
make this specification more flexible.
"""
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(data, core.AnalogSignalArray):
# if we have only mua, then we cannot set (ds, unsorted_id, min_active)
if ds is not None:
raise ValueError('if data is an AnalogSignalArray then ds cannot be specified!')
if unsorted_id:
raise ValueError('if data is an AnalogSignalArray then unsorted_id cannot be specified!')
if min_active is not None:
raise ValueError('if data is an AnalogSignalArray then min_active cannot be specified!')
mua = data
mua._data = mua._data.astype(float)
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
elif isinstance(data, (core.EventArray, core.BinnedEventArray)):
# set default parameter values:
if ds is None:
ds = 0.001 # default 1 ms
if min_active is None:
min_active = 5
mua = get_mua(data, ds=ds, sigma=sigma, truncate=truncate, _fast=True)
else:
raise TypeError('data has to be one of (AnalogSignalArray, SpikeTrainArray, BinnedSpikeTrainArray)')
# set default parameter values:
if fs is None:
fs = mua.fs
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# if PrimaryThreshold is None:
# PrimaryThreshold =
# if SecondaryThreshold is None:
# SecondaryThreshold =
PBE_epochs = get_mua_events(mua=mua,
fs=fs,
minLength=minLength,
maxLength=maxLength,
PrimaryThreshold=PrimaryThreshold,
minThresholdLength=minThresholdLength,
SecondaryThreshold=SecondaryThreshold)
# now require min_active number of sorted cells
if isinstance(data, (core.EventArray, core.BinnedEventArray)):
if min_active > 0:
if unsorted_id is not None:
# remove unsorted unit, if present:
unit_ids = copy.deepcopy(data.unit_ids)
try:
unit_ids.remove(unsorted_id)
except ValueError:
pass
# data_ = data._unit_subset(unit_ids)
data_ = data.loc[:,unit_ids]
else:
data_ = data
# determine number of active units per epoch:
n_active = np.array([snippet.n_active for snippet in data_[PBE_epochs]])
active_epochs_idx = np.argwhere(n_active > min_active).squeeze()
# only keep those epochs where sufficiently many units are active:
PBE_epochs = PBE_epochs[active_epochs_idx]
return PBE_epochs
def get_contiguous_segments(data, *, step=None, assume_sorted=None,
in_core=True, index=False, inclusive=False,
fs=None, sort=None, in_memory=None):
"""Compute contiguous segments (seperated by step) in a list.
Note! This function requires that a sorted list is passed.
It first checks if the list is sorted O(n), and only sorts O(n log(n))
if necessary. But if you know that the list is already sorted,
you can pass assume_sorted=True, in which case it will skip
the O(n) check.
Returns an array of size (n_segments, 2), with each row
being of the form ([start, stop]) [inclusive, exclusive].
NOTE: when possible, use assume_sorted=True, and step=1 as explicit
arguments to function call.
WARNING! Step is robustly computed in-core (i.e., when in_core is
True), but is assumed to be 1 when out-of-core.
Example
-------
>>> data = [1,2,3,4,10,11,12]
>>> get_contiguous_segments(data)
([1,5], [10,13])
>>> get_contiguous_segments(data, index=True)
([0,4], [4,7])
Parameters
----------
data : array-like
1D array of sequential data, typically assumed to be integral (sample
numbers).
step : float, optional
Expected step size for neighboring samples. Default uses numpy to find
the median, but it is much faster and memory efficient to explicitly
pass in step=1.
assume_sorted : bool, optional
If assume_sorted == True, then data is not inspected or re-ordered. This
can be significantly faster, especially for out-of-core computation, but
it should only be used when you are confident that the data is indeed
sorted, otherwise the results from get_contiguous_segments will not be
reliable.
in_core : bool, optional
If True, then we use np.diff which requires all the data to fit
into memory simultaneously, otherwise we use groupby, which uses
a generator to process potentially much larger chunks of data,
but also much slower.
index : bool, optional
If True, the indices of segment boundaries will be returned. Otherwise,
the segment boundaries will be returned in terms of the data itself.
Default is False.
inclusive : bool, optional
If True, the boundaries are returned as [(inclusive idx, inclusive idx)]
Default is False, and can only be used when index==True.
Deprecated
----------
in_memory : bool, optional
This is equivalent to the new 'in-core'.
sort : bool, optional
This is equivalent to the new 'assume_sorted'
fs : sampling rate (Hz) used to extend half-open interval support by 1/fs
"""
# handle deprecated API calls:
if in_memory:
in_core = in_memory
logging.warning("'in_memory' has been deprecated; use 'in_core' instead")
if sort:
assume_sorted = sort
logging.warning("'sort' has been deprecated; use 'assume_sorted' instead")
if fs:
step = 1/fs
logging.warning("'fs' has been deprecated; use 'step' instead")
if inclusive:
assert index, "option 'inclusive' can only be used with 'index=True'"
if in_core:
data = np.asarray(data)
if not assume_sorted:
if not is_sorted(data):
data = np.sort(data) # algorithm assumes sorted list
if step is None:
step = np.median(np.diff(data))
# assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as
# data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen
# that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps.
if np.any(np.diff(data) < step):
logging.warning("some steps in the data are smaller than the requested step size.")
breaks = np.argwhere(np.diff(data)>=2*step)
starts = np.insert(breaks+1, 0, 0)
stops = np.append(breaks, len(data)-1)
bdries = np.vstack((data[starts], data[stops] + step)).T
if index:
if inclusive:
indices = np.vstack((starts, stops)).T
else:
indices = np.vstack((starts, stops + 1)).T
return indices
else:
from itertools import groupby
from operator import itemgetter
if not assume_sorted:
if not is_sorted(data):
# data = np.sort(data) # algorithm assumes sorted list
raise NotImplementedError("out-of-core sorting has not been implemented yet...")
if step is None:
step = 1
bdries = []
if not index:
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
start = next(gen)
stop = start
for stop in gen:
pass
bdries.append([start, stop + step])
else:
counter = 0
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
_ = next(gen)
start = counter
stop = start
for _ in gen:
stop +=1
if inclusive:
bdries.append([start, stop])
else:
bdries.append([start, stop + 1])
counter = stop + 1
return np.asarray(bdries)
def get_direction(asa, *, sigma=None):
"""Return epochs during which an animal was running left to right, or right
to left.
Parameters
----------
asa : AnalogSignalArray 1D
AnalogSignalArray containing the 1D position data.
sigma : float, optional
Smoothing to apply to position (x) before computing gradient estimate.
Default is 0.
Returns
-------
l2r, r2l : EpochArrays
EpochArrays corresponding to left-to-right and right-to-left movement.
"""
if sigma is None:
sigma = 0
if not isinstance(asa, core.AnalogSignalArray):
raise TypeError('AnalogSignalArray expected!')
assert asa.n_signals == 1, "1D AnalogSignalArray expected!"
direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma),
rectify=False).data
direction[direction>=0] = 1
direction[direction<0] = -1
direction = direction.squeeze()
l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1)
l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
l2r = core.EpochArray(asa.abscissa_vals[l2r])
r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1)
r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
r2l = core.EpochArray(asa.abscissa_vals[r2l])
return l2r, r2l
class PrettyBytes(int):
"""Prints number of bytes in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
if self.val < 1024:
return '{} bytes'.format(self.val)
elif self.val < 1024**2:
return '{:.3f} kilobytes'.format(self.val/1024)
elif self.val < 1024**3:
return '{:.3f} megabytes'.format(self.val/1024**2)
        elif self.val < 1024**4:
            return '{:.3f} gigabytes'.format(self.val/1024**3)
        else:
            return '{:.3f} terabytes'.format(self.val/1024**4)
def __repr__(self):
return self.__str__()
class PrettyInt(int):
"""Prints integers in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
return '{:,}'.format(self.val)
def __repr__(self):
return '{:,}'.format(self.val)
class PrettyDuration(float):
"""Time duration with pretty print.
Behaves like a float, and can always be cast to a float.
"""
def __init__(self, seconds):
self.duration = seconds
def __str__(self):
return self.time_string(self.duration)
def __repr__(self):
return self.time_string(self.duration)
@staticmethod
def to_dhms(seconds):
"""convert seconds into hh:mm:ss:ms"""
pos = seconds >= 0
if not pos:
seconds = -seconds
ms = seconds % 1; ms = round(ms*10000)/10
seconds = floor(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
Time = namedtuple('Time', 'pos dd hh mm ss ms')
time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms)
return time
@staticmethod
def time_string(seconds):
"""returns a formatted time string."""
if np.isinf(seconds):
return 'inf'
pos, dd, hh, mm, ss, s = PrettyDuration.to_dhms(seconds)
if s > 0:
if mm == 0:
# in this case, represent milliseconds in terms of
# seconds (i.e. a decimal)
sstr = str(s/1000).lstrip('0')
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
# for all other cases, milliseconds will be represented
# as an integer
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
sstr = ":{:03d}".format(int(s))
else:
sstr = ""
if dd > 0:
daystr = "{:01d} days ".format(dd)
else:
daystr = ""
if hh > 0:
timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr)
elif mm > 0:
timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr)
elif ss > 0:
timestr = daystr + "{:01d}{} seconds".format(ss, sstr)
else:
timestr = daystr +"{} milliseconds".format(s)
if not pos:
timestr = "-" + timestr
return timestr
def __add__(self, other):
"""a + b"""
return PrettyDuration(self.duration + other)
def __radd__(self, other):
"""b + a"""
return self.__add__(other)
def __sub__(self, other):
"""a - b"""
return PrettyDuration(self.duration - other)
def __rsub__(self, other):
"""b - a"""
return other - self.duration
def __mul__(self, other):
"""a * b"""
return PrettyDuration(self.duration * other)
def __rmul__(self, other):
"""b * a"""
return self.__mul__(other)
def __truediv__(self, other):
"""a / b"""
return PrettyDuration(self.duration / other)
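# Example sketch (not part of the original module): PrettyDuration behaves like a
# float but prints as a human-readable time string.
#   >>> PrettyDuration(2.5)
#   2.5 seconds
#   >>> PrettyDuration(3600) + 30
#   1:00:30 hours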
def shrinkMatColsTo(mat, numCols):
""" Docstring goes here
Shrinks a NxM1 matrix down to an NxM2 matrix, where M2 <= M1"""
import scipy.ndimage
numCells = mat.shape[0]
numColsMat = mat.shape[1]
a = np.zeros((numCells, numCols))
for row in np.arange(numCells):
niurou = scipy.ndimage.interpolation.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order = 1)
a[row,:] = niurou
return a
def find_threshold_crossing_events(x, threshold, *, mode='above'):
"""Find threshold crossing events. INCLUSIVE
Parameters
----------
x : numpy array
Input data
threshold : float
The value whose crossing triggers an event
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
Returns
-------
eventlist : list
List containing the indices corresponding to threshold crossings
eventmax : list
List containing the maximum value of each event
"""
from itertools import groupby
from operator import itemgetter
if mode == 'below':
cross_threshold = np.where(x <= threshold, 1, 0)
elif mode == 'above':
cross_threshold = np.where(x >= threshold, 1, 0)
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
eventlist = []
eventmax = []
for k,v in groupby(enumerate(cross_threshold),key=itemgetter(1)):
if k:
v = list(v)
eventlist.append([v[0][0],v[-1][0]])
try :
eventmax.append(x[v[0][0]:(v[-1][0]+1)].max())
except :
print(v, x[v[0][0]:v[-1][0]])
eventmax = np.asarray(eventmax)
eventlist = np.asarray(eventlist)
return eventlist, eventmax
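# Worked sketch (not part of the original module): with
#   x = np.array([0, 2, 5, 7, 1, 0, 6, 0]) and threshold = 5 (mode='above'),
# the function returns eventlist = [[2, 3], [6, 6]] (inclusive index bounds)
# and eventmax = [7, 6].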
def get_events_boundaries(x, *, PrimaryThreshold=None,
SecondaryThreshold=None,
minThresholdLength=None, minLength=None,
maxLength=None, ds=None, mode='above'):
"""get event boundaries such that event.max >= PrimaryThreshold
and the event extent is defined by SecondaryThreshold.
Note that when PrimaryThreshold==SecondaryThreshold, then this is a
simple threshold crossing algorithm.
NB. minLength and maxLength are applied to the SecondaryThreshold
events, whereas minThresholdLength is applied to the
PrimaryThreshold events.
Parameters
----------
x : numpy array
Input data
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
PrimaryThreshold : float, optional
If mode=='above', requires that event.max >= PrimaryThreshold
If mode=='below', requires that event.min <= PrimaryThreshold
SecondaryThreshold : float, optional
The value that defines the event extent
minThresholdLength : float, optional
Minimum duration for which the PrimaryThreshold is crossed
minLength : float, optional
Minimum duration for which the SecondaryThreshold is crossed
maxLength : float, optional
Maximum duration for which the SecondaryThreshold is crossed
ds : float, optional
Time step of the input data x
Returns
-------
returns bounds, maxes, events
where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
maxes <==> maximum value during each event
events <==> PrimaryThreshold to PrimaryThreshold, inclusive
"""
# TODO: x must be a numpy array
# TODO: ds is often used, but we have no default, and no check for when
# it is left as None.
# TODO: the Docstring should equally be improved.
x = x.squeeze()
if x.ndim > 1:
raise TypeError("multidimensional arrays not supported!")
if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x
PrimaryThreshold = np.mean(x) + 3*np.std(x)
if SecondaryThreshold is None: # by default, revert back to mean of x
SecondaryThreshold = np.mean(x) # + 0*np.std(x)
events, _ = \
find_threshold_crossing_events(x=x,
threshold=PrimaryThreshold,
mode=mode)
# apply minThresholdLength criterion:
if minThresholdLength is not None and len(events) > 0:
durations = (events[:,1] - events[:,0] + 1) * ds
events = events[[durations >= minThresholdLength]]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Find periods where value is > SecondaryThreshold; note that the previous periods should be within these!
if mode == 'above':
assert SecondaryThreshold <= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
elif mode == 'below':
assert SecondaryThreshold >= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
bounds, broader_maxes = \
find_threshold_crossing_events(x=x,
threshold=SecondaryThreshold,
mode=mode)
# Find corresponding big windows for potential events
# Specifically, look for closest left edge that is just smaller
outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right')
# searchsorted finds the index after, so subtract one to get index before
outer_boundary_indices = outer_boundary_indices - 1
# Find extended boundaries for events by pairing to larger windows
# (Note that there may be repeats if the larger window contains multiple > 3SD sections)
bounds = bounds[outer_boundary_indices,:]
maxes = broader_maxes[outer_boundary_indices]
if minLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations <= maxLength] but be careful about edge cases
bounds = bounds[[durations >= minLength]]
maxes = maxes[[durations >= minLength]]
events = events[[durations >= minLength]]
if maxLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations <= maxLength] but be careful about edge cases
bounds = bounds[[durations <= maxLength]]
maxes = maxes[[durations <= maxLength]]
events = events[[durations <= maxLength]]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Now, since all that we care about are the larger windows, so we should get rid of repeats
_, unique_idx = np.unique(bounds[:,0], return_index=True)
bounds = bounds[unique_idx,:] # SecondaryThreshold to SecondaryThreshold
maxes = maxes[unique_idx] # maximum value during event
events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold
return bounds, maxes, events
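# Worked sketch (not part of the original module), continuing the example above:
# with x = np.array([0, 2, 5, 7, 1, 0, 6, 0]), PrimaryThreshold=5,
# SecondaryThreshold=2 and ds=1, the PrimaryThreshold events are [[2, 3], [6, 6]],
# and the returned SecondaryThreshold bounds are [[1, 3], [6, 6]] with maxes [7, 6].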
def signal_envelope1D(data, *, sigma=None, fs=None):
    logging.warning("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!")
return signal_envelope_1d(data, sigma=sigma, fs=fs)
def signal_envelope_1d(data, *, sigma=None, fs=None):
"""Finds the signal envelope by taking the absolute value
of the Hilbert transform
Parameters
----------
data : numpy array, list, or RegularlySampledAnalogSignalArray
Input data
If data is a numpy array, it is expected to have shape
(n_signals, n_samples)
If data is a list, it is expected to have length n_signals,
where each sublist has length n_samples, i.e. data is not
jagged
sigma : float, optional
Standard deviation of the Gaussian kernel used to
smooth the envelope after applying the Hilbert transform.
Units of seconds. Default is 4 ms
fs : float, optional
Sampling rate of the signal
Returns
-------
out : same type as the input object
An object containing the signal envelope
TODO: this is not yet epoch-aware!
UPDATE: this is actually epoch-aware by now!
"""
if sigma is None:
sigma = 0.004 # 4 ms standard deviation
if fs is None:
if isinstance(data, (np.ndarray, list)):
raise ValueError("sampling frequency must be specified!")
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
fs = data.fs
if isinstance(data, (np.ndarray, list)):
data_array = np.array(data)
n_dims = np.array(data).ndim
assert n_dims <= 2, "Only 1D signals supported!"
if n_dims == 1:
input_data = data_array.reshape((1, data_array.size))
else:
input_data = data_array
n_signals, n_samples = input_data.shape
# Compute number of samples to compute fast FFTs
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (input_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
if isinstance(data, list):
envelope = envelope.tolist()
return envelope
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
# Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported
assert data.data.ndim == 2
cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0)
newasa = data.copy()
# for segment in data:
for idx in range(data.n_epochs):
# print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs))
segment_data = data._data[:,cum_lengths[idx]:cum_lengths[idx+1]]
n_signals, n_samples = segment_data.shape
# Compute number of samples to compute fast FFTs:
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (segment_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
newasa._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.atleast_2d(envelope)
return newasa
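# A minimal, self-contained sketch of signal_envelope_1d on plain numpy input
# (relies on this module's `np` import). For ndarray/list input the sampling
# rate `fs` must be passed explicitly; sigma=0.004 is the 4 ms default.
def _example_envelope(fs=1250.0):
    t = np.arange(0, 1.0, 1 / fs)
    # 8 Hz carrier modulated by a slow cosine; the envelope recovers the modulation
    lfp = np.sin(2 * np.pi * 8 * t) * np.cos(2 * np.pi * 0.5 * t)
    envelope = signal_envelope_1d(lfp, sigma=0.004, fs=fs)
    return envelope  # shape (1, len(t))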
def nextpower(n, base=2.0):
"""Return the next integral power of two greater than the given number.
Specifically, return m such that
m >= n
m == 2**x
where x is an integer. Use base argument to specify a base other than 2.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
"""
x = base**ceil (log (n) / log (base))
if type(n) == np.ndarray:
return np.asarray (x, dtype=int)
else:
return int (x)
def nextfastpower(n):
"""Return the next integral power of small factors greater than the given
number. Specifically, return m such that
m >= n
m == 2**x * 3**y * 5**z
where x, y, and z are integers.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
"""
if n < 7:
return max (n, 1)
# x, y, and z are all bounded from above by the formula of nextpower.
# Compute all possible combinations for powers of 3 and 5.
# (Not too many for reasonable FFT sizes.)
def power_series (x, base):
nmax = ceil (log (x) / log (base))
return np.logspace (0.0, nmax, num=nmax+1, base=base)
n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0))
n35 = n35[n35<=n]
# Lump the powers of 3 and 5 together and solve for the powers of 2.
n2 = nextpower (n / n35)
return int (min (n2 * n35))
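# A quick sketch contrasting the two padding helpers defined above: nextpower
# rounds up to a power of two, while nextfastpower finds the smallest
# 2**x * 3**y * 5**z >= n, which usually pads less (e.g. 1000 -> 1024 vs. 1000 -> 1000).
def _example_fft_sizes(n=1000):
    return nextpower(n), nextfastpower(n)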
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def gaussian_filter(obj, *, fs=None, sigma=None, truncate=None, inplace=False, mode=None, cval=None, within_intervals=False):
"""Smooths with a Gaussian kernel.
Smoothing is applied along the abscissa, and the same smoothing is applied to each
signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray.
Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported.
Parameters
----------
obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray.
fs : float, optional
Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will
be inferred.
sigma : float, optional
Standard deviation of Gaussian kernel, in obj.base_units. Default is 0.05
(50 ms if base_unit=seconds).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0.
inplace : bool
If True the data will be replaced with the smoothed data.
Default is False.
mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional
The mode parameter determines how the array borders are handled,
where cval is the value when mode is equal to ‘constant’. Default is
‘reflect’.
cval : scalar, optional
Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
within_intervals : boolean, optional
If True, then smooth within each epoch. Otherwise smooth across epochs.
Default is False.
        Note that when mode = 'wrap', smoothing within epochs is not affected
        by wrapping.
Returns
-------
out : same type as obj
An object with smoothed data is returned.
"""
if sigma is None:
sigma = 0.05
if truncate is None:
truncate = 4
if mode is None:
mode = 'reflect'
if cval is None:
cval = 0.0
if not inplace:
out = copy.deepcopy(obj)
else:
out = obj
if isinstance(out, core.RegularlySampledAnalogSignalArray):
if fs is None:
fs = out.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
elif isinstance(out, core.BinnedEventArray):
bst = out
if fs is None:
fs = 1/bst.ds
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
else:
raise NotImplementedError("gaussian_filter for {} is not yet supported!".format(str(type(out))))
sigma = sigma * fs
if not within_intervals:
# see https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
# (1) if smoothing across intervals, we work on a merged support
# (2) build abscissa_vals, including existing ones, and out-of-support ones
# (3) to smooth U, build auxiliary arrays V and W, with (V=U).nan=0, and (W=1).nan=0
# (4) Z = smooth(V)/smooth(W)
# (5) only keep original support, and original abscissa_vals
if isinstance(out, (core.RegularlySampledAnalogSignalArray, core.BinnedEventArray)):
support = out._abscissa.support.merge()
if not support.domain.is_finite:
support.domain = (support.start, support.stop) #TODO: #FIXME might come from abscissa definition, and not from support
missing_abscissa_vals = []
for interval in (~support):
missing_vals = frange(interval.start, interval.stop, 1/fs)
missing_abscissa_vals.extend(missing_vals)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
n_signals = out.n_signals
n_samples = out.n_samples
elif isinstance(out, core.BinnedEventArray):
n_signals = out.n_series
n_samples = out.n_bins
V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals)))
W = np.ones(V.shape)
all_abscissa_vals = np.sort(np.append(out._abscissa_vals, missing_abscissa_vals))
data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
missing_idx = np.searchsorted(all_abscissa_vals, missing_abscissa_vals)
V[:, data_idx] = out.data
W[:, missing_idx] = 0
VV = scipy.ndimage.filters.gaussian_filter(V, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
WW = scipy.ndimage.filters.gaussian_filter(W, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
Z = VV[:,data_idx]/WW[:,data_idx]
out._data = Z
else:
raise NotImplementedError("gaussian_filter across intervals for {} is not yet supported!".format(str(type(out))))
else: # within intervals:
cum_lengths = np.insert(np.cumsum(out.lengths), 0, 0)
out._data = out._data.astype(float)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
# now smooth each interval separately
for idx in range(out.n_intervals):
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate)
elif isinstance(out, core.BinnedSpikeTrainArray):
# now smooth each interval separately
for idx in range(out.n_epochs):
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate)
# out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = self._smooth_array(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], w=w)
return out
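# A minimal usage sketch for gaussian_filter, assuming `asa` is an existing
# RegularlySampledAnalogSignalArray (the sampling rate is taken from asa.fs).
def _example_smooth(asa):
    # 50 ms Gaussian kernel, smoothing across interval boundaries (the default)
    return gaussian_filter(asa, sigma=0.05, truncate=4, mode='reflect')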
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def ddt_asa(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None, norm=False):
"""Numerical differentiation of a regularly sampled AnalogSignalArray.
Optionally also smooths result with a Gaussian kernel.
Smoothing is applied in time, and the same smoothing is applied to each
signal in the AnalogSignalArray.
Differentiation, (and if requested, smoothing) is applied within each epoch.
Parameters
----------
asa : nelpy.RegularlySampledAnalogSignalArray
Input object.
fs : float, optional
Sampling rate (in Hz) of input RSASA. If not provided, it will be obtained
from asa.fs.
smooth : bool, optional
If true, result will be smoothed. Default is False
rectify : bool, optional
If True, absolute value of derivative is computed. Default is True.
sigma : float, optional
Standard deviation of Gaussian kernel, in seconds. Default is 0.05
(50 ms).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0
norm: boolean, optional
If True, then apply the L2 norm to the result.
Returns
-------
out : nelpy.RegularlySampledAnalogSignalArray
A RegularlySampledAnalogSignalArray with derivative data (in units
per second) is returned.
Notes
-----
Central differences are used here.
"""
if not isinstance(asa, core.RegularlySampledAnalogSignalArray):
raise TypeError("Input object must be a RegularlySampledAnalogSignalArray!")
if fs is None:
fs = asa.fs
if sigma is None:
sigma = 0.05 # 50 ms default
out = asa.copy()
cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)
# ensure that datatype is float
# TODO: this will break complex data
out._data = out.data.astype(float)
# now obtain the derivative for each epoch separately
for idx in range(asa.n_epochs):
# if 1D:
if asa.n_signals == 1:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
else:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
out._data = out._data * fs
if norm:
out._data = np.atleast_2d(np.linalg.norm(out._data, axis=0))
if rectify:
out._data = np.abs(out._data)
if smooth:
out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)
return out
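# A minimal usage sketch for ddt_asa, assuming `pos` is a
# RegularlySampledAnalogSignalArray holding x/y position: the L2 norm of the
# smoothed derivative gives a speed estimate in units per second.
def _example_speed(pos):
    return ddt_asa(pos, smooth=True, sigma=0.1, rectify=False, norm=True)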
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def dxdt_AnalogSignalArray(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None):
"""Numerical differentiation of a regularly sampled AnalogSignalArray.
Optionally also smooths result with a Gaussian kernel.
Smoothing is applied in time, and the same smoothing is applied to each
signal in the AnalogSignalArray.
Differentiation, (and if requested, smoothing) is applied within each epoch.
Parameters
----------
asa : AnalogSignalArray
fs : float, optional
Sampling rate (in Hz) of AnalogSignalArray. If not provided, it will
be obtained from asa.fs
smooth : bool, optional
If true, result will be smoothed. Default is False
rectify : bool, optional
If True, absolute value of derivative is computed. Default is True.
sigma : float, optional
Standard deviation of Gaussian kernel, in seconds. Default is 0.05
(50 ms).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0
Returns
-------
out : AnalogSignalArray
An AnalogSignalArray with derivative data (in units per second) is returned.
"""
raise DeprecationWarning('use ddt_asa instead!')
if fs is None:
fs = asa.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the AnalogSignalArray!")
if sigma is None:
sigma = 0.05 # 50 ms default
out = copy.deepcopy(asa)
cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)
# ensure that datatype is float
out._data = out.data.astype(float)
if asa.n_signals == 2:
out._data = out._data[[0],:]
# now obtain the derivative for each epoch separately
for idx in range(asa.n_epochs):
# if 1D:
if asa.n_signals == 1:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
elif asa.n_signals == 2:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.linalg.norm(np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1), axis=0)
else:
raise TypeError("more than 2D not currently supported!")
out._data = out._data * fs
if rectify:
out._data = np.abs(out._data)
if smooth:
out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)
return out
def get_threshold_crossing_epochs(asa, t1=None, t2=None, mode='above'):
"""Return epochs where a signal crosses a compound threshold specified by t1
and t2.
Parameters
----------
asa : AnalogSignalArray
AnalogSignalArray containing a single channel
t1 : float, optional
Primary threshold. Minimum signal value that has to be reached /
exceeded during an event. Default is 3 standard deviations above signal
mean.
t2 : float, optional
Secondary threshold. Signal value that defines the event boundaries.
Default is signal mean.
mode : string, optional
Mode of operation. One of ['above', 'below']. If 'above', then return
epochs where the signal exceeds the compound threshold, and if 'below',
then return epochs where the signal falls below the compound threshold.
Default is 'above'.
Returns
-------
epochs : EpochArray
EpochArray with all the epochs where the signal satisfied the criteria.
"""
if asa.n_signals > 1:
raise TypeError("multidimensional AnalogSignalArrays not supported!")
x = asa.data.squeeze()
if t1 is None: # by default, threshold is 3 SDs above mean of x
t1 = np.mean(x) + 3*np.std(x)
if t2 is None: # by default, revert back to mean of x
t2 = np.mean(x)
# compute periods where signal exceeds compound threshold
epoch_bounds, _, _ = get_events_boundaries(
x=x,
PrimaryThreshold=t1,
SecondaryThreshold=t2,
mode=mode
)
# convert bounds to time in seconds
epoch_bounds = asa.time[epoch_bounds]
if len(epoch_bounds) == 0:
return type(asa._abscissa.support)(empty=True)
# add 1/fs to stops for open interval
epoch_bounds[:,1] += 1/asa.fs
    # create EpochArray with threshold-exceeding bounds
epochs = type(asa._abscissa.support)(epoch_bounds)
return epochs
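# A minimal usage sketch, assuming `asa` is a single-channel AnalogSignalArray
# (e.g. a ripple-band envelope): with the default thresholds, events must
# exceed mean + 3 SD and are extended out to where the signal returns to the mean.
def _example_threshold_epochs(asa):
    return get_threshold_crossing_epochs(asa, mode='above')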
def get_run_epochs(speed, v1=10, v2=8):
"""Return epochs where animal is running at least as fast as
specified by v1 and v2.
Parameters
----------
speed : AnalogSignalArray
AnalogSignalArray containing single channel speed, in units/sec
v1 : float, optional
Minimum speed (in same units as speed) that has to be reached /
exceeded during an event. Default is 10 [units/sec]
v2 : float, optional
Speed that defines the event boundaries. Default is 8 [units/sec]
Returns
-------
run_epochs : EpochArray
EpochArray with all the epochs where speed satisfied the criteria.
"""
run_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='above')
return run_epochs
def get_inactive_epochs(speed, v1=5, v2=7):
"""Return epochs where animal is running no faster than specified by
v1 and v2.
Parameters
----------
speed : AnalogSignalArray
AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Speed (in same units as speed) that the animal must drop below
        during an event. Default is 5 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 7 [units/sec]
Returns
-------
inactive_epochs : EpochArray
EpochArray with all the epochs where speed satisfied the criteria.
"""
inactive_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='below')
return inactive_epochs
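# A minimal usage sketch, assuming `speed` is a single-channel
# AnalogSignalArray of running speed in units/sec.
def _example_behavior_epochs(speed):
    run_epochs = get_run_epochs(speed, v1=10, v2=8)
    inactive_epochs = get_inactive_epochs(speed, v1=5, v2=7)
    return run_epochs, inactive_epochs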
def spiketrain_union(st1, st2):
"""Join two spiketrains together.
WARNING! This function should be improved a lot!
"""
assert st1.n_units == st2.n_units
support = st1.support.join(st2.support)
newdata = []
for unit in range(st1.n_units):
newdata.append(np.append(st1.time[unit], st2.time[unit]))
fs = None
if st1.fs == st2.fs:
fs = st1.fs
return core.SpikeTrainArray(newdata, support=support, fs=fs)
########################################################################
# uncurated below this line!
########################################################################
def find_nearest_idx(array, val):
"""Finds nearest index in array to value.
Parameters
----------
array : np.array
val : float
Returns
-------
Index into array that is closest to val
TODO: this is a better version that should be incorporated:
# Based on answer here: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
def find_nearest(array,values):
right_idxs = np.searchsorted(array, values, side="left")
left_idxs = np.where(right_idxs > 0, right_idxs-1, right_idxs)
right_idxs = np.where(right_idxs == len(array), len(array)-1, right_idxs)
closest_idx = np.where(np.abs(values - array[right_idxs]) < np.abs(values - array[left_idxs]),
right_idxs, left_idxs)
return closest_idx
"""
return (np.abs(array-val)).argmin()
def find_nearest_indices(array, vals):
"""Finds nearest index in array to value.
Parameters
----------
array : np.array
This is the array you wish to index into.
vals : np.array
This is the array that you are getting your indices from.
Returns
-------
Indices into array that is closest to vals.
Notes
-----
Wrapper around find_nearest_idx().
"""
return np.array([find_nearest_idx(array, val) for val in vals], dtype=int)
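# A small, self-contained example of the nearest-index helpers on plain
# numpy arrays.
def _example_find_nearest():
    array = np.array([0.0, 1.0, 2.0, 3.0])
    idx = find_nearest_idx(array, 1.8)                          # -> 2
    idxs = find_nearest_indices(array, np.array([0.2, 2.9]))   # -> array([0, 3])
    return idx, idxs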
def get_sort_idx(tuning_curves):
"""Finds indices to sort neurons by max firing in tuning curve.
Parameters
----------
tuning_curves : list of lists
Where each inner list is the tuning curves for an individual
neuron.
Returns
-------
sorted_idx : list
List of integers that correspond to the neuron in sorted order.
"""
tc_max_loc = []
for i, neuron_tc in enumerate(tuning_curves):
tc_max_loc.append((i, np.where(neuron_tc == np.max(neuron_tc))[0][0]))
sorted_by_tc = sorted(tc_max_loc, key=lambda x: x[1])
sorted_idx = []
for idx in sorted_by_tc:
sorted_idx.append(idx[0])
return sorted_idx
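# A small, self-contained example: three toy tuning curves peaking at bins
# 2, 0 and 1 respectively, so the sorted neuron order is [1, 2, 0].
def _example_sort_neurons():
    tuning_curves = [[0, 1, 5, 2], [3, 0, 1, 0], [0, 9, 0, 1]]
    return get_sort_idx(tuning_curves)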
def collapse_time(obj, gap=0):
"""Collapse all epochs in a SpikeTrainArray and collapse them into a single, contiguous SpikeTrainArray"""
# TODO: redo SpikeTrainArray so as to keep the epochs separate!, and to support gaps!
# We'll have to ajust all the spikes per epoch... and we'll have to compute a new support. Also set a flag!
# If it's a SpikeTrainArray, then we left-shift the spike times. If it's an AnalogSignalArray, then we
# left-shift the time and tdata.
# Also set a new attribute, with the boundaries in seconds.
if isinstance(obj, core.RegularlySampledAnalogSignalArray):
new_obj = type(obj)(empty=True)
new_obj._data = obj._data
durations = obj.support.durations
starts = np.insert(np.cumsum(durations + gap),0,0)[:-1]
stops = starts + durations
newsupport = type(obj._abscissa.support)(np.vstack((starts, stops)).T)
new_obj._support = newsupport
new_time = obj.time.astype(float) # fast copy
time_idx = np.insert(np.cumsum(obj.lengths),0,0)
new_offset = 0
for epidx in range(obj.n_epochs):
if epidx > 0:
new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset + gap
new_offset += durations[epidx] + gap
else:
new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset
new_offset += durations[epidx]
new_obj._time = new_time
new_obj._fs = obj._fs
elif isinstance(obj, core.EventArray):
if gap > 0:
raise ValueError("gaps not supported for SpikeTrainArrays yet!")
new_obj = type(obj)(empty=True)
new_time = [[] for _ in range(obj.n_series)]
duration = 0
for st_ in obj:
le = st_.support.start
for unit_ in range(obj.n_series):
new_time[unit_].extend(st_._data[unit_] - le + duration)
duration += st_.support.duration
new_time = np.asanyarray([np.asanyarray(unittime) for unittime in new_time])
new_obj._data = new_time
new_obj.support = type(obj._abscissa.support)([0, duration])
new_obj._series_ids = obj._series_ids
new_obj._series_labels = obj._series_labels
new_obj._series_tags = obj._series_tags
elif isinstance(obj, core.BinnedEventArray):
raise NotImplementedError("BinnedEventArrays are not yet supported, but bst.data is essentially already collapsed!")
else:
raise TypeError("unsupported type for collapse_time")
return new_obj
def cartesian(xcenters, ycenters):
"""Finds every combination of elements in two arrays.
Parameters
----------
xcenters : np.array
ycenters : np.array
Returns
-------
cartesian : np.array
With shape(n_sample, 2).
"""
return np.transpose([np.tile(xcenters, len(ycenters)), np.repeat(ycenters, len(xcenters))])
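# A small, self-contained example: every (x, y) combination of two bin-center
# arrays, returned with shape (n_x * n_y, 2).
def _example_cartesian():
    xcenters = np.array([1.0, 2.0])
    ycenters = np.array([10.0, 20.0])
    return cartesian(xcenters, ycenters)  # [[1, 10], [2, 10], [1, 20], [2, 20]]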
| [
"numpy.prod",
"logging.warnings",
"numpy.hstack",
"math.floor",
"numpy.log",
"numpy.asanyarray",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"operator.itemgetter",
"numpy.gradient",
"numpy.arange",
"numpy.mean",
"numpy.atleast_2d",
"numpy.searchsorted",
"numpy.where",
"numpy.sort",
"numpy.asarray",
"numpy.diff",
"numpy.max",
"numpy.linspace",
"numpy.vstack",
"numpy.isinf",
"numpy.logspace",
"numpy.abs",
"collections.namedtuple",
"numpy.ones",
"numpy.floor",
"logging.warning",
"itertools.tee",
"numpy.std",
"numpy.log2",
"numpy.transpose",
"numpy.insert",
"numpy.unique",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"numpy.cumsum",
"numpy.all",
"scipy.signal.hilbert"
]
| [((1683, 1708), 'numpy.array', 'np.array', (['(n_elem * [None])'], {}), '(n_elem * [None])\n', (1691, 1708), True, 'import numpy as np\n'), ((2313, 2339), 'numpy.array', 'np.array', (['indices'], {'ndmin': '(2)'}), '(indices, ndmin=2)\n', (2321, 2339), True, 'import numpy as np\n'), ((2790, 2845), 'numpy.linspace', 'np.linspace', (['start', 'stop'], {'num': 'num_steps', 'endpoint': '(False)'}), '(start, stop, num=num_steps, endpoint=False)\n', (2801, 2845), True, 'import numpy as np\n'), ((4854, 4876), 'copy.deepcopy', 'copy.deepcopy', (['ratemap'], {}), '(ratemap)\n', (4867, 4876), False, 'import copy\n'), ((5132, 5158), 'numpy.prod', 'np.prod', (['ratemap.shape[1:]'], {}), '(ratemap.shape[1:])\n', (5139, 5158), True, 'import numpy as np\n'), ((8062, 8088), 'numpy.prod', 'np.prod', (['ratemap.shape[1:]'], {}), '(ratemap.shape[1:])\n', (8069, 8088), True, 'import numpy as np\n'), ((10508, 10577), 'numpy.linspace', 'np.linspace', (['interval.start', '(interval.start + n_bins * ds)', '(n_bins + 1)'], {}), '(interval.start, interval.start + n_bins * ds, n_bins + 1)\n', (10519, 10577), True, 'import numpy as np\n'), ((11418, 11429), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (11426, 11429), True, 'import numpy as np\n'), ((11448, 11459), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (11456, 11459), True, 'import numpy as np\n'), ((11470, 11490), 'numpy.array', 'np.array', (['left_edges'], {}), '(left_edges)\n', (11478, 11490), True, 'import numpy as np\n'), ((11527, 11548), 'numpy.array', 'np.array', (['right_edges'], {}), '(right_edges)\n', (11535, 11548), True, 'import numpy as np\n'), ((11597, 11616), 'numpy.hstack', 'np.hstack', (['(le, re)'], {}), '((le, re))\n', (11606, 11616), True, 'import numpy as np\n'), ((14922, 14935), 'itertools.tee', 'tee', (['iterable'], {}), '(iterable)\n', (14925, 14935), False, 'from itertools import tee, repeat\n'), ((33325, 33343), 'numpy.asarray', 'np.asarray', (['bdries'], {}), '(bdries)\n', (33335, 33343), True, 'import numpy as np\n'), ((39418, 39447), 'numpy.zeros', 'np.zeros', (['(numCells, numCols)'], {}), '((numCells, numCols))\n', (39426, 39447), True, 'import numpy as np\n'), ((39463, 39482), 'numpy.arange', 'np.arange', (['numCells'], {}), '(numCells)\n', (39472, 39482), True, 'import numpy as np\n'), ((40938, 40958), 'numpy.asarray', 'np.asarray', (['eventmax'], {}), '(eventmax)\n', (40948, 40958), True, 'import numpy as np\n'), ((40975, 40996), 'numpy.asarray', 'np.asarray', (['eventlist'], {}), '(eventlist)\n', (40985, 40996), True, 'import numpy as np\n'), ((44866, 44923), 'numpy.searchsorted', 'np.searchsorted', (['bounds[:, 0]', 'events[:, 0]'], {'side': '"""right"""'}), "(bounds[:, 0], events[:, 0], side='right')\n", (44881, 44923), True, 'import numpy as np\n'), ((46275, 46317), 'numpy.unique', 'np.unique', (['bounds[:, 0]'], {'return_index': '(True)'}), '(bounds[:, 0], return_index=True)\n', (46284, 46317), True, 'import numpy as np\n'), ((46622, 46715), 'logging.warnings', 'logging.warnings', (['"""\'signal_envelope1D\' is deprecated; use \'signal_envelope_1d\' instead!"""'], {}), '(\n "\'signal_envelope1D\' is deprecated; use \'signal_envelope_1d\' instead!")\n', (46638, 46715), False, 'import logging\n'), ((62910, 62928), 'copy.deepcopy', 'copy.deepcopy', (['asa'], {}), '(asa)\n', (62923, 62928), False, 'import copy\n'), ((2210, 2264), 'numpy.searchsorted', 'np.searchsorted', (['asa._abscissa_vals', '(a_start, a_stop)'], {}), '(asa._abscissa_vals, (a_start, a_stop))\n', (2225, 2264), True, 'import numpy as np\n'), ((2750, 
2781), 'numpy.floor', 'np.floor', (['((stop - start) / step)'], {}), '((stop - start) / step)\n', (2758, 2781), True, 'import numpy as np\n'), ((5406, 5438), 'numpy.transpose', 'np.transpose', (['ratemap', '(2, 1, 0)'], {}), '(ratemap, (2, 1, 0))\n', (5418, 5438), True, 'import numpy as np\n'), ((10387, 10417), 'numpy.floor', 'np.floor', (['(interval.length / ds)'], {}), '(interval.length / ds)\n', (10395, 10417), True, 'import numpy as np\n'), ((10607, 10634), 'numpy.max', 'np.max', (['(1, n_bins - w + 1)'], {}), '((1, n_bins - w + 1))\n', (10613, 10634), True, 'import numpy as np\n'), ((11858, 11900), 'numpy.vstack', 'np.vstack', (['[support_starts, support_stops]'], {}), '([support_starts, support_stops])\n', (11867, 11900), True, 'import numpy as np\n'), ((21023, 21064), 'logging.warning', 'logging.warning', (['"""no mua events detected"""'], {}), "('no mua events detected')\n", (21038, 21064), False, 'import logging\n'), ((30546, 30619), 'logging.warning', 'logging.warning', (['"""\'in_memory\' has been deprecated; use \'in_core\' instead"""'], {}), '("\'in_memory\' has been deprecated; use \'in_core\' instead")\n', (30561, 30619), False, 'import logging\n'), ((30670, 30744), 'logging.warning', 'logging.warning', (['"""\'sort\' has been deprecated; use \'assume_sorted\' instead"""'], {}), '("\'sort\' has been deprecated; use \'assume_sorted\' instead")\n', (30685, 30744), False, 'import logging\n'), ((30784, 30847), 'logging.warning', 'logging.warning', (['"""\'fs\' has been deprecated; use \'step\' instead"""'], {}), '("\'fs\' has been deprecated; use \'step\' instead")\n', (30799, 30847), False, 'import logging\n'), ((30976, 30992), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (30986, 30992), True, 'import numpy as np\n'), ((31703, 31730), 'numpy.insert', 'np.insert', (['(breaks + 1)', '(0)', '(0)'], {}), '(breaks + 1, 0, 0)\n', (31712, 31730), True, 'import numpy as np\n'), ((36174, 36188), 'math.floor', 'floor', (['seconds'], {}), '(seconds)\n', (36179, 36188), False, 'from math import floor\n'), ((36297, 36337), 'collections.namedtuple', 'namedtuple', (['"""Time"""', '"""pos dd hh mm ss ms"""'], {}), "('Time', 'pos dd hh mm ss ms')\n", (36307, 36337), False, 'from collections import namedtuple\n'), ((36525, 36542), 'numpy.isinf', 'np.isinf', (['seconds'], {}), '(seconds)\n', (36533, 36542), True, 'import numpy as np\n'), ((40331, 40361), 'numpy.where', 'np.where', (['(x <= threshold)', '(1)', '(0)'], {}), '(x <= threshold, 1, 0)\n', (40339, 40361), True, 'import numpy as np\n'), ((43324, 43334), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (43331, 43334), True, 'import numpy as np\n'), ((43826, 43873), 'logging.warning', 'logging.warning', (['"""no events satisfied criteria"""'], {}), "('no events satisfied criteria')\n", (43841, 43873), False, 'import logging\n'), ((46073, 46120), 'logging.warning', 'logging.warning', (['"""no events satisfied criteria"""'], {}), "('no events satisfied criteria')\n", (46088, 46120), False, 'import logging\n'), ((48113, 48127), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (48121, 48127), True, 'import numpy as np\n'), ((51374, 51398), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'int'}), '(x, dtype=int)\n', (51384, 51398), True, 'import numpy as np\n'), ((52181, 52228), 'numpy.logspace', 'np.logspace', (['(0.0)', 'nmax'], {'num': '(nmax + 1)', 'base': 'base'}), '(0.0, nmax, num=nmax + 1, base=base)\n', (52192, 52228), True, 'import numpy as np\n'), ((54472, 54490), 'copy.deepcopy', 'copy.deepcopy', (['obj'], {}), 
'(obj)\n', (54485, 54490), False, 'import copy\n'), ((60147, 60169), 'numpy.cumsum', 'np.cumsum', (['asa.lengths'], {}), '(asa.lengths)\n', (60156, 60169), True, 'import numpy as np\n'), ((61247, 61264), 'numpy.abs', 'np.abs', (['out._data'], {}), '(out._data)\n', (61253, 61264), True, 'import numpy as np\n'), ((62957, 62979), 'numpy.cumsum', 'np.cumsum', (['asa.lengths'], {}), '(asa.lengths)\n', (62966, 62979), True, 'import numpy as np\n'), ((64128, 64145), 'numpy.abs', 'np.abs', (['out._data'], {}), '(out._data)\n', (64134, 64145), True, 'import numpy as np\n'), ((65599, 65609), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (65606, 65609), True, 'import numpy as np\n'), ((16146, 16177), 'numpy.all', 'np.all', (['(chunk[:-1] <= chunk[1:])'], {}), '(chunk[:-1] <= chunk[1:])\n', (16152, 16177), True, 'import numpy as np\n'), ((27384, 27445), 'numpy.array', 'np.array', (['[snippet.n_active for snippet in data_[PBE_epochs]]'], {}), '([snippet.n_active for snippet in data_[PBE_epochs]])\n', (27392, 27445), True, 'import numpy as np\n'), ((31549, 31637), 'logging.warning', 'logging.warning', (['"""some steps in the data are smaller than the requested step size."""'], {}), "(\n 'some steps in the data are smaller than the requested step size.')\n", (31564, 31637), False, 'import logging\n'), ((31793, 31838), 'numpy.vstack', 'np.vstack', (['(data[starts], data[stops] + step)'], {}), '((data[starts], data[stops] + step))\n', (31802, 31838), True, 'import numpy as np\n'), ((40414, 40444), 'numpy.where', 'np.where', (['(x >= threshold)', '(1)', '(0)'], {}), '(x >= threshold, 1, 0)\n', (40422, 40444), True, 'import numpy as np\n'), ((40672, 40685), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (40682, 40685), False, 'from operator import itemgetter\n'), ((43195, 43205), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (43202, 43205), True, 'import numpy as np\n'), ((48145, 48159), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (48153, 48159), True, 'import numpy as np\n'), ((48700, 48728), 'scipy.signal.hilbert', 'hilbert', (['paddeddata'], {'axis': '(-1)'}), '(paddeddata, axis=-1)\n', (48707, 48728), False, 'from scipy.signal import hilbert\n'), ((56583, 56599), 'numpy.ones', 'np.ones', (['V.shape'], {}), '(V.shape)\n', (56590, 56599), True, 'import numpy as np\n'), ((56717, 56771), 'numpy.searchsorted', 'np.searchsorted', (['all_abscissa_vals', 'out._abscissa_vals'], {}), '(all_abscissa_vals, out._abscissa_vals)\n', (56732, 56771), True, 'import numpy as np\n'), ((56798, 56855), 'numpy.searchsorted', 'np.searchsorted', (['all_abscissa_vals', 'missing_abscissa_vals'], {}), '(all_abscissa_vals, missing_abscissa_vals)\n', (56813, 56855), True, 'import numpy as np\n'), ((57437, 57459), 'numpy.cumsum', 'np.cumsum', (['out.lengths'], {}), '(out.lengths)\n', (57446, 57459), True, 'import numpy as np\n'), ((61175, 61208), 'numpy.linalg.norm', 'np.linalg.norm', (['out._data'], {'axis': '(0)'}), '(out._data, axis=0)\n', (61189, 61208), True, 'import numpy as np\n'), ((65502, 65512), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (65509, 65512), True, 'import numpy as np\n'), ((68033, 68074), 'numpy.append', 'np.append', (['st1.time[unit]', 'st2.time[unit]'], {}), '(st1.time[unit], st2.time[unit])\n', (68042, 68074), True, 'import numpy as np\n'), ((69222, 69241), 'numpy.abs', 'np.abs', (['(array - val)'], {}), '(array - val)\n', (69228, 69241), True, 'import numpy as np\n'), ((71479, 71501), 'numpy.cumsum', 'np.cumsum', (['obj.lengths'], {}), '(obj.lengths)\n', (71488, 71501), True, 
'import numpy as np\n'), ((8374, 8397), 'numpy.sum', 'np.sum', (['(Ri * Pi)'], {'axis': '(1)'}), '(Ri * Pi, axis=1)\n', (8380, 8397), True, 'import numpy as np\n'), ((8612, 8637), 'numpy.sum', 'np.sum', (['(Pi * Ri.T)'], {'axis': '(1)'}), '(Pi * Ri.T, axis=1)\n', (8618, 8637), True, 'import numpy as np\n'), ((11740, 11762), 'numpy.cumsum', 'np.cumsum', (['(lengths + 1)'], {}), '(lengths + 1)\n', (11749, 11762), True, 'import numpy as np\n'), ((15794, 15805), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (15802, 15805), True, 'import numpy as np\n'), ((26998, 27026), 'copy.deepcopy', 'copy.deepcopy', (['data.unit_ids'], {}), '(data.unit_ids)\n', (27011, 27026), False, 'import copy\n'), ((31083, 31096), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (31090, 31096), True, 'import numpy as np\n'), ((31185, 31198), 'numpy.diff', 'np.diff', (['data'], {}), '(data)\n', (31192, 31198), True, 'import numpy as np\n'), ((31514, 31527), 'numpy.diff', 'np.diff', (['data'], {}), '(data)\n', (31521, 31527), True, 'import numpy as np\n'), ((31663, 31676), 'numpy.diff', 'np.diff', (['data'], {}), '(data)\n', (31670, 31676), True, 'import numpy as np\n'), ((32557, 32570), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (32567, 32570), False, 'from operator import itemgetter\n'), ((32920, 32933), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (32930, 32933), False, 'from operator import itemgetter\n'), ((34336, 34362), 'numpy.argwhere', 'np.argwhere', (['(direction > 0)'], {}), '(direction > 0)\n', (34347, 34362), True, 'import numpy as np\n'), ((34553, 34579), 'numpy.argwhere', 'np.argwhere', (['(direction < 0)'], {}), '(direction < 0)\n', (34564, 34579), True, 'import numpy as np\n'), ((43210, 43219), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (43216, 43219), True, 'import numpy as np\n'), ((48585, 48614), 'numpy.zeros', 'np.zeros', (['(n_signals, padlen)'], {}), '((n_signals, padlen))\n', (48593, 48614), True, 'import numpy as np\n'), ((49565, 49588), 'numpy.cumsum', 'np.cumsum', (['data.lengths'], {}), '(data.lengths)\n', (49574, 49588), True, 'import numpy as np\n'), ((50873, 50896), 'numpy.atleast_2d', 'np.atleast_2d', (['envelope'], {}), '(envelope)\n', (50886, 50896), True, 'import numpy as np\n'), ((51307, 51313), 'numpy.log', 'log', (['n'], {}), '(n)\n', (51310, 51313), False, 'from numpy import log, ceil\n'), ((51317, 51326), 'numpy.log', 'log', (['base'], {}), '(base)\n', (51320, 51326), False, 'from numpy import log, ceil\n'), ((52144, 52150), 'numpy.log', 'log', (['x'], {}), '(x)\n', (52147, 52150), False, 'from numpy import log, ceil\n'), ((52154, 52163), 'numpy.log', 'log', (['base'], {}), '(base)\n', (52157, 52163), False, 'from numpy import log, ceil\n'), ((56640, 56692), 'numpy.append', 'np.append', (['out._abscissa_vals', 'missing_abscissa_vals'], {}), '(out._abscissa_vals, missing_abscissa_vals)\n', (56649, 56692), True, 'import numpy as np\n'), ((60690, 60764), 'numpy.gradient', 'np.gradient', (['asa._data[[0], cum_lengths[idx]:cum_lengths[idx + 1]]'], {'axis': '(1)'}), '(asa._data[[0], cum_lengths[idx]:cum_lengths[idx + 1]], axis=1)\n', (60701, 60764), True, 'import numpy as np\n'), ((61025, 61097), 'numpy.gradient', 'np.gradient', (['asa._data[:, cum_lengths[idx]:cum_lengths[idx + 1]]'], {'axis': '(1)'}), '(asa._data[:, cum_lengths[idx]:cum_lengths[idx + 1]], axis=1)\n', (61036, 61097), True, 'import numpy as np\n'), ((63524, 63598), 'numpy.gradient', 'np.gradient', (['asa._data[[0], cum_lengths[idx]:cum_lengths[idx + 1]]'], {'axis': '(1)'}), 
'(asa._data[[0], cum_lengths[idx]:cum_lengths[idx + 1]], axis=1)\n', (63535, 63598), True, 'import numpy as np\n'), ((65517, 65526), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (65523, 65526), True, 'import numpy as np\n'), ((71206, 71232), 'numpy.cumsum', 'np.cumsum', (['(durations + gap)'], {}), '(durations + gap)\n', (71215, 71232), True, 'import numpy as np\n'), ((71327, 71353), 'numpy.vstack', 'np.vstack', (['(starts, stops)'], {}), '((starts, stops))\n', (71336, 71353), True, 'import numpy as np\n'), ((11807, 11829), 'numpy.cumsum', 'np.cumsum', (['(lengths + 1)'], {}), '(lengths + 1)\n', (11816, 11829), True, 'import numpy as np\n'), ((27478, 27512), 'numpy.argwhere', 'np.argwhere', (['(n_active > min_active)'], {}), '(n_active > min_active)\n', (27489, 27512), True, 'import numpy as np\n'), ((31911, 31937), 'numpy.vstack', 'np.vstack', (['(starts, stops)'], {}), '((starts, stops))\n', (31920, 31937), True, 'import numpy as np\n'), ((31984, 32014), 'numpy.vstack', 'np.vstack', (['(starts, stops + 1)'], {}), '((starts, stops + 1))\n', (31993, 32014), True, 'import numpy as np\n'), ((50221, 50249), 'scipy.signal.hilbert', 'hilbert', (['paddeddata'], {'axis': '(-1)'}), '(paddeddata, axis=-1)\n', (50228, 50249), False, 'from scipy.signal import hilbert\n'), ((72593, 72616), 'numpy.asanyarray', 'np.asanyarray', (['unittime'], {}), '(unittime)\n', (72606, 72616), True, 'import numpy as np\n'), ((50098, 50127), 'numpy.zeros', 'np.zeros', (['(n_signals, padlen)'], {}), '((n_signals, padlen))\n', (50106, 50127), True, 'import numpy as np\n'), ((63897, 63969), 'numpy.gradient', 'np.gradient', (['asa._data[:, cum_lengths[idx]:cum_lengths[idx + 1]]'], {'axis': '(1)'}), '(asa._data[:, cum_lengths[idx]:cum_lengths[idx + 1]], axis=1)\n', (63908, 63969), True, 'import numpy as np\n'), ((5482, 5497), 'numpy.log2', 'np.log2', (['(Ri / R)'], {}), '(Ri / R)\n', (5489, 5497), True, 'import numpy as np\n'), ((5733, 5748), 'numpy.log2', 'np.log2', (['(Ri / R)'], {}), '(Ri / R)\n', (5740, 5748), True, 'import numpy as np\n'), ((70248, 70265), 'numpy.max', 'np.max', (['neuron_tc'], {}), '(neuron_tc)\n', (70254, 70265), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import numpy as np
import sys
import os
import paddle
from paddle.fluid import dygraph, core, framework
from paddle.fluid.executor import Executor
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn import Linear, Conv2D, Conv2DTranspose, MaxPool2D, MaxPool1D, BatchNorm1D, BatchNorm2D, BatchNorm3D
from paddle.fluid.dygraph.nn import BatchNorm, Pool2D
from paddle.fluid.io import load_inference_model, save_inference_model
from paddle.nn.layer.activation import ReLU, LeakyReLU, Sigmoid, ReLU6, Tanh, Softmax, PReLU, Swish
from paddle.fluid.log_helper import get_logger
from . import quant_nn
from .. import quantization_pass
__all__ = ['ImperativeQuantAware', 'ImperativeCalcOutScale']
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
_op_real_in_out_name = {
"conv2d": [["Input", "Filter"], ["Output"]],
"conv2d_transpose": [["Input", "Filter"], ["Output"]],
"pool2d": [["X"], ["Out"]],
"elementwise_add": [["X", "Y"], ["Out"]],
"softmax": [["X"], ["Out"]],
"relu": [["X"], ["Out"]],
"relu6": [["X"], ["Out"]],
"leaky_relu": [["X"], ["Out"]],
"prelu": [["X"], ["Out"]],
"tanh": [["X"], ["Out"]],
"batch_norm": [["X"], ["Y"]],
"sigmoid": [["X"], ["Out"]],
"swish": [["X"], ["Out"]],
}
class ImperativeQuantAware(object):
"""
Add the fake quant logic for given quantizable layers, namely add the quant_dequant
computational logic both for activation inputs and weight inputs.
"""
def __init__(self,
weight_bits=8,
activation_bits=8,
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max',
moving_rate=0.9,
quantizable_layer_type=['Conv2D', 'Linear'],
weight_preprocess_layer=None,
act_preprocess_layer=None,
weight_quantize_layer=None,
act_quantize_layer=None):
r"""
The constructor for ImperativeQuantAware.
Args:
weight_bits(int): quantization bit number for weights,
whereas the bias is not quantized.
activation_bits(int): quantization bit number for activations.
weight_quantize_type(str): quantization type for weights,
which supports 'abs_max' now. The 'moving_average_abs_max'
usually is not used for weights, since weights are fixed once the
model is well trained.
activation_quantize_type(str): quantization type for activations,
which supports 'abs_max' and 'moving_average_abs_max' now.
If using 'abs_max' mode, the quantization scale will be calculated
dynamically each step in both training and testing period. If using
'moving_average_abs_max', the static quantization scale will be calculated
during training and used in inference.
moving_rate(float): the parameter for 'moving_average_abs_max' quantization.
quantizable_layer_type(list[str]): List the type of layers that will be quantized.
Default is ['Conv2D', 'Linear']. The quantizable_op_type in
QuantizationFreezePass and ConvertToInt8Pass must be the same as this.
weight_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to preprocess
weight before quantization. Using this can quickly test if user's
preprocess method works or not. The input is non-quantized
weight and function returns processed weight to be quantized.
If None, the weight will be quantized directly. Default is None.
act_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to preprocess
activation before quantization. Using this can quickly test if user's
preprocess method works or not. The input is non-quantized
activation and function returns processed activation to be quantized.
If None, the activation will be quantized directly. Default is None.
weight_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to quantize weight.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
weight and returns dequantized weight. If None, will use
quantization op defined by 'weight_quantize_type'. Default is None.
act_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to quantize activation.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
activation and returns dequantized activation. If None, will use
quantization op defined by 'activation_quantize_type'. Default is None.
Note:
            If the user sets the attribute 'skip_quant' of a Layer that supports dynamic quantization
            to True, the layer will not be quantized during training. If this attribute is not set,
            or is set to False, the Layer will be quantized during training.
Examples 1:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
from paddle.vision.models \
import resnet
model = resnet.resnet50(pretrained=True)
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
            # Add the fake quant logic.
            # The original model will be rewritten in place.
            # The outscale of outputs in supported layers will be calculated.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./resnet50_qat",
input_spec=[
paddle.static.InputSpec(
shape=[None, 3, 224, 224], dtype='float32')])
Examples 2:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super(ImperativeModel, self).__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
                def forward(self, inputs):
                    x = self.linear_0(inputs)
                    x = self.linear_1(x)
                    return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
            # Add the fake quant logic.
            # The original model will be rewritten in place.
            #
            # Only one Layer (self.linear_1) will have the fake quant
            # logic added.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./imperative_model_qat")
"""
super(ImperativeQuantAware, self).__init__()
self._weight_bits = weight_bits
self._activation_bits = activation_bits
self._moving_rate = moving_rate
self._activation_quantize_type = activation_quantize_type
self._weight_quantize_type = weight_quantize_type
self._weight_pre_layer = weight_preprocess_layer
self._act_pre_layer = act_preprocess_layer
self._weight_quant_layer = weight_quantize_layer
self._act_quant_layer = act_quantize_layer
self._out_scale = ImperativeCalcOutScale()
t_check = lambda method: method is None or issubclass(method, dygraph.layers.Layer)
assert t_check(
self._weight_pre_layer), "weight_preprocess should be nn.Layer"
assert t_check(self._act_pre_layer), "act_preprocess should be nn.Layer"
assert t_check(
self._weight_quant_layer), "weight_quantize should be nn.Layer"
assert t_check(self._act_quant_layer), "act_quantize should be nn.Layer"
quant_type = {
'abs_max', 'moving_average_abs_max', 'channel_wise_abs_max'
}
assert activation_quantize_type != 'channel_wise_abs_max', \
"The activation quantization type does not support 'channel_wise_abs_max'."
if activation_quantize_type not in quant_type:
raise ValueError(
"Unknown activation_quantize_type : '%s'. It can only be "
"'abs_max' or 'moving_average_abs_max' now." %
(str(activation_quantize_type)))
if weight_quantize_type not in quant_type:
raise ValueError(
"Unknown weight_quantize_type: '%s'. It can only be "
"'abs_max' or 'moving_average_abs_max' or 'channel_wise_abs_max' now."
% (str(weight_quantize_type)))
self._quant_layers_map = {
'Conv2D': Conv2D,
'Linear': Linear,
'Pool2D': Pool2D,
'ReLU': ReLU,
'LeakyReLU': LeakyReLU,
'ReLU6': ReLU6,
'Softmax': Softmax,
'Tanh': Tanh,
'Swish': Swish
}
self._quantizable_layer_type = tuple(
self._quant_layers_map[layer]
if layer in self._quant_layers_map else layer
for layer in quantizable_layer_type)
for layer in self._quantizable_layer_type:
assert not isinstance(
layer, str), "{} is unspported to be quantized.".format(layer)
def quantize(self, model):
"""
        According to the weight and activation quantization types, fake quant ops such as
        fake_quantize_dequantize_moving_average_abs_max and fake_quantize_dequantize_abs_max
        are added to the model. At the same time, the out_scale values of outputs are calculated.
Args:
model(fluid.dygraph.Layer): the model to be quantized.
Returns:
None
"""
for name, layer in model.named_sublayers():
if not isinstance(layer, self._quantizable_layer_type):
continue
if hasattr(layer, "skip_quant") and layer.skip_quant == True:
continue
scopes = name.split('.')
target = scopes[-1]
obj = model
parent = model
for i in range(len(scopes) - 1):
obj = getattr(parent, scopes[i])
parent = obj
quant_layer = self._get_quantized_counterpart(layer)
setattr(quant_layer, "layer_name", layer.full_name())
setattr(obj, target, quant_layer)
self._out_scale.calc_out_scale(model)
def _get_quantized_counterpart(self, layer):
quant_layers = tuple(self._quant_layers_map.values())
quantized_counterpart = tuple('Quantized' + k
for k in self._quant_layers_map.keys())
predicate = lambda value: isinstance(layer, value)
index_generator = (i for i, v in enumerate(quant_layers)
if predicate(v))
try:
index = next(index_generator)
except StopIteration:
_logger.fatal("The layer {} is unsupported to be quantized.".format(
layer.full_name()))
sys.exit(-1)
layer_with_weight = ['QuantizedConv2D', 'QuantizedLinear']
if quantized_counterpart[index] not in layer_with_weight:
quant_layer_class_name = 'QuantizedNoweightLayer'
else:
quant_layer_class_name = quantized_counterpart[index]
quantized_layer = quant_nn.__dict__[quant_layer_class_name](
layer, self._weight_bits, self._activation_bits, self._moving_rate,
self._weight_quantize_type, self._activation_quantize_type,
self._weight_pre_layer, self._act_pre_layer,
self._weight_quant_layer, self._act_quant_layer)
return quantized_layer
def save_quantized_model(self, layer, path, input_spec=None, **config):
self._out_scale.save_quantized_model(layer, path, input_spec, **config)
class ImperativeCalcOutScale(object):
def __init__(self, moving_rate=0.9):
"""
Add the logic of calculating and setting output quantization scales of some layers.
These output quantization scales may be used by tensorRT or some other inference engines.
Args:
moving_rate(float): The decay coefficient of moving average. The default value is 0.9.
"""
super(ImperativeCalcOutScale, self).__init__()
self._moving_rate = moving_rate
self._out_scale_layer_type_list = (
BatchNorm, BatchNorm1D, BatchNorm2D, BatchNorm3D, Conv2D,
Conv2DTranspose, LeakyReLU, Linear, PReLU, Pool2D, MaxPool1D,
MaxPool2D, ReLU, ReLU6, Sigmoid, Softmax, Tanh, Swish)
self._register_hook_handle_list = []
self._out_scale_dict = collections.OrderedDict()
def calc_out_scale(self, model):
"""
        Insert the `moving_average_abs_max_scale` op to calculate the output scale of specific layers in the model.
        Args:
            model(fluid.dygraph.Layer): The target model for which the output quantization scales will be calculated.
Returns:
None
"""
assert isinstance(
model, dygraph.Layer), "model must be the instance of dygraph.Layer"
for _, layer in model.named_sublayers():
if not isinstance(layer, self._out_scale_layer_type_list):
if 'quantized_' not in layer.full_name():
continue
forward_post_hook_handle = layer.register_forward_post_hook(
self._forward_post_hook)
self._register_hook_handle_list.append(forward_post_hook_handle)
def save_quantized_model(self, layer, path, input_spec=None, **config):
"""
Save the quantized model for the inference.
Args:
layer (Layer): The Layer to be saved.
path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
input_spec (list[InputSpec|Tensor], optional): Describes the input of the saved model's forward
method, which can be described by InputSpec or example Tensor. If None, all input variables of
the original Layer's forward method would be the inputs of the saved model. Default None.
**configs (dict, optional): Other save configuration options for compatibility. We do not
recommend using these configurations, they may be removed in the future. If not necessary,
DO NOT use them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of the saved model.
By default, all return variables of original Layer's forward method are kept as the
output of the saved model. If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given ``output_spec`` list.
Returns:
None
"""
assert isinstance(
layer, dygraph.Layer), "model must be the instance of dygraph.Layer"
is_dynamic_mode = False
with dygraph.guard():
layer.eval()
for handle in self._register_hook_handle_list:
handle.remove()
for key in self._out_scale_dict:
self._out_scale_dict[key] = float(self._out_scale_dict[key]
.numpy())
if paddle.in_dynamic_mode():
is_dynamic_mode = True
paddle.enable_static()
paddle.jit.save(layer=layer, path=path, input_spec=input_spec, **config)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
file_prefix = os.path.basename(path)
dirname = os.path.dirname(path)
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
[inference_program, feed_target_names, fetch_targets] = (
load_inference_model(
dirname=dirname,
executor=exe,
model_filename=model_filename,
params_filename=params_filename))
# Traverse all ops in the program and find out the op matching
# the Layer in the dynamic graph.
layer_var_dict = {}
ops_list = [key for key, _ in self._out_scale_dict.items()]
op_count = 0
for block in inference_program.blocks:
for op in block.ops:
if op.type in _op_real_in_out_name:
if op.type in ["batch_norm", "pool2d"]:
if op.type == "pool2d" and op.attr(
"pooling_type") != "max":
continue
op_count = self.op_match(op, ops_list, op_count)
if op_count >= len(ops_list):
continue
op._set_attr('out_threshold',
self._out_scale_dict[ops_list[op_count]])
op_count += 1
else:
output_var_names = quantization_pass._get_op_output_var_names(
op)
for output_var_name in output_var_names:
output_var_tensor = block.var(output_var_name)
if output_var_tensor.dtype not in [
core.VarDesc.VarType.FP64,
core.VarDesc.VarType.FP32
]:
continue
# Because the Layer in dygraph may correspond to multiple ops
# in static program after being saved. To ensure correctness,
# the outscale collected for output of dygraph Layer can only
# be set to the last op in the corresponding ops in static program.
#
# We can judge the execution order of the ops which corresponding
# to dygraph Layer by the name of output. And use dict to save
# the corresponding relationship between the dygraph Layer and the
# static graph op that needs to set the outscale attribute.
if '.' not in output_var_name:
continue
dynamic_layer_name, var_name_suffix = output_var_name.split(
".")
if dynamic_layer_name in layer_var_dict:
if layer_var_dict[dynamic_layer_name][
0] < var_name_suffix:
layer_var_dict[dynamic_layer_name] = [
var_name_suffix, op
]
else:
layer_var_dict[dynamic_layer_name] = [
var_name_suffix, op
]
# Because the naming styles of static and dynamic graph are different,
# in order to avoid mistakes, we unify the name here.
for (layer_name, var_name_op_list) in layer_var_dict.items():
if 'prelu' in layer_name:
layer_name = layer_name.replace('prelu', 'p_re_lu')
if 'relu' in layer_name:
layer_name = layer_name.replace('relu', 're_lu')
if layer_name not in self._out_scale_dict:
continue
var_name_op_list[1]._set_attr('out_threshold',
self._out_scale_dict[layer_name])
# Save the processed program.
save_inference_model(
dirname=dirname,
feeded_var_names=feed_target_names,
target_vars=fetch_targets,
executor=exe,
main_program=inference_program.clone(),
model_filename=model_filename,
params_filename=params_filename)
if is_dynamic_mode:
paddle.disable_static()
def op_match(self, op, ops_list, op_count):
while op_count < len(ops_list) and op.type not in ops_list[op_count]:
op_count += 1
        while op_count < len(ops_list) and op.type == "pool2d" and op.attr(
"pooling_type") != "max":
op_count += 1
return op_count
def _forward_post_hook(self, layer, input, output):
assert isinstance(
output, (core.VarBase, framework.Variable)
), "Multiple outputs are not currently supported in ImperativeOutScale."
if output.dtype not in [
core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64
]:
return
if not hasattr(layer, "_out_scale"):
layer._out_scale = quant_nn.MovingAverageAbsMaxScale(
output.name, self._moving_rate, output.dtype)
scale_out = layer._out_scale(output)
if hasattr(layer, 'layer_name'):
layer_name = layer.layer_name
else:
layer_name = layer.full_name()
self._out_scale_dict[layer_name] = scale_out
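# A minimal usage sketch for ImperativeCalcOutScale, assuming `model` is a
# trained paddle.nn.Layer and `path` is a writable prefix (e.g. "./model_qat").
def _example_calc_out_scale(model, path, input_spec=None):
    calc = ImperativeCalcOutScale(moving_rate=0.9)
    calc.calc_out_scale(model)  # registers forward-post hooks on supported layers
    # ... run forward passes here so that output scales are collected ...
    calc.save_quantized_model(model, path, input_spec=input_spec)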
| [
"paddle.fluid.executor.Executor",
"collections.OrderedDict",
"paddle.fluid.io.load_inference_model",
"paddle.fluid.dygraph.guard",
"paddle.in_dynamic_mode",
"paddle.jit.save",
"paddle.enable_static",
"os.path.dirname",
"paddle.disable_static",
"paddle.fluid.log_helper.get_logger",
"os.path.basename",
"sys.exit",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.fluid.core.CUDAPlace",
"paddle.fluid.core.CPUPlace"
]
| [((1386, 1471), 'paddle.fluid.log_helper.get_logger', 'get_logger', (['__name__', 'logging.INFO'], {'fmt': '"""%(asctime)s-%(levelname)s: %(message)s"""'}), "(__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'\n )\n", (1396, 1471), False, 'from paddle.fluid.log_helper import get_logger\n'), ((14910, 14935), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (14933, 14935), False, 'import collections\n'), ((17668, 17692), 'paddle.in_dynamic_mode', 'paddle.in_dynamic_mode', ([], {}), '()\n', (17690, 17692), False, 'import paddle\n'), ((17773, 17845), 'paddle.jit.save', 'paddle.jit.save', ([], {'layer': 'layer', 'path': 'path', 'input_spec': 'input_spec'}), '(layer=layer, path=path, input_spec=input_spec, **config)\n', (17788, 17845), False, 'import paddle\n'), ((17858, 17886), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (17884, 17886), False, 'from paddle.fluid import dygraph, core, framework\n'), ((17990, 18005), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (17998, 18005), False, 'from paddle.fluid.executor import Executor\n'), ((18029, 18051), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (18045, 18051), False, 'import os\n'), ((18070, 18091), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (18085, 18091), False, 'import os\n'), ((18289, 18409), 'paddle.fluid.io.load_inference_model', 'load_inference_model', ([], {'dirname': 'dirname', 'executor': 'exe', 'model_filename': 'model_filename', 'params_filename': 'params_filename'}), '(dirname=dirname, executor=exe, model_filename=\n model_filename, params_filename=params_filename)\n', (18309, 18409), False, 'from paddle.fluid.io import load_inference_model, save_inference_model\n'), ((17342, 17357), 'paddle.fluid.dygraph.guard', 'dygraph.guard', ([], {}), '()\n', (17355, 17357), False, 'from paddle.fluid import dygraph, core, framework\n'), ((17741, 17763), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (17761, 17763), False, 'import paddle\n'), ((17908, 17925), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (17922, 17925), False, 'from paddle.fluid import dygraph, core, framework\n'), ((17960, 17975), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (17973, 17975), False, 'from paddle.fluid import dygraph, core, framework\n'), ((22517, 22540), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (22538, 22540), False, 'import paddle\n'), ((13259, 13271), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (13267, 13271), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class TweetsItem(Item):
# define the fields for your item here like:
Author = Field()
Title = Field()
Create_time = Field()
Id = Field()
Context = Field()
Source = Field()
Url = Field()
class TopicItem(Item):
Url = Field()
Title = Field()
Category = Field()
context = Field()
Id = Field()
Hotlevel = Field()
Time = Field()
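# Hedged usage sketch (the values below are placeholders, not scraped data):
# scrapy Items are populated like dictionaries, e.g.
#   topic = TopicItem()
#   topic['Title'] = 'example topic'
#   topic['Url'] = 'http://example.com/topic/1'
#   topic['Hotlevel'] = '42'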
def main():
item = TopicItem()
pass
if __name__ == '__main__':
    main()
| [
"scrapy.Field"
]
| [((271, 278), 'scrapy.Field', 'Field', ([], {}), '()\n', (276, 278), False, 'from scrapy import Item, Field\n'), ((291, 298), 'scrapy.Field', 'Field', ([], {}), '()\n', (296, 298), False, 'from scrapy import Item, Field\n'), ((317, 324), 'scrapy.Field', 'Field', ([], {}), '()\n', (322, 324), False, 'from scrapy import Item, Field\n'), ((334, 341), 'scrapy.Field', 'Field', ([], {}), '()\n', (339, 341), False, 'from scrapy import Item, Field\n'), ((356, 363), 'scrapy.Field', 'Field', ([], {}), '()\n', (361, 363), False, 'from scrapy import Item, Field\n'), ((377, 384), 'scrapy.Field', 'Field', ([], {}), '()\n', (382, 384), False, 'from scrapy import Item, Field\n'), ((395, 402), 'scrapy.Field', 'Field', ([], {}), '()\n', (400, 402), False, 'from scrapy import Item, Field\n'), ((438, 445), 'scrapy.Field', 'Field', ([], {}), '()\n', (443, 445), False, 'from scrapy import Item, Field\n'), ((458, 465), 'scrapy.Field', 'Field', ([], {}), '()\n', (463, 465), False, 'from scrapy import Item, Field\n'), ((481, 488), 'scrapy.Field', 'Field', ([], {}), '()\n', (486, 488), False, 'from scrapy import Item, Field\n'), ((503, 510), 'scrapy.Field', 'Field', ([], {}), '()\n', (508, 510), False, 'from scrapy import Item, Field\n'), ((520, 527), 'scrapy.Field', 'Field', ([], {}), '()\n', (525, 527), False, 'from scrapy import Item, Field\n'), ((543, 550), 'scrapy.Field', 'Field', ([], {}), '()\n', (548, 550), False, 'from scrapy import Item, Field\n'), ((562, 569), 'scrapy.Field', 'Field', ([], {}), '()\n', (567, 569), False, 'from scrapy import Item, Field\n')] |
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Examples of design matrix specification and computation (event-related
design, FIR design, etc.)
Requires matplotlib
Author : <NAME>: 2009-2010
"""
print(__doc__)
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.experimental_paradigm import (EventRelatedParadigm,
BlockParadigm)
# frame times
tr = 1.0
nscans = 128
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
# experimental paradigm
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
hrf_model = 'canonical'
motion = np.cumsum(np.random.randn(128, 6), 0)
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
#event-related design matrix
paradigm = EventRelatedParadigm(conditions, onsets)
X1 = make_dmtx(
frametimes, paradigm, drift_model='polynomial', drift_order=3,
add_regs=motion, add_reg_names=add_reg_names)
# block design matrix
duration = 7 * np.ones(9)
paradigm = BlockParadigm(con_id=conditions, onset=onsets,
duration=duration)
X2 = make_dmtx(frametimes, paradigm, drift_model='polynomial',
drift_order=3)
# FIR model
paradigm = EventRelatedParadigm(conditions, onsets)
hrf_model = 'FIR'
X3 = make_dmtx(frametimes, paradigm, hrf_model='fir',
drift_model='polynomial', drift_order=3,
fir_delays=np.arange(1, 6))
# plot the results
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(1, 3, 1)
X1.show(ax=ax)
ax.set_title('Event-related design matrix', fontsize=12)
ax = plt.subplot(1, 3, 2)
X2.show(ax=ax)
ax.set_title('Block design matrix', fontsize=12)
ax = plt.subplot(1, 3, 3)
X3.show(ax=ax)
ax.set_title('FIR design matrix', fontsize=12)
plt.subplots_adjust(top=0.9, bottom=0.25)
plt.show()
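# Optional addition (not part of the original example): also persist the figure
# to disk; the file name and dpi are arbitrary choices.
fig.savefig('design_matrices.png', dpi=150, bbox_inches='tight')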
| [
"numpy.ones",
"numpy.arange",
"nipy.modalities.fmri.experimental_paradigm.BlockParadigm",
"numpy.linspace",
"matplotlib.pyplot.figure",
"nipy.modalities.fmri.design_matrix.make_dmtx",
"numpy.random.randn",
"nipy.modalities.fmri.experimental_paradigm.EventRelatedParadigm",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
]
| [((791, 832), 'numpy.linspace', 'np.linspace', (['(0)', '((nscans - 1) * tr)', 'nscans'], {}), '(0, (nscans - 1) * tr, nscans)\n', (802, 832), True, 'import numpy as np\n'), ((1138, 1178), 'nipy.modalities.fmri.experimental_paradigm.EventRelatedParadigm', 'EventRelatedParadigm', (['conditions', 'onsets'], {}), '(conditions, onsets)\n', (1158, 1178), False, 'from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm, BlockParadigm\n'), ((1185, 1307), 'nipy.modalities.fmri.design_matrix.make_dmtx', 'make_dmtx', (['frametimes', 'paradigm'], {'drift_model': '"""polynomial"""', 'drift_order': '(3)', 'add_regs': 'motion', 'add_reg_names': 'add_reg_names'}), "(frametimes, paradigm, drift_model='polynomial', drift_order=3,\n add_regs=motion, add_reg_names=add_reg_names)\n", (1194, 1307), False, 'from nipy.modalities.fmri.design_matrix import make_dmtx\n'), ((1373, 1438), 'nipy.modalities.fmri.experimental_paradigm.BlockParadigm', 'BlockParadigm', ([], {'con_id': 'conditions', 'onset': 'onsets', 'duration': 'duration'}), '(con_id=conditions, onset=onsets, duration=duration)\n', (1386, 1438), False, 'from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm, BlockParadigm\n'), ((1474, 1546), 'nipy.modalities.fmri.design_matrix.make_dmtx', 'make_dmtx', (['frametimes', 'paradigm'], {'drift_model': '"""polynomial"""', 'drift_order': '(3)'}), "(frametimes, paradigm, drift_model='polynomial', drift_order=3)\n", (1483, 1546), False, 'from nipy.modalities.fmri.design_matrix import make_dmtx\n'), ((1586, 1626), 'nipy.modalities.fmri.experimental_paradigm.EventRelatedParadigm', 'EventRelatedParadigm', (['conditions', 'onsets'], {}), '(conditions, onsets)\n', (1606, 1626), False, 'from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm, BlockParadigm\n'), ((1824, 1851), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1834, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1857, 1877), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1868, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1975), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1966, 1975), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2065), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2056, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2128, 2169), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)', 'bottom': '(0.25)'}), '(top=0.9, bottom=0.25)\n', (2147, 2169), True, 'import matplotlib.pyplot as plt\n'), ((2170, 2180), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2178, 2180), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1039), 'numpy.random.randn', 'np.random.randn', (['(128)', '(6)'], {}), '(128, 6)\n', (1031, 1039), True, 'import numpy as np\n'), ((1351, 1361), 'numpy.ones', 'np.ones', (['(9)'], {}), '(9)\n', (1358, 1361), True, 'import numpy as np\n'), ((1781, 1796), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (1790, 1796), True, 'import numpy as np\n')] |
from app import app
from app.database.db import Database
if __name__ == "__main__":
db = Database()
db.create_tables()
db.create_admin()
    app.run(debug=True)
| [
"app.app.run",
"app.database.db.Database"
]
| [((95, 105), 'app.database.db.Database', 'Database', ([], {}), '()\n', (103, 105), False, 'from app.database.db import Database\n'), ((155, 174), 'app.app.run', 'app.run', ([], {'debug': '(True)'}), '(debug=True)\n', (162, 174), False, 'from app import app\n')] |
from pathlib import Path
from bsmu.bone_age.models import constants
IMAGE_DIR = Path('C:/MyDiskBackup/Projects/BoneAge/Data/SmallImages500_NoPads')
TRAIN_DATA_CSV_PATH = constants.TRAIN_DATA_CSV_PATH
VALID_DATA_CSV_PATH = constants.VALID_DATA_CSV_PATH
TEST_DATA_CSV_PATH = constants.TEST_DATA_CSV_PATH
BATCH_SIZE = 7
MODEL_NAME_PREFIX = 'DenseNet169'
MODEL_NAME_POSTFIX = 'AllImages3_MoreAugments'
| [
"pathlib.Path"
]
| [((86, 153), 'pathlib.Path', 'Path', (['"""C:/MyDiskBackup/Projects/BoneAge/Data/SmallImages500_NoPads"""'], {}), "('C:/MyDiskBackup/Projects/BoneAge/Data/SmallImages500_NoPads')\n", (90, 153), False, 'from pathlib import Path\n')] |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from uuid import uuid4
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.ext.declarative import declared_attr
from werkzeug.urls import url_parse
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.modules.oauth import logger
from indico.util.i18n import _
from indico.util.struct.enum import IndicoEnum
SCOPES = {'read:user': _("User information (read only)"),
'read:legacy_api': _('Legacy API (read only)'),
'write:legacy_api': _('Legacy API (write only)'),
'registrants': _('Event registrants')}
class SystemAppType(int, IndicoEnum):
none = 0
checkin = 1
flower = 2
__enforced_data__ = {
checkin: {'default_scopes': {'registrants'},
'redirect_uris': ['http://localhost'],
'is_enabled': True},
flower: {'default_scopes': {'read:user'},
'is_enabled': True}
}
__default_data__ = {
checkin: {'is_trusted': True,
'name': 'Checkin App',
'description': 'The checkin app for mobile devices allows scanning ticket QR codes and '
'checking-in event participants.'},
flower: {'is_trusted': True,
'name': 'Flower',
'description': 'Flower allows monitoring Celery tasks. If flower is installed, this app is used to '
'restrict access to Indico administrators.'}
}
@property
def enforced_data(self):
return self.__enforced_data__.get(self, {})
@property
def default_data(self):
return dict(self.__default_data__.get(self, {}), **self.enforced_data)
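# Illustrative behaviour of the two properties above (derived from the class
# dictionaries, no new values introduced):
#   SystemAppType.checkin.default_data -> the checkin name/description merged with
#                                         the enforced scopes, redirect URIs and flag
#   SystemAppType.none.default_data    -> {} (no entry in either dictionary)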
class OAuthApplication(db.Model):
"""OAuth applications registered in Indico."""
__tablename__ = 'applications'
@declared_attr
def __table_args__(cls):
return (db.Index('ix_uq_applications_name_lower', db.func.lower(cls.name), unique=True),
db.Index(None, cls.system_app_type, unique=True,
postgresql_where=db.text(f'system_app_type != {SystemAppType.none.value}')),
{'schema': 'oauth'})
#: the unique id of the application
id = db.Column(
db.Integer,
primary_key=True
)
#: human readable name
name = db.Column(
db.String,
nullable=False
)
#: human readable description
description = db.Column(
db.Text,
nullable=False,
default=''
)
#: the OAuth client_id
client_id = db.Column(
UUID,
unique=True,
nullable=False,
default=lambda: str(uuid4())
)
#: the OAuth client_secret
client_secret = db.Column(
UUID,
nullable=False,
default=lambda: str(uuid4())
)
#: the OAuth default scopes the application may request access to
default_scopes = db.Column(
ARRAY(db.String),
nullable=False
)
#: the OAuth absolute URIs that a application may use to redirect to after authorization
redirect_uris = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
#: whether the application is enabled or disabled
is_enabled = db.Column(
db.Boolean,
nullable=False,
default=True
)
#: whether the application can access user data without asking for permission
is_trusted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: the type of system app (if any). system apps cannot be deleted
system_app_type = db.Column(
PyIntEnum(SystemAppType),
nullable=False,
default=SystemAppType.none
)
# relationship backrefs:
# - tokens (OAuthToken.application)
@property
def client_type(self):
return 'public'
@property
def default_redirect_uri(self):
return self.redirect_uris[0] if self.redirect_uris else None
@property
def locator(self):
return {'id': self.id}
def __repr__(self): # pragma: no cover
return f'<OAuthApplication({self.id}, {self.name}, {self.client_id})>'
def reset_client_secret(self):
self.client_secret = str(uuid4())
logger.info("Client secret for %s has been reset.", self)
def validate_redirect_uri(self, redirect_uri):
"""Called by flask-oauthlib to validate the redirect_uri.
        Uses logic similar to GitHub's: the protocol and host/port must
        match exactly, and if the whitelisted URL contains a path, the
        path of the redirect_uri must start with that path.
"""
uri_data = url_parse(redirect_uri)
for valid_uri_data in map(url_parse, self.redirect_uris):
if (uri_data.scheme == valid_uri_data.scheme and uri_data.netloc == valid_uri_data.netloc and
uri_data.path.startswith(valid_uri_data.path)):
return True
return False
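    # Hedged examples for validate_redirect_uri (the URLs are placeholders):
    # with redirect_uris = ['https://app.example.com/cb']:
    #   'https://app.example.com/cb/extra'  -> True   (same scheme/netloc, path prefix)
    #   'http://app.example.com/cb'         -> False  (scheme differs)
    #   'https://other.example.com/cb'      -> False  (netloc differs)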
| [
"werkzeug.urls.url_parse",
"indico.util.i18n._",
"indico.core.db.sqlalchemy.PyIntEnum",
"indico.core.db.db.func.lower",
"uuid.uuid4",
"indico.core.db.db.text",
"indico.modules.oauth.logger.info",
"sqlalchemy.dialects.postgresql.ARRAY",
"indico.core.db.db.Column"
]
| [((604, 637), 'indico.util.i18n._', '_', (['"""User information (read only)"""'], {}), "('User information (read only)')\n", (605, 637), False, 'from indico.util.i18n import _\n'), ((668, 695), 'indico.util.i18n._', '_', (['"""Legacy API (read only)"""'], {}), "('Legacy API (read only)')\n", (669, 695), False, 'from indico.util.i18n import _\n'), ((727, 755), 'indico.util.i18n._', '_', (['"""Legacy API (write only)"""'], {}), "('Legacy API (write only)')\n", (728, 755), False, 'from indico.util.i18n import _\n'), ((782, 804), 'indico.util.i18n._', '_', (['"""Event registrants"""'], {}), "('Event registrants')\n", (783, 804), False, 'from indico.util.i18n import _\n'), ((2454, 2493), 'indico.core.db.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (2463, 2493), False, 'from indico.core.db import db\n'), ((2554, 2590), 'indico.core.db.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (2563, 2590), False, 'from indico.core.db import db\n'), ((2665, 2711), 'indico.core.db.db.Column', 'db.Column', (['db.Text'], {'nullable': '(False)', 'default': '""""""'}), "(db.Text, nullable=False, default='')\n", (2674, 2711), False, 'from indico.core.db import db\n'), ((3468, 3519), 'indico.core.db.db.Column', 'db.Column', (['db.Boolean'], {'nullable': '(False)', 'default': '(True)'}), '(db.Boolean, nullable=False, default=True)\n', (3477, 3519), False, 'from indico.core.db import db\n'), ((3649, 3701), 'indico.core.db.db.Column', 'db.Column', (['db.Boolean'], {'nullable': '(False)', 'default': '(False)'}), '(db.Boolean, nullable=False, default=False)\n', (3658, 3701), False, 'from indico.core.db import db\n'), ((3151, 3167), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['db.String'], {}), '(db.String)\n', (3156, 3167), False, 'from sqlalchemy.dialects.postgresql import ARRAY, UUID\n'), ((3330, 3346), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['db.String'], {}), '(db.String)\n', (3335, 3346), False, 'from sqlalchemy.dialects.postgresql import ARRAY, UUID\n'), ((3843, 3867), 'indico.core.db.sqlalchemy.PyIntEnum', 'PyIntEnum', (['SystemAppType'], {}), '(SystemAppType)\n', (3852, 3867), False, 'from indico.core.db.sqlalchemy import PyIntEnum\n'), ((4469, 4526), 'indico.modules.oauth.logger.info', 'logger.info', (['"""Client secret for %s has been reset."""', 'self'], {}), "('Client secret for %s has been reset.', self)\n", (4480, 4526), False, 'from indico.modules.oauth import logger\n'), ((4902, 4925), 'werkzeug.urls.url_parse', 'url_parse', (['redirect_uri'], {}), '(redirect_uri)\n', (4911, 4925), False, 'from werkzeug.urls import url_parse\n'), ((4452, 4459), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4457, 4459), False, 'from uuid import uuid4\n'), ((2161, 2184), 'indico.core.db.db.func.lower', 'db.func.lower', (['cls.name'], {}), '(cls.name)\n', (2174, 2184), False, 'from indico.core.db import db\n'), ((2307, 2364), 'indico.core.db.db.text', 'db.text', (['f"""system_app_type != {SystemAppType.none.value}"""'], {}), "(f'system_app_type != {SystemAppType.none.value}')\n", (2314, 2364), False, 'from indico.core.db import db\n'), ((2883, 2890), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2888, 2890), False, 'from uuid import uuid4\n'), ((3026, 3033), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3031, 3033), False, 'from uuid import uuid4\n')] |
import torch.nn as nn
from .basic import *
class squeeze_excitation_2d(nn.Module):
"""Squeeze-and-Excitation Block 2D
Args:
channel (int): number of input channels.
channel_reduction (int): channel squeezing factor.
spatial_reduction (int): pooling factor for x,y axes.
"""
def __init__(self, channel, channel_reduction=4, spatial_reduction=4, norm_mode='bn', act_mode='elu'):
super(squeeze_excitation_2d, self).__init__()
self.pool_size = (spatial_reduction, spatial_reduction)
layers = [nn.AvgPool2d(kernel_size=self.pool_size, stride=self.pool_size)]
layers += conv2d_norm_act(channel, channel // channel_reduction, kernel_size=1, padding=0, norm_mode=norm_mode, act_mode=act_mode, return_list=True)
layers += conv2d_norm_act(channel // channel_reduction, channel, kernel_size=1, padding=0, norm_mode=norm_mode, return_list=True)
        # Append the gating and upsampling layers to the pool/conv stack
        # ('bilinear', since the feature maps here are 4D: N x C x H x W).
        layers += [nn.Sigmoid(),
                   nn.Upsample(scale_factor=self.pool_size, mode='bilinear', align_corners=False)]
self.se = nn.Sequential(*layers)
def forward(self, x):
y = self.se(x)
z = x + y*x
return z
class squeeze_excitation_3d(nn.Module):
"""Squeeze-and-Excitation Block 3D
Args:
channel (int): number of input channels.
channel_reduction (int): channel squeezing factor.
spatial_reduction (int): pooling factor for x,y axes.
z_reduction (int): pooling factor for z axis.
"""
def __init__(self, channel, channel_reduction=4, spatial_reduction=4, z_reduction=1, norm_mode='bn', act_mode='elu'):
super(squeeze_excitation_3d, self).__init__()
self.pool_size = (z_reduction, spatial_reduction, spatial_reduction)
layers = [nn.AvgPool3d(kernel_size=self.pool_size, stride=self.pool_size)]
layers += conv3d_norm_act(channel, channel//channel_reduction, kernel_size=1, padding=0, norm_mode=norm_mode, act_mode=act_mode, return_list=True)
layers += conv3d_norm_act(channel//channel_reduction, channel, kernel_size=1, padding=0, norm_mode=norm_mode, return_list=True)
layers += [nn.Sigmoid(),
nn.Upsample(scale_factor=self.pool_size, mode='trilinear', align_corners=False)]
self.se = nn.Sequential(*layers)
def forward(self, x):
y = self.se(x)
z = x + y*x
return z
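# Hedged smoke-test sketch (shapes are arbitrary; the module must be imported as
# part of its package so the relative import of .basic resolves):
#   import torch
#   se2d = squeeze_excitation_2d(channel=16, channel_reduction=4, spatial_reduction=4)
#   x = torch.randn(2, 16, 32, 32)       # (N, C, H, W)
#   assert se2d(x).shape == x.shape      # the block is shape-preserving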
| [
"torch.nn.Sigmoid",
"torch.nn.AvgPool3d",
"torch.nn.Sequential",
"torch.nn.Upsample",
"torch.nn.AvgPool2d"
]
| [((1066, 1088), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1079, 1088), True, 'import torch.nn as nn\n'), ((2277, 2299), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2290, 2299), True, 'import torch.nn as nn\n'), ((555, 618), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': 'self.pool_size', 'stride': 'self.pool_size'}), '(kernel_size=self.pool_size, stride=self.pool_size)\n', (567, 618), True, 'import torch.nn as nn\n'), ((933, 945), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (943, 945), True, 'import torch.nn as nn\n'), ((963, 1042), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.pool_size', 'mode': '"""trilinear"""', 'align_corners': '(False)'}), "(scale_factor=self.pool_size, mode='trilinear', align_corners=False)\n", (974, 1042), True, 'import torch.nn as nn\n'), ((1771, 1834), 'torch.nn.AvgPool3d', 'nn.AvgPool3d', ([], {'kernel_size': 'self.pool_size', 'stride': 'self.pool_size'}), '(kernel_size=self.pool_size, stride=self.pool_size)\n', (1783, 1834), True, 'import torch.nn as nn\n'), ((2148, 2160), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2158, 2160), True, 'import torch.nn as nn\n'), ((2178, 2257), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.pool_size', 'mode': '"""trilinear"""', 'align_corners': '(False)'}), "(scale_factor=self.pool_size, mode='trilinear', align_corners=False)\n", (2189, 2257), True, 'import torch.nn as nn\n')] |
import os
import option
import utility
import grapeMenu
import grapeGit as git
import grapeConfig
class Clone(option.Option):
""" grape-clone
Clones a git repo and configures it for use with git.
Usage: grape-clone <url> <path> [--recursive] [--allNested]
Arguments:
<url> The URL of the remote repository
<path> The directory where you want to clone the repo to.
Options:
--recursive Recursively clone submodules.
--allNested Get all nested subprojects.
"""
def __init__(self):
super(Clone, self).__init__()
self._key = "clone"
self._section = "Getting Started"
#Clones the default repo into a new local repo
def description(self):
return "Clone a repo and configure it for grape"
def execute(self, args):
remotepath = args["<url>"]
destpath = args["<path>"]
rstr = "--recursive" if args["--recursive"] else ""
utility.printMsg("Cloning %s into %s %s" % (remotepath, destpath, "recursively" if args["--recursive"] else ""))
git.clone(" %s %s %s" % (rstr, remotepath, destpath))
utility.printMsg("Clone succeeded!")
os.chdir(destpath)
grapeConfig.read()
# ensure you start on a reasonable publish branch
menu = grapeMenu.menu()
config = grapeConfig.grapeConfig()
publicBranches = config.getPublicBranchList()
if publicBranches:
if "develop" in publicBranches:
initialBranch = "develop"
elif "master" in publicBranches:
initialBranch = "master"
else:
initialBranch = publicBranches[0]
menu.applyMenuChoice("checkout", args=[initialBranch])
if args["--allNested"]:
configArgs = ["--uv","--uvArg=--allNestedSubprojects"]
else:
configArgs = []
return menu.applyMenuChoice("config", configArgs)
def setDefaultConfig(self, config):
pass
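# Hedged usage sketch (the URL and path are placeholders): the docopt usage string
# above maps onto an args dict such as
#   Clone().execute({"<url>": "ssh://host/repo.git", "<path>": "repo",
#                    "--recursive": True, "--allNested": False})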
| [
"grapeGit.clone",
"grapeConfig.grapeConfig",
"grapeMenu.menu",
"utility.printMsg",
"os.chdir",
"grapeConfig.read"
]
| [((981, 1098), 'utility.printMsg', 'utility.printMsg', (["('Cloning %s into %s %s' % (remotepath, destpath, 'recursively' if args[\n '--recursive'] else ''))"], {}), "('Cloning %s into %s %s' % (remotepath, destpath, \n 'recursively' if args['--recursive'] else ''))\n", (997, 1098), False, 'import utility\n'), ((1102, 1155), 'grapeGit.clone', 'git.clone', (["(' %s %s %s' % (rstr, remotepath, destpath))"], {}), "(' %s %s %s' % (rstr, remotepath, destpath))\n", (1111, 1155), True, 'import grapeGit as git\n'), ((1164, 1200), 'utility.printMsg', 'utility.printMsg', (['"""Clone succeeded!"""'], {}), "('Clone succeeded!')\n", (1180, 1200), False, 'import utility\n'), ((1209, 1227), 'os.chdir', 'os.chdir', (['destpath'], {}), '(destpath)\n', (1217, 1227), False, 'import os\n'), ((1236, 1254), 'grapeConfig.read', 'grapeConfig.read', ([], {}), '()\n', (1252, 1254), False, 'import grapeConfig\n'), ((1328, 1344), 'grapeMenu.menu', 'grapeMenu.menu', ([], {}), '()\n', (1342, 1344), False, 'import grapeMenu\n'), ((1362, 1387), 'grapeConfig.grapeConfig', 'grapeConfig.grapeConfig', ([], {}), '()\n', (1385, 1387), False, 'import grapeConfig\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, German Neuroinformatics Node (G-Node)
# <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
"""
Tests for neo.io.nixio
"""
import os
from datetime import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import string
import itertools
from six import string_types
import numpy as np
import quantities as pq
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal,
IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch)
from neo.test.iotest.common_io_test import BaseTestIO
try:
import nixio
HAVE_NIX = True
except ImportError:
HAVE_NIX = False
from neo.io.nixio import NixIO
from neo.io.nixio import nixtypes
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class NixIOTest(unittest.TestCase):
filename = None
io = None
def compare_blocks(self, neoblocks, nixblocks):
for neoblock, nixblock in zip(neoblocks, nixblocks):
self.compare_attr(neoblock, nixblock)
self.assertEqual(len(neoblock.segments), len(nixblock.groups))
for idx, neoseg in enumerate(neoblock.segments):
nixgrp = nixblock.groups[neoseg.name]
self.compare_segment_group(neoseg, nixgrp)
for idx, neochx in enumerate(neoblock.channel_indexes):
if neochx.name:
nixsrc = nixblock.sources[neochx.name]
else:
nixsrc = nixblock.sources[idx]
self.compare_chx_source(neochx, nixsrc)
self.check_refs(neoblock, nixblock)
def compare_chx_source(self, neochx, nixsrc):
self.compare_attr(neochx, nixsrc)
nix_channels = list(src for src in nixsrc.sources
if src.type == "neo.channelindex")
self.assertEqual(len(neochx.index), len(nix_channels))
for nixchan in nix_channels:
nixchanidx = nixchan.metadata["index"]
try:
neochanpos = list(neochx.index).index(nixchanidx)
except ValueError:
self.fail("Channel indexes do not match.")
if len(neochx.channel_names):
neochanname = neochx.channel_names[neochanpos]
if ((not isinstance(neochanname, str)) and
isinstance(neochanname, bytes)):
neochanname = neochanname.decode()
nixchanname = nixchan.name
self.assertEqual(neochanname, nixchanname)
nix_units = list(src for src in nixsrc.sources
if src.type == "neo.unit")
self.assertEqual(len(neochx.units), len(nix_units))
for neounit in neochx.units:
nixunit = nixsrc.sources[neounit.name]
self.compare_attr(neounit, nixunit)
def check_refs(self, neoblock, nixblock):
"""
Checks whether the references between objects that are not nested are
mapped correctly (e.g., SpikeTrains referenced by a Unit).
:param neoblock: A Neo block
:param nixblock: The corresponding NIX block
"""
for idx, neochx in enumerate(neoblock.channel_indexes):
if neochx.name:
nixchx = nixblock.sources[neochx.name]
else:
nixchx = nixblock.sources[idx]
# AnalogSignals referencing CHX
neoasigs = list(sig.name for sig in neochx.analogsignals)
nixasigs = list(set(da.metadata.name for da in nixblock.data_arrays
if da.type == "neo.analogsignal" and
nixchx in da.sources))
self.assertEqual(len(neoasigs), len(nixasigs))
# IrregularlySampledSignals referencing CHX
neoisigs = list(sig.name for sig in neochx.irregularlysampledsignals)
nixisigs = list(set(da.metadata.name for da in nixblock.data_arrays
if da.type == "neo.irregularlysampledsignal" and
nixchx in da.sources))
self.assertEqual(len(neoisigs), len(nixisigs))
# SpikeTrains referencing CHX and Units
for sidx, neounit in enumerate(neochx.units):
if neounit.name:
nixunit = nixchx.sources[neounit.name]
else:
nixunit = nixchx.sources[sidx]
neosts = list(st.name for st in neounit.spiketrains)
nixsts = list(mt for mt in nixblock.multi_tags
if mt.type == "neo.spiketrain" and
nixunit.name in mt.sources)
# SpikeTrains must also reference CHX
for nixst in nixsts:
self.assertIn(nixchx.name, nixst.sources)
nixsts = list(st.name for st in nixsts)
self.assertEqual(len(neosts), len(nixsts))
for neoname in neosts:
if neoname:
self.assertIn(neoname, nixsts)
# Events and Epochs must reference all Signals in the Group (NIX only)
for nixgroup in nixblock.groups:
nixevep = list(mt for mt in nixgroup.multi_tags
if mt.type in ["neo.event", "neo.epoch"])
nixsigs = list(da.name for da in nixgroup.data_arrays
if da.type in ["neo.analogsignal",
"neo.irregularlysampledsignal"])
for nee in nixevep:
for ns in nixsigs:
self.assertIn(ns, nee.references)
def compare_segment_group(self, neoseg, nixgroup):
self.compare_attr(neoseg, nixgroup)
neo_signals = neoseg.analogsignals + neoseg.irregularlysampledsignals
self.compare_signals_das(neo_signals, nixgroup.data_arrays)
neo_eests = neoseg.epochs + neoseg.events + neoseg.spiketrains
self.compare_eests_mtags(neo_eests, nixgroup.multi_tags)
def compare_signals_das(self, neosignals, data_arrays):
for sig in neosignals:
if self.io._find_lazy_loaded(sig) is not None:
sig = self.io.load_lazy_object(sig)
dalist = list()
for idx in itertools.count():
nixname = "{}.{}".format(sig.name, idx)
if nixname in data_arrays:
dalist.append(data_arrays[nixname])
else:
break
_, nsig = np.shape(sig)
self.assertEqual(nsig, len(dalist))
self.compare_signal_dalist(sig, dalist)
def compare_signal_dalist(self, neosig, nixdalist):
"""
Check if a Neo Analog or IrregularlySampledSignal matches a list of
NIX DataArrays.
:param neosig: Neo Analog or IrregularlySampledSignal
:param nixdalist: List of DataArrays
"""
nixmd = nixdalist[0].metadata
self.assertTrue(all(nixmd == da.metadata for da in nixdalist))
neounit = str(neosig.dimensionality)
for sig, da in zip(np.transpose(neosig),
sorted(nixdalist, key=lambda d: d.name)):
self.compare_attr(neosig, da)
np.testing.assert_almost_equal(sig.magnitude, da)
self.assertEqual(neounit, da.unit)
timedim = da.dimensions[0]
if isinstance(neosig, AnalogSignal):
self.assertIsInstance(timedim, nixtypes["SampledDimension"])
self.assertEqual(
pq.Quantity(timedim.sampling_interval, timedim.unit),
neosig.sampling_period
)
self.assertEqual(timedim.offset, neosig.t_start.magnitude)
if "t_start.units" in da.metadata.props:
self.assertEqual(da.metadata["t_start.units"],
str(neosig.t_start.dimensionality))
elif isinstance(neosig, IrregularlySampledSignal):
self.assertIsInstance(timedim, nixtypes["RangeDimension"])
np.testing.assert_almost_equal(neosig.times.magnitude,
timedim.ticks)
self.assertEqual(timedim.unit,
str(neosig.times.dimensionality))
def compare_eests_mtags(self, eestlist, mtaglist):
self.assertEqual(len(eestlist), len(mtaglist))
for eest in eestlist:
if self.io._find_lazy_loaded(eest) is not None:
eest = self.io.load_lazy_object(eest)
mtag = mtaglist[eest.name]
if isinstance(eest, Epoch):
self.compare_epoch_mtag(eest, mtag)
elif isinstance(eest, Event):
self.compare_event_mtag(eest, mtag)
elif isinstance(eest, SpikeTrain):
self.compare_spiketrain_mtag(eest, mtag)
def compare_epoch_mtag(self, epoch, mtag):
self.assertEqual(mtag.type, "neo.epoch")
self.compare_attr(epoch, mtag)
np.testing.assert_almost_equal(epoch.times.magnitude, mtag.positions)
np.testing.assert_almost_equal(epoch.durations.magnitude, mtag.extents)
self.assertEqual(mtag.positions.unit,
str(epoch.times.units.dimensionality))
self.assertEqual(mtag.extents.unit,
str(epoch.durations.units.dimensionality))
for neol, nixl in zip(epoch.labels,
mtag.positions.dimensions[0].labels):
# Dirty. Should find the root cause instead
if isinstance(neol, bytes):
neol = neol.decode()
if isinstance(nixl, bytes):
nixl = nixl.decode()
self.assertEqual(neol, nixl)
def compare_event_mtag(self, event, mtag):
self.assertEqual(mtag.type, "neo.event")
self.compare_attr(event, mtag)
np.testing.assert_almost_equal(event.times.magnitude, mtag.positions)
self.assertEqual(mtag.positions.unit, str(event.units.dimensionality))
for neol, nixl in zip(event.labels,
mtag.positions.dimensions[0].labels):
# Dirty. Should find the root cause instead
# Only happens in 3.2
if isinstance(neol, bytes):
neol = neol.decode()
if isinstance(nixl, bytes):
nixl = nixl.decode()
self.assertEqual(neol, nixl)
def compare_spiketrain_mtag(self, spiketrain, mtag):
self.assertEqual(mtag.type, "neo.spiketrain")
self.compare_attr(spiketrain, mtag)
np.testing.assert_almost_equal(spiketrain.times.magnitude,
mtag.positions)
if len(mtag.features):
neowf = spiketrain.waveforms
nixwf = mtag.features[0].data
self.assertEqual(np.shape(neowf), np.shape(nixwf))
self.assertEqual(nixwf.unit, str(neowf.units.dimensionality))
np.testing.assert_almost_equal(neowf.magnitude, nixwf)
self.assertIsInstance(nixwf.dimensions[0], nixtypes["SetDimension"])
self.assertIsInstance(nixwf.dimensions[1], nixtypes["SetDimension"])
self.assertIsInstance(nixwf.dimensions[2],
nixtypes["SampledDimension"])
def compare_attr(self, neoobj, nixobj):
if neoobj.name:
if isinstance(neoobj, (AnalogSignal, IrregularlySampledSignal)):
nix_name = ".".join(nixobj.name.split(".")[:-1])
else:
nix_name = nixobj.name
self.assertEqual(neoobj.name, nix_name)
self.assertEqual(neoobj.description, nixobj.definition)
if hasattr(neoobj, "rec_datetime") and neoobj.rec_datetime:
self.assertEqual(neoobj.rec_datetime,
datetime.fromtimestamp(nixobj.created_at))
if hasattr(neoobj, "file_datetime") and neoobj.file_datetime:
self.assertEqual(neoobj.file_datetime,
datetime.fromtimestamp(
nixobj.metadata["file_datetime"]))
if neoobj.annotations:
nixmd = nixobj.metadata
for k, v, in neoobj.annotations.items():
if isinstance(v, pq.Quantity):
self.assertEqual(nixmd.props[str(k)].unit,
str(v.dimensionality))
np.testing.assert_almost_equal(nixmd[str(k)],
v.magnitude)
else:
self.assertEqual(nixmd[str(k)], v)
@classmethod
def create_full_nix_file(cls, filename):
nixfile = nixio.File.open(filename, nixio.FileMode.Overwrite)
nix_block_a = nixfile.create_block(cls.rword(10), "neo.block")
nix_block_a.definition = cls.rsentence(5, 10)
nix_block_b = nixfile.create_block(cls.rword(10), "neo.block")
nix_block_b.definition = cls.rsentence(3, 3)
nix_block_a.metadata = nixfile.create_section(
nix_block_a.name, nix_block_a.name+".metadata"
)
nix_block_b.metadata = nixfile.create_section(
nix_block_b.name, nix_block_b.name+".metadata"
)
nix_blocks = [nix_block_a, nix_block_b]
for blk in nix_blocks:
for ind in range(3):
group = blk.create_group(cls.rword(), "neo.segment")
group.definition = cls.rsentence(10, 15)
group_md = blk.metadata.create_section(group.name,
group.name+".metadata")
group.metadata = group_md
blk = nix_blocks[0]
group = blk.groups[0]
allspiketrains = list()
allsignalgroups = list()
# analogsignals
for n in range(3):
siggroup = list()
asig_name = "{}_asig{}".format(cls.rword(10), n)
asig_definition = cls.rsentence(5, 5)
asig_md = group.metadata.create_section(asig_name,
asig_name+".metadata")
for idx in range(3):
da_asig = blk.create_data_array(
"{}.{}".format(asig_name, idx),
"neo.analogsignal",
data=cls.rquant(100, 1)
)
da_asig.definition = asig_definition
da_asig.unit = "mV"
da_asig.metadata = asig_md
timedim = da_asig.append_sampled_dimension(0.01)
timedim.unit = "ms"
timedim.label = "time"
timedim.offset = 10
da_asig.append_set_dimension()
group.data_arrays.append(da_asig)
siggroup.append(da_asig)
allsignalgroups.append(siggroup)
# irregularlysampledsignals
for n in range(2):
siggroup = list()
isig_name = "{}_isig{}".format(cls.rword(10), n)
isig_definition = cls.rsentence(12, 12)
isig_md = group.metadata.create_section(isig_name,
isig_name+".metadata")
isig_times = cls.rquant(200, 1, True)
for idx in range(10):
da_isig = blk.create_data_array(
"{}.{}".format(isig_name, idx),
"neo.irregularlysampledsignal",
data=cls.rquant(200, 1)
)
da_isig.definition = isig_definition
da_isig.unit = "mV"
da_isig.metadata = isig_md
timedim = da_isig.append_range_dimension(isig_times)
timedim.unit = "s"
timedim.label = "time"
da_isig.append_set_dimension()
group.data_arrays.append(da_isig)
siggroup.append(da_isig)
allsignalgroups.append(siggroup)
# SpikeTrains with Waveforms
for n in range(4):
stname = "{}-st{}".format(cls.rword(20), n)
times = cls.rquant(400, 1, True)
times_da = blk.create_data_array(
"{}.times".format(stname),
"neo.spiketrain.times",
data=times
)
times_da.unit = "ms"
mtag_st = blk.create_multi_tag(stname,
"neo.spiketrain",
times_da)
group.multi_tags.append(mtag_st)
mtag_st.definition = cls.rsentence(20, 30)
mtag_st_md = group.metadata.create_section(
mtag_st.name, mtag_st.name+".metadata"
)
mtag_st.metadata = mtag_st_md
mtag_st_md.create_property(
"t_stop", nixio.Value(max(times_da).item()+1)
)
waveforms = cls.rquant((10, 8, 5), 1)
wfname = "{}.waveforms".format(mtag_st.name)
wfda = blk.create_data_array(wfname, "neo.waveforms",
data=waveforms)
wfda.unit = "mV"
mtag_st.create_feature(wfda, nixio.LinkType.Indexed)
wfda.append_set_dimension() # spike dimension
wfda.append_set_dimension() # channel dimension
wftimedim = wfda.append_sampled_dimension(0.1)
wftimedim.unit = "ms"
wftimedim.label = "time"
wfda.metadata = mtag_st_md.create_section(
wfname, "neo.waveforms.metadata"
)
wfda.metadata.create_property("left_sweep",
[nixio.Value(20)]*5)
allspiketrains.append(mtag_st)
# Epochs
for n in range(3):
epname = "{}-ep{}".format(cls.rword(5), n)
times = cls.rquant(5, 1, True)
times_da = blk.create_data_array(
"{}.times".format(epname),
"neo.epoch.times",
data=times
)
times_da.unit = "s"
extents = cls.rquant(5, 1)
extents_da = blk.create_data_array(
"{}.durations".format(epname),
"neo.epoch.durations",
data=extents
)
extents_da.unit = "s"
mtag_ep = blk.create_multi_tag(
epname, "neo.epoch", times_da
)
group.multi_tags.append(mtag_ep)
mtag_ep.definition = cls.rsentence(2)
mtag_ep.extents = extents_da
label_dim = mtag_ep.positions.append_set_dimension()
label_dim.labels = cls.rsentence(5).split(" ")
# reference all signals in the group
for siggroup in allsignalgroups:
mtag_ep.references.extend(siggroup)
# Events
for n in range(2):
evname = "{}-ev{}".format(cls.rword(5), n)
times = cls.rquant(5, 1, True)
times_da = blk.create_data_array(
"{}.times".format(evname),
"neo.event.times",
data=times
)
times_da.unit = "s"
mtag_ev = blk.create_multi_tag(
evname, "neo.event", times_da
)
group.multi_tags.append(mtag_ev)
mtag_ev.definition = cls.rsentence(2)
label_dim = mtag_ev.positions.append_set_dimension()
label_dim.labels = cls.rsentence(5).split(" ")
# reference all signals in the group
for siggroup in allsignalgroups:
mtag_ev.references.extend(siggroup)
# CHX
nixchx = blk.create_source(cls.rword(10),
"neo.channelindex")
nixchx.metadata = nix_blocks[0].metadata.create_section(
nixchx.name, "neo.channelindex.metadata"
)
chantype = "neo.channelindex"
# 3 channels
for idx in [2, 5, 9]:
channame = cls.rword(20)
nixrc = nixchx.create_source(channame, chantype)
nixrc.definition = cls.rsentence(13)
nixrc.metadata = nixchx.metadata.create_section(
nixrc.name, "neo.channelindex.metadata"
)
nixrc.metadata.create_property("index", nixio.Value(idx))
dims = tuple(map(nixio.Value, cls.rquant(3, 1)))
nixrc.metadata.create_property("coordinates", dims)
nixrc.metadata.create_property("coordinates.units",
nixio.Value("um"))
nunits = 1
stsperunit = np.array_split(allspiketrains, nunits)
for idx in range(nunits):
unitname = "{}-unit{}".format(cls.rword(5), idx)
nixunit = nixchx.create_source(unitname, "neo.unit")
nixunit.definition = cls.rsentence(4, 10)
for st in stsperunit[idx]:
st.sources.append(nixchx)
st.sources.append(nixunit)
# pick a few signal groups to reference this CHX
randsiggroups = np.random.choice(allsignalgroups, 5, False)
for siggroup in randsiggroups:
for sig in siggroup:
sig.sources.append(nixchx)
return nixfile
@staticmethod
def rdate():
return datetime(year=np.random.randint(1980, 2020),
month=np.random.randint(1, 13),
day=np.random.randint(1, 29))
@classmethod
def populate_dates(cls, obj):
obj.file_datetime = cls.rdate()
obj.rec_datetime = cls.rdate()
@staticmethod
def rword(n=10):
return "".join(np.random.choice(list(string.ascii_letters), n))
@classmethod
def rsentence(cls, n=3, maxwl=10):
return " ".join(cls.rword(np.random.randint(1, maxwl))
for _ in range(n))
@classmethod
def rdict(cls, nitems):
rd = dict()
for _ in range(nitems):
key = cls.rword()
value = cls.rword() if np.random.choice((0, 1)) \
else np.random.uniform()
rd[key] = value
return rd
@staticmethod
def rquant(shape, unit, incr=False):
try:
dim = len(shape)
except TypeError:
dim = 1
if incr and dim > 1:
raise TypeError("Shape of quantity array may only be "
"one-dimensional when incremental values are "
"requested.")
arr = np.random.random(shape)
if incr:
arr = np.array(np.cumsum(arr))
return arr*unit
@classmethod
def create_all_annotated(cls):
times = cls.rquant(1, pq.s)
signal = cls.rquant(1, pq.V)
blk = Block()
blk.annotate(**cls.rdict(3))
seg = Segment()
seg.annotate(**cls.rdict(4))
blk.segments.append(seg)
asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
asig.annotate(**cls.rdict(2))
seg.analogsignals.append(asig)
isig = IrregularlySampledSignal(times=times, signal=signal,
time_units=pq.s)
isig.annotate(**cls.rdict(2))
seg.irregularlysampledsignals.append(isig)
epoch = Epoch(times=times, durations=times)
epoch.annotate(**cls.rdict(4))
seg.epochs.append(epoch)
event = Event(times=times)
event.annotate(**cls.rdict(4))
seg.events.append(event)
spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
d = cls.rdict(6)
d["quantity"] = pq.Quantity(10, "mV")
d["qarray"] = pq.Quantity(range(10), "mA")
spiketrain.annotate(**d)
seg.spiketrains.append(spiketrain)
chx = ChannelIndex(name="achx", index=[1, 2])
chx.annotate(**cls.rdict(5))
blk.channel_indexes.append(chx)
unit = Unit()
unit.annotate(**cls.rdict(2))
chx.units.append(unit)
return blk
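    # create_all_annotated() builds one Block containing a single annotated instance
    # of every supported container (Segment, signals, Epoch, Event, SpikeTrain,
    # ChannelIndex, Unit); it backs the annotation round-trip tests below.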
class NixIOWriteTest(NixIOTest):
def setUp(self):
self.filename = "nixio_testfile_write.h5"
self.writer = NixIO(self.filename, "ow")
self.io = self.writer
self.reader = nixio.File.open(self.filename,
nixio.FileMode.ReadOnly)
def tearDown(self):
del self.writer
self.reader.close()
os.remove(self.filename)
def write_and_compare(self, blocks):
self.writer.write_all_blocks(blocks)
self.compare_blocks(self.writer.read_all_blocks(), self.reader.blocks)
def test_block_write(self):
block = Block(name=self.rword(),
description=self.rsentence())
self.write_and_compare([block])
block.annotate(**self.rdict(5))
self.write_and_compare([block])
def test_segment_write(self):
block = Block(name=self.rword())
segment = Segment(name=self.rword(), description=self.rword())
block.segments.append(segment)
self.write_and_compare([block])
segment.annotate(**self.rdict(2))
self.write_and_compare([block])
def test_channel_index_write(self):
block = Block(name=self.rword())
chx = ChannelIndex(name=self.rword(),
description=self.rsentence(),
index=[1, 2, 3, 5, 8, 13])
block.channel_indexes.append(chx)
self.write_and_compare([block])
chx.annotate(**self.rdict(3))
self.write_and_compare([block])
def test_signals_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
asig = AnalogSignal(signal=self.rquant((10, 3), pq.mV),
sampling_rate=pq.Quantity(10, "Hz"))
seg.analogsignals.append(asig)
self.write_and_compare([block])
anotherblock = Block("ir signal block")
seg = Segment("ir signal seg")
anotherblock.segments.append(seg)
irsig = IrregularlySampledSignal(
signal=np.random.random((20, 3)),
times=self.rquant(20, pq.ms, True),
units=pq.A
)
seg.irregularlysampledsignals.append(irsig)
self.write_and_compare([anotherblock])
block.segments[0].analogsignals.append(
AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,
sampling_period=pq.Quantity(3, "s"),
dtype=np.double, name="signal42",
description="this is an analogsignal",
t_start=45 * pq.ms),
)
self.write_and_compare([block, anotherblock])
block.segments[0].irregularlysampledsignals.append(
IrregularlySampledSignal(times=np.random.random(10),
signal=np.random.random((10, 3)),
units="mV", time_units="s",
dtype=np.float,
name="some sort of signal",
description="the signal is described")
)
self.write_and_compare([block, anotherblock])
def test_epoch_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
epoch = Epoch(times=[1, 1, 10, 3]*pq.ms, durations=[3, 3, 3, 1]*pq.ms,
labels=np.array(["one", "two", "three", "four"]),
name="test epoch", description="an epoch for testing")
seg.epochs.append(epoch)
self.write_and_compare([block])
def test_event_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
event = Event(times=np.arange(0, 30, 10)*pq.s,
labels=np.array(["0", "1", "2"]),
name="event name",
description="event description")
seg.events.append(event)
self.write_and_compare([block])
def test_spiketrain_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
spiketrain = SpikeTrain(times=[3, 4, 5]*pq.s, t_stop=10.0,
name="spikes!", description="sssssspikes")
seg.spiketrains.append(spiketrain)
self.write_and_compare([block])
waveforms = self.rquant((20, 5, 10), pq.mV)
spiketrain = SpikeTrain(times=[1, 1.1, 1.2]*pq.ms, t_stop=1.5*pq.s,
name="spikes with wf",
description="spikes for waveform test",
waveforms=waveforms)
seg.spiketrains.append(spiketrain)
self.write_and_compare([block])
spiketrain.left_sweep = np.random.random(10)*pq.ms
self.write_and_compare([block])
def test_metadata_structure_write(self):
neoblk = self.create_all_annotated()
self.io.write_block(neoblk)
blk = self.io.nix_file.blocks[0]
blkmd = blk.metadata
self.assertEqual(blk.name, blkmd.name)
grp = blk.groups[0] # segment
self.assertIn(grp.name, blkmd.sections)
grpmd = blkmd.sections[grp.name]
for da in grp.data_arrays: # signals
name = ".".join(da.name.split(".")[:-1])
self.assertIn(name, grpmd.sections)
for mtag in grp.multi_tags: # spiketrains, events, and epochs
self.assertIn(mtag.name, grpmd.sections)
srcchx = blk.sources[0] # chx
self.assertIn(srcchx.name, blkmd.sections)
for srcunit in blk.sources: # units
self.assertIn(srcunit.name, blkmd.sections)
self.write_and_compare([neoblk])
def test_anonymous_objects_write(self):
nblocks = 2
nsegs = 2
nanasig = 4
nirrseg = 2
nepochs = 3
nevents = 4
nspiketrains = 3
nchx = 5
nunits = 10
times = self.rquant(1, pq.s)
signal = self.rquant(1, pq.V)
blocks = []
for blkidx in range(nblocks):
blk = Block()
blocks.append(blk)
for segidx in range(nsegs):
seg = Segment()
blk.segments.append(seg)
for anaidx in range(nanasig):
seg.analogsignals.append(AnalogSignal(signal=signal,
sampling_rate=pq.Hz))
for irridx in range(nirrseg):
seg.irregularlysampledsignals.append(
IrregularlySampledSignal(times=times,
signal=signal,
time_units=pq.s)
)
for epidx in range(nepochs):
seg.epochs.append(Epoch(times=times, durations=times))
for evidx in range(nevents):
seg.events.append(Event(times=times))
for stidx in range(nspiketrains):
seg.spiketrains.append(SpikeTrain(times=times, t_stop=pq.s,
units=pq.s))
for chidx in range(nchx):
chx = ChannelIndex(name="chx{}".format(chidx),
index=[1, 2])
blk.channel_indexes.append(chx)
for unidx in range(nunits):
unit = Unit()
chx.units.append(unit)
self.writer.write_all_blocks(blocks)
self.compare_blocks(blocks, self.reader.blocks)
def test_to_value(self):
section = self.io.nix_file.create_section("Metadata value test", "Test")
writeprop = self.io._write_property
# quantity
qvalue = pq.Quantity(10, "mV")
writeprop(section, "qvalue", qvalue)
self.assertEqual(section["qvalue"], 10)
self.assertEqual(section.props["qvalue"].unit, "mV")
# datetime
dt = self.rdate()
writeprop(section, "dt", dt)
self.assertEqual(datetime.fromtimestamp(section["dt"]), dt)
# string
randstr = self.rsentence()
writeprop(section, "randstr", randstr)
self.assertEqual(section["randstr"], randstr)
# bytes
bytestring = b"bytestring"
writeprop(section, "randbytes", bytestring)
self.assertEqual(section["randbytes"], bytestring.decode())
# iterables
randlist = np.random.random(10).tolist()
writeprop(section, "randlist", randlist)
self.assertEqual(randlist, section["randlist"])
randarray = np.random.random(10)
writeprop(section, "randarray", randarray)
np.testing.assert_almost_equal(randarray, section["randarray"])
# numpy item
npval = np.float64(2398)
writeprop(section, "npval", npval)
self.assertEqual(npval, section["npval"])
# number
val = 42
writeprop(section, "val", val)
self.assertEqual(val, section["val"])
        # multi-dimensional data -- UNSUPPORTED
# mdlist = [[1, 2, 3], [4, 5, 6]]
# writeprop(section, "mdlist", mdlist)
# mdarray = np.random.random((10, 3))
# writeprop(section, "mdarray", mdarray)
class NixIOReadTest(NixIOTest):
filename = "testfile_readtest.h5"
nixfile = None
nix_blocks = None
original_methods = dict()
@classmethod
def setUpClass(cls):
if HAVE_NIX:
cls.nixfile = cls.create_full_nix_file(cls.filename)
def setUp(self):
self.io = NixIO(self.filename, "ro")
self.original_methods["_read_cascade"] = self.io._read_cascade
self.original_methods["_update_maps"] = self.io._update_maps
@classmethod
def tearDownClass(cls):
if HAVE_NIX:
cls.nixfile.close()
def tearDown(self):
del self.io
def test_all_read(self):
neo_blocks = self.io.read_all_blocks(cascade=True, lazy=False)
nix_blocks = self.io.nix_file.blocks
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazyload_fullcascade_read(self):
neo_blocks = self.io.read_all_blocks(cascade=True, lazy=True)
nix_blocks = self.io.nix_file.blocks
# data objects should be empty
for block in neo_blocks:
for seg in block.segments:
for asig in seg.analogsignals:
self.assertEqual(len(asig), 0)
for isig in seg.irregularlysampledsignals:
self.assertEqual(len(isig), 0)
for epoch in seg.epochs:
self.assertEqual(len(epoch), 0)
for event in seg.events:
self.assertEqual(len(event), 0)
for st in seg.spiketrains:
self.assertEqual(len(st), 0)
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazyload_lazycascade_read(self):
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=True)
nix_blocks = self.io.nix_file.blocks
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazycascade_read(self):
def getitem(self, index):
return self._data.__getitem__(index)
from neo.io.nixio import LazyList
getitem_original = LazyList.__getitem__
LazyList.__getitem__ = getitem
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
for block in neo_blocks:
self.assertIsInstance(block.segments, LazyList)
self.assertIsInstance(block.channel_indexes, LazyList)
for seg in block.segments:
self.assertIsInstance(seg, string_types)
for chx in block.channel_indexes:
self.assertIsInstance(chx, string_types)
LazyList.__getitem__ = getitem_original
def test_load_lazy_cascade(self):
from neo.io.nixio import LazyList
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
for block in neo_blocks:
self.assertIsInstance(block.segments, LazyList)
self.assertIsInstance(block.channel_indexes, LazyList)
name = block.name
block = self.io.load_lazy_cascade("/" + name, lazy=False)
self.assertIsInstance(block.segments, list)
self.assertIsInstance(block.channel_indexes, list)
for seg in block.segments:
self.assertIsInstance(seg.analogsignals, list)
self.assertIsInstance(seg.irregularlysampledsignals, list)
self.assertIsInstance(seg.epochs, list)
self.assertIsInstance(seg.events, list)
self.assertIsInstance(seg.spiketrains, list)
def test_nocascade_read(self):
self.io._read_cascade = mock.Mock()
neo_blocks = self.io.read_all_blocks(cascade=False)
self.io._read_cascade.assert_not_called()
for block in neo_blocks:
self.assertEqual(len(block.segments), 0)
nix_block = self.io.nix_file.blocks[block.name]
self.compare_attr(block, nix_block)
def test_lazy_load_subschema(self):
blk = self.io.nix_file.blocks[0]
segpath = "/" + blk.name + "/segments/" + blk.groups[0].name
segment = self.io.load_lazy_cascade(segpath, lazy=True)
self.assertIsInstance(segment, Segment)
self.assertEqual(segment.name, blk.groups[0].name)
self.assertIs(segment.block, None)
self.assertEqual(len(segment.analogsignals[0]), 0)
segment = self.io.load_lazy_cascade(segpath, lazy=False)
self.assertEqual(np.shape(segment.analogsignals[0]), (100, 3))
class NixIOHashTest(NixIOTest):
def setUp(self):
self.hash = NixIO._hash_object
def _hash_test(self, objtype, argfuncs):
attr = {}
for arg, func in argfuncs.items():
attr[arg] = func()
obj_one = objtype(**attr)
obj_two = objtype(**attr)
hash_one = self.hash(obj_one)
hash_two = self.hash(obj_two)
self.assertEqual(hash_one, hash_two)
for arg, func in argfuncs.items():
chattr = attr.copy()
chattr[arg] = func()
obj_two = objtype(**chattr)
hash_two = self.hash(obj_two)
self.assertNotEqual(
hash_one, hash_two,
"Hash test failed with different '{}'".format(arg)
)
def test_block_seg_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"rec_datetime": self.rdate,
"file_datetime": self.rdate,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(Block, argfuncs)
self._hash_test(Segment, argfuncs)
self._hash_test(Unit, argfuncs)
def test_chx_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"index": lambda: np.random.random(10).tolist(),
"channel_names": lambda: self.rsentence(10).split(" "),
"coordinates": lambda: [(np.random.random() * pq.cm,
np.random.random() * pq.cm,
np.random.random() * pq.cm)]*10,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(ChannelIndex, argfuncs)
def test_analogsignal_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"signal": lambda: self.rquant((10, 10), pq.mV),
"sampling_rate": lambda: np.random.random() * pq.Hz,
"t_start": lambda: np.random.random() * pq.sec,
"t_stop": lambda: np.random.random() * pq.sec,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(AnalogSignal, argfuncs)
def test_irregularsignal_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"signal": lambda: self.rquant((10, 10), pq.mV),
"times": lambda: self.rquant(10, pq.ms, True),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(IrregularlySampledSignal, argfuncs)
def test_event_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"times": lambda: self.rquant(10, pq.ms),
"durations": lambda: self.rquant(10, pq.ms),
"labels": lambda: self.rsentence(10).split(" "),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(Event, argfuncs)
self._hash_test(Epoch, argfuncs)
def test_spiketrain_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"times": lambda: self.rquant(10, pq.ms, True),
"t_start": lambda: -np.random.random() * pq.sec,
"t_stop": lambda: np.random.random() * 100 * pq.sec,
"waveforms": lambda: self.rquant((10, 10, 20), pq.mV),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(SpikeTrain, argfuncs)
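    # _hash_test builds two objects with identical attributes (their hashes must
    # match) and then regenerates one attribute at a time (the hashes must then
    # differ), so every entry in argfuncs is exercised in both directions.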
class NixIOPartialWriteTest(NixIOTest):
filename = "testfile_partialwrite.h5"
nixfile = None
neo_blocks = None
original_methods = dict()
@classmethod
def setUpClass(cls):
if HAVE_NIX:
cls.nixfile = cls.create_full_nix_file(cls.filename)
def setUp(self):
self.io = NixIO(self.filename, "rw")
self.neo_blocks = self.io.read_all_blocks()
self.original_methods["_write_attr_annotations"] =\
self.io._write_attr_annotations
@classmethod
def tearDownClass(cls):
if HAVE_NIX:
cls.nixfile.close()
def tearDown(self):
self.restore_methods()
del self.io
def restore_methods(self):
for name, method in self.original_methods.items():
setattr(self.io, name, self.original_methods[name])
def _mock_write_attr(self, objclass):
typestr = str(objclass.__name__).lower()
self.io._write_attr_annotations = mock.Mock(
wraps=self.io._write_attr_annotations,
side_effect=self.check_obj_type("neo.{}".format(typestr))
)
neo_blocks = self.neo_blocks
self.modify_objects(neo_blocks, excludes=[objclass])
self.io.write_all_blocks(neo_blocks)
self.restore_methods()
def check_obj_type(self, typestring):
neq = self.assertNotEqual
def side_effect_func(*args, **kwargs):
obj = kwargs.get("nixobj", args[0])
if isinstance(obj, list):
for sig in obj:
neq(sig.type, typestring)
else:
neq(obj.type, typestring)
return side_effect_func
@classmethod
def modify_objects(cls, objs, excludes=()):
excludes = tuple(excludes)
for obj in objs:
if not (excludes and isinstance(obj, excludes)):
obj.description = cls.rsentence()
for container in getattr(obj, "_child_containers", []):
children = getattr(obj, container)
cls.modify_objects(children, excludes)
def test_partial(self):
for objclass in NixIO.supported_objects:
self._mock_write_attr(objclass)
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
def test_no_modifications(self):
self.io._write_attr_annotations = mock.Mock()
self.io.write_all_blocks(self.neo_blocks)
self.io._write_attr_annotations.assert_not_called()
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
# clearing hashes and checking again
for k in self.io._object_hashes.keys():
self.io._object_hashes[k] = None
self.io.write_all_blocks(self.neo_blocks)
self.io._write_attr_annotations.assert_not_called()
# changing hashes to force rewrite
for k in self.io._object_hashes.keys():
self.io._object_hashes[k] = "_"
self.io.write_all_blocks(self.neo_blocks)
callcount = self.io._write_attr_annotations.call_count
self.assertEqual(callcount, len(self.io._object_hashes))
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class CommonTests(BaseTestIO, unittest.TestCase):
ioclass = NixIO
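# CommonTests reuses the generic read/write checks from BaseTestIO; setting
# ioclass = NixIO is what makes that shared suite run against this IO module.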
| [
"mock.Mock",
"nixio.Value",
"quantities.Quantity",
"neo.core.Event",
"numpy.array_split",
"neo.core.Unit",
"numpy.array",
"neo.io.nixio.NixIO",
"neo.core.AnalogSignal",
"neo.core.SpikeTrain",
"numpy.arange",
"os.remove",
"numpy.random.random",
"numpy.float64",
"numpy.testing.assert_almost_equal",
"nixio.File.open",
"neo.core.Segment",
"numpy.random.choice",
"unittest.skipUnless",
"numpy.shape",
"numpy.transpose",
"neo.core.ChannelIndex",
"datetime.datetime.fromtimestamp",
"neo.core.Block",
"neo.core.Epoch",
"neo.core.IrregularlySampledSignal",
"numpy.random.randint",
"itertools.count",
"numpy.random.uniform",
"numpy.cumsum"
]
| [((1013, 1058), 'unittest.skipUnless', 'unittest.skipUnless', (['HAVE_NIX', '"""Requires NIX"""'], {}), "(HAVE_NIX, 'Requires NIX')\n", (1032, 1058), False, 'import unittest\n'), ((45446, 45491), 'unittest.skipUnless', 'unittest.skipUnless', (['HAVE_NIX', '"""Requires NIX"""'], {}), "(HAVE_NIX, 'Requires NIX')\n", (45465, 45491), False, 'import unittest\n'), ((9333, 9402), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['epoch.times.magnitude', 'mtag.positions'], {}), '(epoch.times.magnitude, mtag.positions)\n', (9363, 9402), True, 'import numpy as np\n'), ((9412, 9483), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['epoch.durations.magnitude', 'mtag.extents'], {}), '(epoch.durations.magnitude, mtag.extents)\n', (9442, 9483), True, 'import numpy as np\n'), ((10213, 10282), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['event.times.magnitude', 'mtag.positions'], {}), '(event.times.magnitude, mtag.positions)\n', (10243, 10282), True, 'import numpy as np\n'), ((10923, 10997), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['spiketrain.times.magnitude', 'mtag.positions'], {}), '(spiketrain.times.magnitude, mtag.positions)\n', (10953, 10997), True, 'import numpy as np\n'), ((13030, 13081), 'nixio.File.open', 'nixio.File.open', (['filename', 'nixio.FileMode.Overwrite'], {}), '(filename, nixio.FileMode.Overwrite)\n', (13045, 13081), False, 'import nixio\n'), ((20961, 20999), 'numpy.array_split', 'np.array_split', (['allspiketrains', 'nunits'], {}), '(allspiketrains, nunits)\n', (20975, 20999), True, 'import numpy as np\n'), ((21420, 21463), 'numpy.random.choice', 'np.random.choice', (['allsignalgroups', '(5)', '(False)'], {}), '(allsignalgroups, 5, False)\n', (21436, 21463), True, 'import numpy as np\n'), ((22867, 22890), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (22883, 22890), True, 'import numpy as np\n'), ((23115, 23122), 'neo.core.Block', 'Block', ([], {}), '()\n', (23120, 23122), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((23175, 23184), 'neo.core.Segment', 'Segment', ([], {}), '()\n', (23182, 23184), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((23271, 23319), 'neo.core.AnalogSignal', 'AnalogSignal', ([], {'signal': 'signal', 'sampling_rate': 'pq.Hz'}), '(signal=signal, sampling_rate=pq.Hz)\n', (23283, 23319), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((23413, 23482), 'neo.core.IrregularlySampledSignal', 'IrregularlySampledSignal', ([], {'times': 'times', 'signal': 'signal', 'time_units': 'pq.s'}), '(times=times, signal=signal, time_units=pq.s)\n', (23437, 23482), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((23629, 23664), 'neo.core.Epoch', 'Epoch', ([], {'times': 'times', 'durations': 'times'}), '(times=times, durations=times)\n', (23634, 23664), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((23754, 23772), 'neo.core.Event', 'Event', ([], {'times': 'times'}), '(times=times)\n', (23759, 23772), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, 
SpikeTrain, Event, Epoch\n'), ((23867, 23915), 'neo.core.SpikeTrain', 'SpikeTrain', ([], {'times': 'times', 't_stop': 'pq.s', 'units': 'pq.s'}), '(times=times, t_stop=pq.s, units=pq.s)\n', (23877, 23915), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((23965, 23986), 'quantities.Quantity', 'pq.Quantity', (['(10)', '"""mV"""'], {}), "(10, 'mV')\n", (23976, 23986), True, 'import quantities as pq\n'), ((24129, 24168), 'neo.core.ChannelIndex', 'ChannelIndex', ([], {'name': '"""achx"""', 'index': '[1, 2]'}), "(name='achx', index=[1, 2])\n", (24141, 24168), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((24262, 24268), 'neo.core.Unit', 'Unit', ([], {}), '()\n', (24266, 24268), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((24487, 24513), 'neo.io.nixio.NixIO', 'NixIO', (['self.filename', '"""ow"""'], {}), "(self.filename, 'ow')\n", (24492, 24513), False, 'from neo.io.nixio import NixIO\n'), ((24566, 24621), 'nixio.File.open', 'nixio.File.open', (['self.filename', 'nixio.FileMode.ReadOnly'], {}), '(self.filename, nixio.FileMode.ReadOnly)\n', (24581, 24621), False, 'import nixio\n'), ((24745, 24769), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (24754, 24769), False, 'import os\n'), ((25943, 25950), 'neo.core.Block', 'Block', ([], {}), '()\n', (25948, 25950), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((25965, 25974), 'neo.core.Segment', 'Segment', ([], {}), '()\n', (25972, 25974), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((26243, 26267), 'neo.core.Block', 'Block', (['"""ir signal block"""'], {}), "('ir signal block')\n", (26248, 26267), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((26282, 26306), 'neo.core.Segment', 'Segment', (['"""ir signal seg"""'], {}), "('ir signal seg')\n", (26289, 26306), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((27592, 27599), 'neo.core.Block', 'Block', ([], {}), '()\n', (27597, 27599), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((27614, 27623), 'neo.core.Segment', 'Segment', ([], {}), '()\n', (27621, 27623), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((28011, 28018), 'neo.core.Block', 'Block', ([], {}), '()\n', (28016, 28018), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((28033, 28042), 'neo.core.Segment', 'Segment', ([], {}), '()\n', (28040, 28042), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((28413, 28420), 'neo.core.Block', 'Block', ([], {}), '()\n', (28418, 28420), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((28435, 28444), 'neo.core.Segment', 'Segment', ([], {}), 
'()\n', (28442, 28444), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((28502, 28597), 'neo.core.SpikeTrain', 'SpikeTrain', ([], {'times': '([3, 4, 5] * pq.s)', 't_stop': '(10.0)', 'name': '"""spikes!"""', 'description': '"""sssssspikes"""'}), "(times=[3, 4, 5] * pq.s, t_stop=10.0, name='spikes!', description\n ='sssssspikes')\n", (28512, 28597), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((28780, 28932), 'neo.core.SpikeTrain', 'SpikeTrain', ([], {'times': '([1, 1.1, 1.2] * pq.ms)', 't_stop': '(1.5 * pq.s)', 'name': '"""spikes with wf"""', 'description': '"""spikes for waveform test"""', 'waveforms': 'waveforms'}), "(times=[1, 1.1, 1.2] * pq.ms, t_stop=1.5 * pq.s, name=\n 'spikes with wf', description='spikes for waveform test', waveforms=\n waveforms)\n", (28790, 28932), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((32158, 32179), 'quantities.Quantity', 'pq.Quantity', (['(10)', '"""mV"""'], {}), "(10, 'mV')\n", (32169, 32179), True, 'import quantities as pq\n'), ((33007, 33027), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (33023, 33027), True, 'import numpy as np\n'), ((33087, 33150), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['randarray', "section['randarray']"], {}), "(randarray, section['randarray'])\n", (33117, 33150), True, 'import numpy as np\n'), ((33189, 33205), 'numpy.float64', 'np.float64', (['(2398)'], {}), '(2398)\n', (33199, 33205), True, 'import numpy as np\n'), ((33966, 33992), 'neo.io.nixio.NixIO', 'NixIO', (['self.filename', '"""ro"""'], {}), "(self.filename, 'ro')\n", (33971, 33992), False, 'from neo.io.nixio import NixIO\n'), ((37183, 37194), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (37192, 37194), False, 'import mock\n'), ((42586, 42612), 'neo.io.nixio.NixIO', 'NixIO', (['self.filename', '"""rw"""'], {}), "(self.filename, 'rw')\n", (42591, 42612), False, 'from neo.io.nixio import NixIO\n'), ((44617, 44628), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (44626, 44628), False, 'import mock\n'), ((6544, 6561), 'itertools.count', 'itertools.count', ([], {}), '()\n', (6559, 6561), False, 'import itertools\n'), ((6788, 6801), 'numpy.shape', 'np.shape', (['sig'], {}), '(sig)\n', (6796, 6801), True, 'import numpy as np\n'), ((7372, 7392), 'numpy.transpose', 'np.transpose', (['neosig'], {}), '(neosig)\n', (7384, 7392), True, 'import numpy as np\n'), ((7517, 7566), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['sig.magnitude', 'da'], {}), '(sig.magnitude, da)\n', (7547, 7566), True, 'import numpy as np\n'), ((11300, 11354), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['neowf.magnitude', 'nixwf'], {}), '(neowf.magnitude, nixwf)\n', (11330, 11354), True, 'import numpy as np\n'), ((29132, 29152), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (29148, 29152), True, 'import numpy as np\n'), ((30457, 30464), 'neo.core.Block', 'Block', ([], {}), '()\n', (30462, 30464), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((32442, 32479), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["section['dt']"], {}), "(section['dt'])\n", (32464, 32479), False, 'from datetime import datetime\n'), 
((38013, 38047), 'numpy.shape', 'np.shape', (['segment.analogsignals[0]'], {}), '(segment.analogsignals[0])\n', (38021, 38047), True, 'import numpy as np\n'), ((11180, 11195), 'numpy.shape', 'np.shape', (['neowf'], {}), '(neowf)\n', (11188, 11195), True, 'import numpy as np\n'), ((11197, 11212), 'numpy.shape', 'np.shape', (['nixwf'], {}), '(nixwf)\n', (11205, 11212), True, 'import numpy as np\n'), ((12167, 12208), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['nixobj.created_at'], {}), '(nixobj.created_at)\n', (12189, 12208), False, 'from datetime import datetime\n'), ((12360, 12416), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["nixobj.metadata['file_datetime']"], {}), "(nixobj.metadata['file_datetime'])\n", (12382, 12416), False, 'from datetime import datetime\n'), ((20651, 20667), 'nixio.Value', 'nixio.Value', (['idx'], {}), '(idx)\n', (20662, 20667), False, 'import nixio\n'), ((20901, 20918), 'nixio.Value', 'nixio.Value', (['"""um"""'], {}), "('um')\n", (20912, 20918), False, 'import nixio\n'), ((21668, 21697), 'numpy.random.randint', 'np.random.randint', (['(1980)', '(2020)'], {}), '(1980, 2020)\n', (21685, 21697), True, 'import numpy as np\n'), ((21729, 21753), 'numpy.random.randint', 'np.random.randint', (['(1)', '(13)'], {}), '(1, 13)\n', (21746, 21753), True, 'import numpy as np\n'), ((21783, 21807), 'numpy.random.randint', 'np.random.randint', (['(1)', '(29)'], {}), '(1, 29)\n', (21800, 21807), True, 'import numpy as np\n'), ((22378, 22402), 'numpy.random.choice', 'np.random.choice', (['(0, 1)'], {}), '((0, 1))\n', (22394, 22402), True, 'import numpy as np\n'), ((22426, 22445), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (22443, 22445), True, 'import numpy as np\n'), ((22935, 22949), 'numpy.cumsum', 'np.cumsum', (['arr'], {}), '(arr)\n', (22944, 22949), True, 'import numpy as np\n'), ((26117, 26138), 'quantities.Quantity', 'pq.Quantity', (['(10)', '"""Hz"""'], {}), "(10, 'Hz')\n", (26128, 26138), True, 'import quantities as pq\n'), ((26410, 26435), 'numpy.random.random', 'np.random.random', (['(20, 3)'], {}), '((20, 3))\n', (26426, 26435), True, 'import numpy as np\n'), ((27768, 27809), 'numpy.array', 'np.array', (["['one', 'two', 'three', 'four']"], {}), "(['one', 'two', 'three', 'four'])\n", (27776, 27809), True, 'import numpy as np\n'), ((28163, 28188), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (28171, 28188), True, 'import numpy as np\n'), ((30558, 30567), 'neo.core.Segment', 'Segment', ([], {}), '()\n', (30565, 30567), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((32851, 32871), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (32867, 32871), True, 'import numpy as np\n'), ((7833, 7885), 'quantities.Quantity', 'pq.Quantity', (['timedim.sampling_interval', 'timedim.unit'], {}), '(timedim.sampling_interval, timedim.unit)\n', (7844, 7885), True, 'import quantities as pq\n'), ((8374, 8443), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['neosig.times.magnitude', 'timedim.ticks'], {}), '(neosig.times.magnitude, timedim.ticks)\n', (8404, 8443), True, 'import numpy as np\n'), ((22143, 22170), 'numpy.random.randint', 'np.random.randint', (['(1)', 'maxwl'], {}), '(1, maxwl)\n', (22160, 22170), True, 'import numpy as np\n'), ((26769, 26788), 'quantities.Quantity', 'pq.Quantity', (['(3)', '"""s"""'], {}), "(3, 's')\n", (26780, 26788), True, 'import quantities as 
pq\n'), ((27127, 27147), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (27143, 27147), True, 'import numpy as np\n'), ((27193, 27218), 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), '((10, 3))\n', (27209, 27218), True, 'import numpy as np\n'), ((28107, 28127), 'numpy.arange', 'np.arange', (['(0)', '(30)', '(10)'], {}), '(0, 30, 10)\n', (28116, 28127), True, 'import numpy as np\n'), ((31815, 31821), 'neo.core.Unit', 'Unit', ([], {}), '()\n', (31819, 31821), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((40256, 40274), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (40272, 40274), True, 'import numpy as np\n'), ((40323, 40341), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (40339, 40341), True, 'import numpy as np\n'), ((40390, 40408), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (40406, 40408), True, 'import numpy as np\n'), ((18008, 18023), 'nixio.Value', 'nixio.Value', (['(20)'], {}), '(20)\n', (18019, 18023), False, 'import nixio\n'), ((30700, 30748), 'neo.core.AnalogSignal', 'AnalogSignal', ([], {'signal': 'signal', 'sampling_rate': 'pq.Hz'}), '(signal=signal, sampling_rate=pq.Hz)\n', (30712, 30748), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((30936, 31005), 'neo.core.IrregularlySampledSignal', 'IrregularlySampledSignal', ([], {'times': 'times', 'signal': 'signal', 'time_units': 'pq.s'}), '(times=times, signal=signal, time_units=pq.s)\n', (30960, 31005), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((31209, 31244), 'neo.core.Epoch', 'Epoch', ([], {'times': 'times', 'durations': 'times'}), '(times=times, durations=times)\n', (31214, 31244), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((31329, 31347), 'neo.core.Event', 'Event', ([], {'times': 'times'}), '(times=times)\n', (31334, 31347), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((31442, 31490), 'neo.core.SpikeTrain', 'SpikeTrain', ([], {'times': 'times', 't_stop': 'pq.s', 'units': 'pq.s'}), '(times=times, t_stop=pq.s, units=pq.s)\n', (31452, 31490), False, 'from neo.core import Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch\n'), ((39482, 39502), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (39498, 39502), True, 'import numpy as np\n'), ((41886, 41904), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (41902, 41904), True, 'import numpy as np\n'), ((41953, 41971), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (41969, 41971), True, 'import numpy as np\n'), ((39634, 39652), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (39650, 39652), True, 'import numpy as np\n'), ((39707, 39725), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (39723, 39725), True, 'import numpy as np\n'), ((39780, 39798), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (39796, 39798), True, 'import numpy as np\n')] |
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field
from typing import List, Iterator, TypeVar, Union, Any, Generic
import pandas as pd
from pandas.core.indexing import _LocIndexer
from reamber.base.Map import Map
from reamber.base.Property import stack_props
NoteListT = TypeVar('NoteListT')
HitListT = TypeVar('HitListT')
HoldListT = TypeVar('HoldListT')
BpmListT = TypeVar('BpmListT')
MapT = TypeVar('MapT')
@dataclass
class MapSet(Generic[NoteListT, HitListT, HoldListT, BpmListT, MapT]):
maps: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]] = field(default_factory=lambda: [])
def __init__(self, maps: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]]):
self.maps = maps
def __iter__(self) -> Iterator[MapT]:
for m in self.maps:
yield m
def items(self):
for m in self.maps:
yield m.__class__, m
def __getitem__(self, item: Union[Any, type]):
if isinstance(item, type):
# We want to index by type.
return [m[item][0] for m in self.maps]
else:
# We want to index by slice/int/etc.
return self.maps[item]
def __setitem__(self, key: Union[Any, type], value):
this = self[key]
assert len(this) == len(value), "The lengths of the set and get must be the same."
for i in range(len(this)): this[i] = value[i]
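# Usage sketch (the indexed type below is illustrative, not a specific reamber class):
# >>> map_set[0]                 # positional indexing returns the first Map
# >>> map_set[SomeNoteListType]  # indexing by a type gathers that list from every Map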
def deepcopy(self):
""" Returns a deep copy of itself """
return deepcopy(self)
def describe(self, rounding: int = 2, unicode: bool = False) -> List[str]:
""" Describes the map's attributes as a short summary
:param rounding: The decimal rounding
:param unicode: Whether to attempt to get the non-unicode or unicode. \
Doesn't attempt to translate.
"""
return [m.describe(rounding=rounding, unicode=unicode, s=self) for m in self]
def rate(self, by: float) -> MapSet:
""" Changes the rate of the map. Note that you need to do rate on the mapset to affect BPM.
:param by: The value to rate it by. 1.1x speeds up the song by 10%. Hence 10/11 of the length.
"""
copy = self.deepcopy()
copy.maps = [m.rate(by=by) for m in copy.maps]
return copy
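# For example (a sketch; `map_set` is an arbitrary MapSet instance):
# >>> faster_set = map_set.rate(1.1)   # 10% speed-up of every map, BPMs included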
# noinspection DuplicatedCode,PyUnresolvedReferences
@stack_props()
class Stacker:
""" This purpose of this class is to provide unnamed access to the lists.
This can make code much shorter as we don't have to deal with keyed dicts.
For example,
>>> m = Map.stack()
>>> m.offset *= 2
Or if you do it inline,
>>> m.stack().lengths *= 2
This will change the offsets of all lists that have the offset property.
This will change the map itself, as stack is a reference
This also is a "naive" system, so if the property, like column, doesn't exist
for Bpms, it will not break it. However, all properties must exist at least
once.
If the property isn't listed here, you can do string indexing
For example,
>>> m = Map.stack()
>>> m.other_property *= 2
"""
""" How does this work?
Firstly, if you concat a list of dfs, pd will always make a copy, so you have to
preserve the original dfs and also the stacked.
LISTS ---STACK---> COPY ---> STACKED
+---------- REFERENCE ---> UNSTACKED
The reason for stacking is so that we don't have to loop through all dfs to mutate.
If we did loop through the dfs, we would have to stack them anyway, so this is just as efficient.
However, to my eyes it's just easier to stack first and then attempt to mutate.
So we keep two things in check: the unstacked and the stacked.
However, we can only mutate the stacked one and then convert back to the unstacked,
because the unstacked is the one that is referenced.
Hence, we keep track of what partitions of the unstacked are each of the stacked.
IXS | | | | |
UNSTACKED [........] [........] [..] [....]
STACKED [...............................]
That's where ixs come in to help in converting the stacked values to unstacked.
So the workflow is that when we retrieve a value, it's always from the stacked.
Then, when it's mutated, it can be set and it will always call the _update
to update the referenced unstacked.
"""
stackers: List[Map.Stacker]
# noinspection PyProtectedMember
def __init__(self, stackers: List[Map.Stacker]):
self.stackers = stackers
def __getitem__(self, item):
return pd.DataFrame([i[item] for i in self.stackers])
def __setitem__(self, key, value):
for s, i in zip(self.stackers, value.iloc):
s[key] = i
_props = ['offset', 'column', 'length', 'bpm', 'metronome']
def stack(self, include: List[str] = None):
""" This creates a mutator for this instance, see Mutator for details. """
return self.Stacker([_.stack(include) for _ in self])
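# Stacked-mutation sketch (property names follow the _props list above; `map_set` is illustrative):
# >>> stacker = map_set.stack()
# >>> stacker.offset *= 2   # doubles every offset across all maps in the set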
| [
"reamber.base.Property.stack_props",
"copy.deepcopy",
"pandas.DataFrame",
"dataclasses.field",
"typing.TypeVar"
]
| [((326, 346), 'typing.TypeVar', 'TypeVar', (['"""NoteListT"""'], {}), "('NoteListT')\n", (333, 346), False, 'from typing import List, Iterator, TypeVar, Union, Any, Generic\n'), ((358, 377), 'typing.TypeVar', 'TypeVar', (['"""HitListT"""'], {}), "('HitListT')\n", (365, 377), False, 'from typing import List, Iterator, TypeVar, Union, Any, Generic\n'), ((390, 410), 'typing.TypeVar', 'TypeVar', (['"""HoldListT"""'], {}), "('HoldListT')\n", (397, 410), False, 'from typing import List, Iterator, TypeVar, Union, Any, Generic\n'), ((422, 441), 'typing.TypeVar', 'TypeVar', (['"""BpmListT"""'], {}), "('BpmListT')\n", (429, 441), False, 'from typing import List, Iterator, TypeVar, Union, Any, Generic\n'), ((449, 464), 'typing.TypeVar', 'TypeVar', (['"""MapT"""'], {}), "('MapT')\n", (456, 464), False, 'from typing import List, Iterator, TypeVar, Union, Any, Generic\n'), ((615, 649), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : [])'}), '(default_factory=lambda : [])\n', (620, 649), False, 'from dataclasses import dataclass, field\n'), ((2375, 2388), 'reamber.base.Property.stack_props', 'stack_props', ([], {}), '()\n', (2386, 2388), False, 'from reamber.base.Property import stack_props\n'), ((1523, 1537), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (1531, 1537), False, 'from copy import deepcopy\n'), ((4768, 4814), 'pandas.DataFrame', 'pd.DataFrame', (['[i[item] for i in self.stackers]'], {}), '([i[item] for i in self.stackers])\n', (4780, 4814), True, 'import pandas as pd\n')] |
"""
=========
filtering.py
=========
This module provides more granular filtering for captures.
You can customize your own filters too.
"""
from __future__ import annotations
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from json import JSONEncoder
from pathlib import PosixPath
from typing import (
Any,
Dict,
Iterable,
List,
Mapping,
NewType,
Optional,
Protocol,
Type,
TypedDict,
Union,
)
import h5py
import numpy as np
from h5py import File as Fast5File
from ..hdf5 import (
HasFast5,
HDF5_Group,
HDF5_GroupSerialableDataclass,
HDF5_GroupSerializable,
HDF5_GroupSerializing,
IsAttr,
)
from ..logger import Logger, getLogger
from ..signals import Capture
from .core import NumpyArrayLike, PathLikeOrString, ReadId, stripped_by_keys
from .plugin import Plugin
CaptureOrTimeSeries = Union[Capture, NumpyArrayLike]
# Unique identifier for a collection of filters (e.g. "ProfJeffsAwesomeFilters")
FilterSetId = NewType("FilterSetId", str)
# Unique identifier for an individual filter (e.g. "min_frac")
FilterName = NewType("FilterName", str)
__all__ = [
"does_pass_filters",
"get_filters",
"FilterName",
"FilterSetId",
"FilterConfig",
"Filter",
"Filters",
"DEFAULT_FILTER_PLUGINS",
"FilterSet",
"FilterConfigs",
"FilterPlugin",
"PATH",
]
@dataclass(frozen=True)
class FILTER_PATH:
ROOT = f"/Filter/"
@classmethod
def filter_set_path(cls, filter_set_id: FilterSetId) -> str:
filter_path = str(PosixPath(FILTER_PATH.ROOT, filter_set_id))
return filter_path
@classmethod
def filter_set_pass_path(cls, filter_set_id: FilterSetId) -> str:
pass_path = str(PosixPath(FILTER_PATH.filter_set_path(filter_set_id), "pass"))
return pass_path
@classmethod
def filter_set_pass_path_for_read_id(cls, filter_set_id: FilterSetId, read_id: ReadId) -> str:
pass_path = str(PosixPath(FILTER_PATH.filter_set_pass_path(filter_set_id), read_id))
return pass_path
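# Resulting HDF5 paths, using a hypothetical filter set id and read id (a sketch
# derived from the PosixPath joins above, not taken from the project docs):
#   FILTER_PATH.filter_set_path("my_filters")                              -> "/Filter/my_filters"
#   FILTER_PATH.filter_set_pass_path("my_filters")                         -> "/Filter/my_filters/pass"
#   FILTER_PATH.filter_set_pass_path_for_read_id("my_filters", "read_0001") -> "/Filter/my_filters/pass/read_0001"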
class FilterConfig(TypedDict):
"""A blueprint for how to construct a FilterPlugin.
Contains a name, and any number of other attributes
Note on terminology:
- FilterConfig: A high-level description of a filter.
- FilterPlugin: An actual, callable, implementation of a FilterConfig.
For custom plugins, make sure "filepath" is an attribute that points to the file to load
"""
# Mapping of a FilterName to filter configurations.
FilterConfigs = NewType("FilterConfigs", Dict[FilterName, FilterConfig])
# TODO: Filter Plugin should check that name is unique. https://github.com/uwmisl/poretitioner/issues/91
class FilterPlugin(Plugin):
"""
Abstract class for Filter plugins. To write your own filter, subclass this abstract
class and implement the `apply` method and `name` property.
"""
@classmethod
@abstractmethod
def name(cls) -> str:
"""Unique name for this filter.
Make sure it doesn't conflict with any existing names.
Returns
-------
str
The unique name for this filter (e.g. "fourier_transform").
Raises
------
NotImplementedError
Raised if this filter is called without this name method being implemented.
"""
raise NotImplementedError(
"'name' class method not implemented for filter. This class method should return a unique name for this filter."
)
@abstractmethod
def apply(self, capture: CaptureOrTimeSeries) -> bool:
"""Returns True if a capture passes a given filter criteria.
For instance, a range filter would check that a capture's summary statistics lie within a given range.
Parameters
----------
capture : np.typing.ArrayLike
Time series capture to filter.
Returns
-------
bool
Whether this capture passes the filter.
Raises
------
NotImplementedError
Raised when the filter method isn't implemented by the consuming Filter class
"""
raise NotImplementedError(
"'apply' method not implemented for filter. This method should return True if and only if applied to a capture that meets the filter criterion. For instance, "
)
def __call__(self, capture: CaptureOrTimeSeries) -> bool:
"""Apply the filter.
Defining `__call__` lets us do nice things like:
class MyCustomFilter(FilterPlugin):
def apply(capture):
# ...
pass
# Later in code where filtering is done....
valid_captures = []
filters = [ MyCustomFilter(), AnotherCustomFilter(), ... ]
valid_captures = [capture for capture in captures if all([filt(capture) for filt in filters])]
for capture in captures: # You'd want to parallelize this in a real life example...
for filt in filters:
filtered_captures = filt(capture).
Parameters
----------
capture : CaptureOrTimeSeries
Capture to filter.
Returns
-------
bool
Whether this capture passes the filter.
"""
result = self.apply(capture)
return result
RANGE_FILTER_DEFAULT_MINIMUM: float = -np.inf
RANGE_FILTER_DEFAULT_MAXIMUM: float = np.inf
class RangeFilter(FilterPlugin):
def __init__(self, minimum: Optional[float] = None, maximum: Optional[float] = None):
"""A filter that filters based on whether a signal falls between a maximum and a minimum.
Parameters
----------
minimum : float, optional
The smallest value this signal should be allowed to take (inclusive), by default RangeFilter.DEFAULT_MINIMUM
maximum : float, optional
The largest value this signal should be allowed to take (inclusive), by default RangeFilter.DEFAULT_MAXIMUM
"""
self.minimum = minimum if minimum is not None else RANGE_FILTER_DEFAULT_MINIMUM
self.maximum = maximum if maximum is not None else RANGE_FILTER_DEFAULT_MAXIMUM
def extract(self, capture: CaptureOrTimeSeries) -> NumpyArrayLike:
"""Extracts a summary statistic from the capture (e.g. mean, length, standard deviation).
Identity operation by default (just returns the capture).
You can use this function to transform the data in a useful way before processing it (e.g.
getting the mean value of a capture before filtering based on that mean.)
Note: If we picture the filtering workflow as an ETL (Extract-Transform-Load) pipeline, this would be the "transform"
(take data, modify it for a later purpose), but I feel that "transform" is perhaps a misleading function name in this context.
Parameters
----------
capture : CaptureOrTimeSeries
Capture from which to extract data.
"""
try:
    # Prefer the fractionalized signal when we were handed a Capture object.
    signal = capture.fractionalized()
except AttributeError:
    # Otherwise assume we already have a raw time series.
    signal = capture
return signal
# signal = getattr(capture, Capture.fractionalized.__name__, capture)
def is_in_range(self, value: Union[NumpyArrayLike, float]) -> bool:
try:
# If the value is just a float, we can use this handy syntax:
return self.minimum <= value <= self.maximum
except ValueError:
# But we're not allowed to use that syntax on numpy arrays.
return all(np.logical_and(self.minimum <= value, value <= self.maximum))
def apply(self, signal):
value = self.extract(signal)
return self.is_in_range(value)
class StandardDeviationFilter(RangeFilter):
"""Filters for captures with standard deviations in some range."""
@classmethod
def name(cls) -> str:
return "stdv"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.std(signal)
class MeanFilter(RangeFilter):
"""Filters for captures with an arithmetic mean within a range."""
@classmethod
def name(cls) -> str:
return "mean"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.mean(signal)
class MedianFilter(RangeFilter):
"""Filters for captures with a median within a range."""
@classmethod
def name(cls) -> str:
return "median"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.median(signal)
class MinimumFilter(RangeFilter):
"""Filters for captures with a minimum within a range."""
@classmethod
def name(cls) -> str:
return "min"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.min(signal)
class MaximumFilter(RangeFilter):
"""Filters for captures with a maximum within a range."""
@classmethod
def name(cls) -> str:
return "max"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.max(signal)
class LengthFilter(RangeFilter):
"""Filters captures based on their length."""
@classmethod
def name(cls) -> str:
return "length"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return len(signal)
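# Minimal usage sketch of the built-in range filters (thresholds are invented):
# >>> import numpy as np
# >>> signal = np.linspace(0.0, 1.0, 500)
# >>> LengthFilter(minimum=100)(signal)            # at least 100 samples
# True
# >>> MeanFilter(minimum=0.1, maximum=0.9)(signal)  # mean of 0.5 lies within the range
# True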
class EjectedFilter(FilterPlugin):
"""Filters captures based on whether they were ejected from the pore."""
@classmethod
def name(cls) -> str:
return "ejected"
def extract(self, capture: Capture):
return capture.ejected
"""
How to Create Your Own Custom Filter:
Need more advanced filtering than what we provide out of the box? No problem.
Create your own custom filter by inheriting from the FilterPlugin class.
For this example, let's do something complex. Say you only want to examine captures
that have more than 5 samples with a hyperbolic tangent greater than some threshold.
That means our custom filter's `apply` function should return True if and only if
the signal has more than 5 samples greater than the threshold, after taking the hyperbolic tangent in `extract`.
"""
class MyCustomFilter(FilterPlugin):
threshold: float = 0.5 # Totally arbitrary.
def name(self):
return "foo"
def extract(self, capture):
# Do the transformations here, or pre-process it before the filter.
# Gets the hyperbolic tangent of the signal.
extracted = np.tanh(capture.signal)
return extracted
def apply(self, signal):
# Only return True if at least 5 samples have a hyperbolic tangent greater than the threshold.
extracted = self.extract(signal)
# If we want to filter out signals with fewer than 5 matching samples, then we
# should retrun True when there are 5 or more matching samples.
n_meeting_threshold = len(
extracted[extracted > self.threshold]
) # Number of samples greater than the threshold
meets_criteria = (
n_meeting_threshold >= 5
) # Are there at least 5 samples meeting this threshold?
return meets_criteria
def apply_feature_filters(capture: CaptureOrTimeSeries, filters: List[FilterPlugin]) -> bool:
"""
Check whether an array of current values (i.e. a single nanopore capture)
passes a set of filters. Filters can be based on summary statistics
(e.g., mean) and/or a range of allowed values.
Notes on filter behavior: If the filters list is empty, there are no filters
and the capture passes.
Parameters
----------
capture : CaptureOrTimeSeries | NumpyArrayLike
Capture containing time series of nanopore current values for a single capture, or the signal itself.
filters : List[FilterPlugin]
List of FilterPlugin instances. Write your own filter by subclassing FilterPlugin.
Returns
-------
boolean
True if capture passes all filters; False otherwise.
"""
if filters is None:
filters = []
# TODO: Parallelize? https://github.com/uwmisl/poretitioner/issues/67
filtered = [filter_out(capture) for filter_out in filters]
# Did this signal pass all filters?
all_passed = all(filtered)
return all_passed
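# Hedged usage sketch (the filter choices and thresholds are arbitrary):
# >>> signal = np.linspace(0.0, 1.0, 200)
# >>> apply_feature_filters(signal, [LengthFilter(minimum=50), MeanFilter(maximum=0.9)])
# True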
def check_capture_ejection_by_read(f5, read_id):
"""Checks whether the current capture was in the pore until the voltage
was reversed.
Parameters
----------
f5 : h5py.File object (open for reading or more)
Capture fast5 file
read_id : TODO
Returns
-------
boolean
True if the end of the capture coincides with the end of a voltage window.
"""
try:
ejected = f5.get(f"/read_{read_id}/Signal").attrs["ejected"]
except AttributeError:
raise ValueError(f"path /read_{read_id} does not exist in the fast5 file.")
return ejected
def check_capture_ejection(end_capture, voltage_ends, tol_obs=20):
"""Checks whether the current capture was in the pore until the voltage
was reversed.
Essentially checks whether a value (end_capture) is close enough (within
a margin of tol_obs) to any value in voltage_ends.
Parameters
----------
end_capture : numeric
The end time of the capture.
voltage_ends : list of numeric
List of times when the standard voltage ends.
tol_obs : int, optional
Tolerance for defining when the end of the capture = voltage end, by default 20
Returns
-------
boolean
True if the end of the capture coincides with the end of a voltage window.
"""
for voltage_end in voltage_ends:
if np.abs(end_capture - voltage_end) < tol_obs:
return True
return False
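# Quick sanity check of the tolerance logic (values are invented):
# >>> check_capture_ejection(998, [500, 1000])   # |998 - 1000| = 2 < 20
# True
# >>> check_capture_ejection(950, [500, 1000])   # closest voltage end is 50 observations away
# False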
def filter_like_existing(config, example_fast5, example_filter_path, fast5_files, new_filter_path):
# Filters a set of fast5 files exactly the same as an existing filter
# TODO : #68 : implement
raise NotImplementedError()
def get_filter_pass_path(filter_set_id, read_id):
return FILTER_PATH.filter_set_pass_path(filter_set_id)
__DEFAULT_FILTER_PLUGINS = [
MeanFilter,
StandardDeviationFilter,
MedianFilter,
MinimumFilter,
MaximumFilter,
LengthFilter,
]
DEFAULT_FILTER_PLUGINS = {
filter_plugin_class.name(): filter_plugin_class
for filter_plugin_class in __DEFAULT_FILTER_PLUGINS
}
class Filtering(Protocol):
"""Classes that adhere to the Filtering protocol
provide an 'apply' method to an input that returns True
if and only if the input passes its filter.
These are also callable, so calling a filter on an input
is functionally equivalent to calling its apply method.
"""
def __call__(self, *args, **kwargs) -> bool:
raise NotImplementedError("Filtering protocol hasn't implemented __call__ yet!")
def apply(self, *args, **kwargs) -> bool:
raise NotImplementedError("Filtering protocol hasn't implemented Apply yet!")
@dataclass
class Filter(Filtering):
"""A named filter that can be applied to some data.
You can use this filter by just calling it on some data.
my_signal = [1,2,3,4]
filter = Filter(...)
passed_filter: bool = filter(my_signal)
Parameters
----------
config : FilterConfig
A description of this filter's configuration (e.g. where it was loaded from).
plugin : FilterPlugin
The actual implementation of this filter.
We have this class defined with
"""
config: FilterConfig
plugin: FilterPlugin
def __call__(self, *args, **kwargs) -> bool:
return self.plugin(*args, **kwargs)
def apply(self, *args, **kwargs) -> bool:
return self.plugin.apply(*args, **kwargs)
@property
def name(self) -> FilterName:
return FilterName(self.plugin.__class__.name())
def as_attr(self) -> Dict[str, Any]:
name = self.name
attrs = {**vars(self.config), **vars(self.plugin), name: name}
return attrs
def from_attr(self, attr) -> IsAttr:
...
import json
@dataclass
class HDF5_FilterSerialable(Filter, HDF5_GroupSerialableDataclass):
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
log = log if log is not None else getLogger()
# Note: This line simply registers a group with the name 'name' in the parent group.
this_group = HDF5_Group(parent_group.require_group(self.name))
all_attrs = {**self.config, **vars(self.plugin)}
this_group.create_attrs(all_attrs)
# Implementers must now write their serialized instance to this group.
return this_group
@classmethod
def from_group(
cls, group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerialableDataclass:
# You see, the trouble is, in the above 'as_group' call, we lumped together
# all the attributes of the FilterConfig and the FilterPlugin, not knowing
# which attributes belonged to which class.
#
# Now, here in `from_group`, it's time to pay the piper and figure out which attribute
# goes where to create a new Filter instance.
#
# This is likely achievable through the plugin architecture, since the plugin's
# name is unique, we can try to find a plugin with a given name, then get its attributes from there.
# Load
log.warning("Filter.from_group not implemented...It's a whole thing (see comment)")
# This is pure <NAME>.
return super().from_group(group, log)
# class Filters(HDF5_GroupSerialableDataclass):
# filters:
Filters = Dict[FilterName, Filter]
def get_filters(filter_configs: Optional[FilterConfigs] = None) -> Filters:
"""Creates Filters from a list of filter configurations.
Parameters
----------
filter_configs : Optional[FilterConfigs]
A mapping of filter names to their configurations, None by default (i.e. no filtering).
Returns
-------
Filters
A set of callable/applyable filters.
"""
filter_configs = filter_configs if filter_configs is not None else FilterConfigs({})
my_filters = {
name: filter_from_config(name, filter_config)
for name, filter_config in filter_configs.items()
}
return my_filters
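# Sketch of building a Filters mapping from configurations (keys/values are invented):
# >>> configs = FilterConfigs({FilterName("length"): {"minimum": 100},
# ...                          FilterName("mean"): {"maximum": 0.9}})
# >>> my_filters = get_filters(configs)
# >>> sorted(my_filters)
# ['length', 'mean']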
def does_pass_filters(capture: CaptureOrTimeSeries, filters: Iterable[Filter]) -> bool:
"""
Check whether an array of values (e.g. a single nanopore capture)
passes a set of filters. Filters can be based on summary statistics
(e.g., mean) and/or a range of allowed values.
Parameters
----------
capture : CaptureOrTimeSeries | NumpyArrayLike
Capture containing time series of nanopore current values for a single capture, or the signal itself.
filters : Iterable[Filter]
The set of filters to apply. Write your own filter by subclassing FilterPlugin.
Returns
-------
boolean
True if capture passes all filters; False otherwise.
"""
all_passed = True
for some_filter in filters:
if not some_filter(capture):
return False
return all_passed
@dataclass(frozen=True)
class FilterSetProtocol(Filtering, Protocol):
filter_set_id: FilterSetId
filters: Filters
@classmethod
def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
...
@dataclass(frozen=True, init=False)
class FilterSet(FilterSetProtocol):
"""
A collection of filters with a name for easy
identification. Essentially a mapping of filter names to their implementations.
"""
def validate(self):
raise NotImplementedError("Implement validation for filters!")
def __init__(self, filter_set_id: FilterSetId, filters: Filters) -> None:
filterset = super().__init__(self)
object.__setattr__(self, "filter_set_id", filter_set_id)
object.__setattr__(self, "filters", filters)
# self.name = name
# self.filters = filters
############################
#
# FilterSetProtocol
#
############################
@classmethod
def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
filters: Filters = get_filters(filter_configs)
filter_set = cls.__new__(cls, name, filters)
filter_set.__init__(name, filters)
return filter_set
def apply(self, capture: CaptureOrTimeSeries) -> bool:
return does_pass_filters(capture, self.filters.values())
def __call__(self, capture: CaptureOrTimeSeries) -> bool:
return self.apply(capture)
class HDF5_FilterSet(FilterSet, HDF5_GroupSerialableDataclass):
def __init__(self, filter_set: FilterSet) -> None:
self._filterset = filter_set
############################
#
# HDF5_GroupSerializable
#
############################
def name(self):
return self._filterset.filter_set_id
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
filter_set_group = parent_group.require_group(self.name())
for name, filter_t in self._filterset.filters.items():
hdf5_filter = HDF5_FilterSerialable(filter_t.config, filter_t.plugin)
hdf5_filter.as_group(filter_set_group)
return HDF5_Group(filter_set_group)
# @classmethod
# def from_group(
# cls, group: HDF5_Group, log: Optional[Logger] = None
# ) -> HDF5_GroupSerializable:
# raise NotImplementedError(
# f"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object."
# )
def filter_from_config(name: str, config: FilterConfig, log: Logger = getLogger()) -> Filter:
"""Creates a Filter from a config spefication. If no "filename" is present in the FilterConfig, it's
assumed to be one of the default filtesr
Parameters
----------
name : str
The unique name of a filter.
config : FilterConfig
Filter configuration to build the plugin.
log : Logger, optional
Logger to use for information/warnings/debug, by default getLogger()
Returns
-------
Filter
A filter that can be applied to some data.
Raises
------
AttributeError
A filter plugin could not be built from the configuration description. If this error is raised, be sure to check
1) A plugin class with the name in the configuration is defined at the filepath described in the configuration
2) The plugin class inherits from the `FilterPlugin` abstract base class.
"""
filepath = config.get("filepath", None)
# TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
plugin = None
if name in DEFAULT_FILTER_PLUGINS:
plugin = DEFAULT_FILTER_PLUGINS[name]()
else:
# TODO: For non-default FilterPlugins, load the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
plugin = plugin_from_file(name, filepath)
pass
# Make sure any plugin attributes defined in the config are moved over to the plugin instance.
try:
# Here, we take care of setting whatever attributes the plugin config defines on the new plugin instance.
for key, value in config.items():
object.__setattr__(plugin, key, value)
except AttributeError as e:
log.warning(
"""
Uh oh, couldn't find plugin '{name}'. Are you sure:
1) A plugin class with the name '{name}' is defined in the file {filepath}?
2) That plugin class inherits from `FilterPlugin`?
"""
)
raise e
my_filter = Filter(config, plugin)
return my_filter
def plugin_from_file(name: str, filepath: PathLikeOrString):
"""[summary]
Parameters
----------
name : str
[description]
filepath : PathLikeOrString
[description]
Returns
-------
[type]
[description]
Raises
------
NotImplementedError
[description]
"""
# TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
raise NotImplementedError(
"Plugin from file has not been implemented! This method should take in a filepath and filter name, and return a runnable FilterPlugin!"
)
| [
"numpy.mean",
"numpy.abs",
"numpy.median",
"numpy.logical_and",
"pathlib.PosixPath",
"dataclasses.dataclass",
"numpy.min",
"typing.NewType",
"numpy.max",
"numpy.tanh",
"numpy.std"
]
| [((1019, 1046), 'typing.NewType', 'NewType', (['"""FilterSetId"""', 'str'], {}), "('FilterSetId', str)\n", (1026, 1046), False, 'from typing import Any, Dict, Iterable, Mapping, NewType, Optional, Protocol, Type, TypedDict, Union\n'), ((1124, 1150), 'typing.NewType', 'NewType', (['"""FilterName"""', 'str'], {}), "('FilterName', str)\n", (1131, 1150), False, 'from typing import Any, Dict, Iterable, Mapping, NewType, Optional, Protocol, Type, TypedDict, Union\n'), ((1400, 1422), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1409, 1422), False, 'from dataclasses import dataclass\n'), ((2568, 2624), 'typing.NewType', 'NewType', (['"""FilterConfigs"""', 'Dict[FilterName, FilterConfig]'], {}), "('FilterConfigs', Dict[FilterName, FilterConfig])\n", (2575, 2624), False, 'from typing import Any, Dict, Iterable, Mapping, NewType, Optional, Protocol, Type, TypedDict, Union\n'), ((19419, 19441), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (19428, 19441), False, 'from dataclasses import dataclass\n'), ((19664, 19698), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'init': '(False)'}), '(frozen=True, init=False)\n', (19673, 19698), False, 'from dataclasses import dataclass\n'), ((8107, 8121), 'numpy.std', 'np.std', (['signal'], {}), '(signal)\n', (8113, 8121), True, 'import numpy as np\n'), ((8403, 8418), 'numpy.mean', 'np.mean', (['signal'], {}), '(signal)\n', (8410, 8418), True, 'import numpy as np\n'), ((8694, 8711), 'numpy.median', 'np.median', (['signal'], {}), '(signal)\n', (8703, 8711), True, 'import numpy as np\n'), ((8986, 9000), 'numpy.min', 'np.min', (['signal'], {}), '(signal)\n', (8992, 9000), True, 'import numpy as np\n'), ((9275, 9289), 'numpy.max', 'np.max', (['signal'], {}), '(signal)\n', (9281, 9289), True, 'import numpy as np\n'), ((10700, 10723), 'numpy.tanh', 'np.tanh', (['capture.signal'], {}), '(capture.signal)\n', (10707, 10723), True, 'import numpy as np\n'), ((1574, 1616), 'pathlib.PosixPath', 'PosixPath', (['FILTER_PATH.ROOT', 'filter_set_id'], {}), '(FILTER_PATH.ROOT, filter_set_id)\n', (1583, 1616), False, 'from pathlib import PosixPath\n'), ((13897, 13930), 'numpy.abs', 'np.abs', (['(end_capture - voltage_end)'], {}), '(end_capture - voltage_end)\n', (13903, 13930), True, 'import numpy as np\n'), ((7645, 7705), 'numpy.logical_and', 'np.logical_and', (['(self.minimum <= value)', '(value <= self.maximum)'], {}), '(self.minimum <= value, value <= self.maximum)\n', (7659, 7705), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for Interrogate."""
import socket
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact_test
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import search
from grr.lib import test_lib
class DiscoveryTestEventListener(flow.EventListener):
"""A test listener to receive new client discoveries."""
well_known_session_id = rdfvalue.SessionID(flow_name="discovery_test")
EVENTS = ["Discovery"]
# For this test we just write the event as a class attribute.
event = None
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
_ = message
DiscoveryTestEventListener.event = event
class TestClientInterrogate(artifact_test.ArtifactTest):
"""Test the interrogate flow."""
def _CheckUsers(self, all_users):
"""Check all user stores."""
summary = self.fd.GetSummary()
self.assertItemsEqual([x.username for x in summary.users], all_users)
users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]
self.assertItemsEqual(users, all_users)
self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)
# Check kb users
kbusers = [x.username for x in
self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]
self.assertItemsEqual(kbusers, all_users)
def _CheckAFF4Object(self, hostname, system, install_date):
self.assertEqual(self.fd.Get(self.fd.Schema.HOSTNAME), hostname)
self.assertEqual(self.fd.Get(self.fd.Schema.SYSTEM), system)
self.assertEqual(self.fd.Get(self.fd.Schema.INSTALL_DATE), install_date)
def _CheckClientInfo(self):
info = self.fd.Get(self.fd.Schema.CLIENT_INFO)
self.assertEqual(info.client_name, config_lib.CONFIG["Client.name"])
self.assertEqual(info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(info.build_time, config_lib.CONFIG["Client.build_time"])
def _CheckGRRConfig(self):
"""Check old and new client config."""
config_info = self.fd.Get(self.fd.Schema.GRR_CONFIGURATION)
self.assertEqual(config_info["Client.control_urls"],
["http://localhost:8001/control"])
self.assertEqual(config_info["Client.poll_min"], 1.0)
def _CheckClientIndex(self, host_pattern):
"""Check that the index has been updated."""
index_fd = aff4.FACTORY.Create(self.fd.Schema.client_index, "AFF4Index",
mode="r", token=self.token)
self.assertEqual(
[self.fd.urn],
[x for x in index_fd.Query([self.fd.Schema.HOSTNAME], host_pattern)])
def _CheckClientKwIndex(self, keywords, expected_count):
# Tests that the client index has expected_count results when
# searched for keywords.
index = aff4.FACTORY.Create(client_index.MAIN_INDEX,
aff4_type="ClientIndex",
mode="rw",
token=self.token)
self.assertEqual(len(index.LookupClients(keywords)),
expected_count)
def _CheckNotificationsCreated(self):
user_fd = aff4.FACTORY.Open("aff4:/users/test", token=self.token)
notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)
self.assertEqual(len(notifications), 1)
notification = notifications[0]
self.assertEqual(notification.subject, rdfvalue.RDFURN(self.client_id))
def _CheckClientSummary(self, osname, version, kernel="3.13.0-39-generic",
release="5"):
summary = self.fd.GetSummary()
self.assertEqual(summary.client_info.client_name,
config_lib.CONFIG["Client.name"])
self.assertEqual(summary.client_info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(summary.client_info.build_time,
config_lib.CONFIG["Client.build_time"])
self.assertEqual(summary.system_info.system, osname)
self.assertEqual(summary.system_info.node, "test_node")
self.assertEqual(summary.system_info.release, release)
self.assertEqual(summary.system_info.version, version)
self.assertEqual(summary.system_info.machine, "i386")
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(len(summary.interfaces), 1)
self.assertEqual(summary.interfaces[0].mac_address, "123456")
# Check that the client summary was published to the event listener.
self.assertEqual(DiscoveryTestEventListener.event.client_id, self.client_id)
self.assertEqual(
DiscoveryTestEventListener.event.interfaces[0].mac_address,
"123456")
def _CheckNetworkInfo(self):
net_fd = self.fd.OpenMember("network")
interfaces = list(net_fd.Get(net_fd.Schema.INTERFACES))
self.assertEqual(interfaces[0].mac_address, "123456")
self.assertEqual(interfaces[0].addresses[0].human_readable, "192.168.127.12")
self.assertEqual(socket.inet_ntoa(interfaces[0].addresses[0].packed_bytes),
"192.168.127.12")
# Mac addresses should be available as hex for searching
mac_addresses = self.fd.Get(self.fd.Schema.MAC_ADDRESS)
self.assertTrue("123456".encode("hex") in str(mac_addresses))
# Same for IP addresses.
ip_addresses = self.fd.Get(self.fd.Schema.HOST_IPS)
self.assertTrue("192.168.127.12" in str(ip_addresses))
def _CheckVFS(self):
# Check that virtual directories exist for the mount points
fd = aff4.FACTORY.Open(self.client_id.Add("fs/os/mnt/data"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("fs/tsk/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("devices/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
def _CheckLabelIndex(self):
"""Check that label indexes are updated."""
self.assertEqual(
list(search.SearchClients("label:Label2", token=self.token)),
[self.client_id])
def _CheckWindowsDiskInfo(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token)
volumes = client.Get(client.Schema.VOLUMES)
self.assertEqual(len(volumes), 2)
for result in volumes:
self.assertTrue(isinstance(result, rdfvalue.Volume))
self.assertTrue(result.windows.drive_letter in ["Z:", "C:"])
def _CheckRegistryPathspec(self):
# This tests that we can click refresh on a key in the registry vfs subtree
# even if we haven't downloaded any other key above it in the tree.
fd = aff4.FACTORY.Open(self.client_id.Add("registry").Add(
"HKEY_LOCAL_MACHINE").Add("random/path/bla"), token=self.token)
pathspec = fd.real_pathspec
self.assertEqual(pathspec.pathtype, rdfvalue.PathSpec.PathType.REGISTRY)
self.assertEqual(pathspec.CollapsePath(),
u"/HKEY_LOCAL_MACHINE/random/path/bla")
def _CheckRelease(self, desired_release, desired_version):
# Test for correct Linux release override behaviour.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
release = str(client.Get(client.Schema.OS_RELEASE))
version = str(client.Get(client.Schema.OS_VERSION))
self.assertEqual(release, desired_release)
self.assertEqual(version, desired_version)
def testInterrogateLinuxWithWtmp(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"NetgroupConfiguration",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes", [r"^login$"])
self.SetLinuxClient()
client_mock = action_mocks.InterrogatedClient("TransferBuffer", "StatFile",
"Find", "HashBuffer",
"ListDirectory",
"FingerprintFile")
client_mock.InitializeClient()
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Linux", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*test.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Linux", "14.4", release="Ubuntu",
kernel="3.13.0-39-generic")
self._CheckRelease("Ubuntu", "14.4")
# users 1,2,3 from wtmp
# users yagharek, isaac from netgroup
self._CheckUsers(["yagharek", "isaac", "user1", "user2", "user3"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckClientKwIndex(["Linux"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def testInterrogateWindows(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.InterrogatedClient("TransferBuffer", "StatFile",
"Find", "HashBuffer",
"ListDirectory",
"FingerprintFile")
self.SetWindowsClient()
client_mock.InitializeClient(system="Windows", version="6.1.7600",
kernel="6.1.7601")
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Windows", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*Host.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Windows", "6.1.7600", kernel="6.1.7601")
# users Bert and Ernie added by the fixture should not be present (USERS
    # overridden by kb)
# jim parsed from registry profile keys
self._CheckUsers(["jim", "kovacs"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckWindowsDiskInfo()
self._CheckRegistryPathspec()
self._CheckClientKwIndex(["Linux"], 0)
self._CheckClientKwIndex(["Windows"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| [
"grr.lib.flags.StartMain",
"grr.lib.test_lib.GrrTestProgram",
"grr.lib.rdfvalue.RDFURN",
"grr.lib.aff4.FACTORY.Open",
"grr.lib.test_lib.ClientFixture",
"grr.lib.test_lib.TestFlowHelper",
"grr.lib.flow.EventHandler",
"grr.lib.config_lib.CONFIG.Set",
"grr.lib.rdfvalue.SessionID",
"grr.lib.search.SearchClients",
"grr.lib.aff4.FACTORY.Create",
"socket.inet_ntoa",
"grr.lib.action_mocks.InterrogatedClient"
]
| [((568, 614), 'grr.lib.rdfvalue.SessionID', 'rdfvalue.SessionID', ([], {'flow_name': '"""discovery_test"""'}), "(flow_name='discovery_test')\n", (586, 614), False, 'from grr.lib import rdfvalue\n'), ((724, 761), 'grr.lib.flow.EventHandler', 'flow.EventHandler', ([], {'auth_required': '(True)'}), '(auth_required=True)\n', (741, 761), False, 'from grr.lib import flow\n'), ((11492, 11526), 'grr.lib.test_lib.GrrTestProgram', 'test_lib.GrrTestProgram', ([], {'argv': 'argv'}), '(argv=argv)\n', (11515, 11526), False, 'from grr.lib import test_lib\n'), ((11557, 11578), 'grr.lib.flags.StartMain', 'flags.StartMain', (['main'], {}), '(main)\n', (11572, 11578), False, 'from grr.lib import flags\n'), ((2544, 2637), 'grr.lib.aff4.FACTORY.Create', 'aff4.FACTORY.Create', (['self.fd.Schema.client_index', '"""AFF4Index"""'], {'mode': '"""r"""', 'token': 'self.token'}), "(self.fd.Schema.client_index, 'AFF4Index', mode='r',\n token=self.token)\n", (2563, 2637), False, 'from grr.lib import aff4\n'), ((2960, 3063), 'grr.lib.aff4.FACTORY.Create', 'aff4.FACTORY.Create', (['client_index.MAIN_INDEX'], {'aff4_type': '"""ClientIndex"""', 'mode': '"""rw"""', 'token': 'self.token'}), "(client_index.MAIN_INDEX, aff4_type='ClientIndex', mode=\n 'rw', token=self.token)\n", (2979, 3063), False, 'from grr.lib import aff4\n'), ((3304, 3359), 'grr.lib.aff4.FACTORY.Open', 'aff4.FACTORY.Open', (['"""aff4:/users/test"""'], {'token': 'self.token'}), "('aff4:/users/test', token=self.token)\n", (3321, 3359), False, 'from grr.lib import aff4\n'), ((6606, 6657), 'grr.lib.aff4.FACTORY.Open', 'aff4.FACTORY.Open', (['self.client_id'], {'token': 'self.token'}), '(self.client_id, token=self.token)\n', (6623, 6657), False, 'from grr.lib import aff4\n'), ((7571, 7622), 'grr.lib.aff4.FACTORY.Open', 'aff4.FACTORY.Open', (['self.client_id'], {'token': 'self.token'}), '(self.client_id, token=self.token)\n', (7588, 7622), False, 'from grr.lib import aff4\n'), ((7914, 7970), 'grr.lib.test_lib.ClientFixture', 'test_lib.ClientFixture', (['self.client_id'], {'token': 'self.token'}), '(self.client_id, token=self.token)\n', (7936, 7970), False, 'from grr.lib import test_lib\n'), ((8072, 8181), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', (['"""Artifacts.knowledge_base"""', "['LinuxWtmp', 'NetgroupConfiguration', 'LinuxRelease']"], {}), "('Artifacts.knowledge_base', ['LinuxWtmp',\n 'NetgroupConfiguration', 'LinuxRelease'])\n", (8093, 8181), False, 'from grr.lib import config_lib\n'), ((8292, 8363), 'grr.lib.config_lib.CONFIG.Set', 'config_lib.CONFIG.Set', (['"""Artifacts.netgroup_filter_regexes"""', "['^login$']"], {}), "('Artifacts.netgroup_filter_regexes', ['^login$'])\n", (8313, 8363), False, 'from grr.lib import config_lib\n'), ((8409, 8532), 'grr.lib.action_mocks.InterrogatedClient', 'action_mocks.InterrogatedClient', (['"""TransferBuffer"""', '"""StatFile"""', '"""Find"""', '"""HashBuffer"""', '"""ListDirectory"""', '"""FingerprintFile"""'], {}), "('TransferBuffer', 'StatFile', 'Find',\n 'HashBuffer', 'ListDirectory', 'FingerprintFile')\n", (8440, 8532), False, 'from grr.lib import action_mocks\n'), ((8728, 8827), 'grr.lib.test_lib.TestFlowHelper', 'test_lib.TestFlowHelper', (['"""Interrogate"""', 'client_mock'], {'token': 'self.token', 'client_id': 'self.client_id'}), "('Interrogate', client_mock, token=self.token,\n client_id=self.client_id)\n", (8751, 8827), False, 'from grr.lib import test_lib\n'), ((8925, 8976), 'grr.lib.aff4.FACTORY.Open', 'aff4.FACTORY.Open', (['self.client_id'], {'token': 'self.token'}), '(self.client_id, 
token=self.token)\n', (8942, 8976), False, 'from grr.lib import aff4\n'), ((9720, 9776), 'grr.lib.test_lib.ClientFixture', 'test_lib.ClientFixture', (['self.client_id'], {'token': 'self.token'}), '(self.client_id, token=self.token)\n', (9742, 9776), False, 'from grr.lib import test_lib\n'), ((9989, 10112), 'grr.lib.action_mocks.InterrogatedClient', 'action_mocks.InterrogatedClient', (['"""TransferBuffer"""', '"""StatFile"""', '"""Find"""', '"""HashBuffer"""', '"""ListDirectory"""', '"""FingerprintFile"""'], {}), "('TransferBuffer', 'StatFile', 'Find',\n 'HashBuffer', 'ListDirectory', 'FingerprintFile')\n", (10020, 10112), False, 'from grr.lib import action_mocks\n'), ((10465, 10564), 'grr.lib.test_lib.TestFlowHelper', 'test_lib.TestFlowHelper', (['"""Interrogate"""', 'client_mock'], {'token': 'self.token', 'client_id': 'self.client_id'}), "('Interrogate', client_mock, token=self.token,\n client_id=self.client_id)\n", (10488, 10564), False, 'from grr.lib import test_lib\n'), ((10662, 10713), 'grr.lib.aff4.FACTORY.Open', 'aff4.FACTORY.Open', (['self.client_id'], {'token': 'self.token'}), '(self.client_id, token=self.token)\n', (10679, 10713), False, 'from grr.lib import aff4\n'), ((3555, 3586), 'grr.lib.rdfvalue.RDFURN', 'rdfvalue.RDFURN', (['self.client_id'], {}), '(self.client_id)\n', (3570, 3586), False, 'from grr.lib import rdfvalue\n'), ((5118, 5175), 'socket.inet_ntoa', 'socket.inet_ntoa', (['interfaces[0].addresses[0].packed_bytes'], {}), '(interfaces[0].addresses[0].packed_bytes)\n', (5134, 5175), False, 'import socket\n'), ((6474, 6528), 'grr.lib.search.SearchClients', 'search.SearchClients', (['"""label:Label2"""'], {'token': 'self.token'}), "('label:Label2', token=self.token)\n", (6494, 6528), False, 'from grr.lib import search\n')] |
from armstrong.dev.tests.utils import ArmstrongTestCase
import random
def random_range():
# TODO: make sure this can only be generated once
return range(random.randint(1000, 2000))
class HatbandTestCase(ArmstrongTestCase):
pass
class HatbandTestMixin(object):
script_code = """
<script type="text/javascript" src="/static/ckeditor/ckeditor.js"></script>
""".strip()
textarea_code = 'class="ckeditor"></textarea>'
def assertCkEditorPresent(self, response):
self.assertContains(response, self.script_code)
self.assertContains(response, self.textarea_code)
def assertCkEditorNotPresent(self, response):
self.assertNotContains(response, self.script_code)
self.assertNotContains(response, self.textarea_code)
| [
"random.randint"
]
| [((163, 189), 'random.randint', 'random.randint', (['(1000)', '(2000)'], {}), '(1000, 2000)\n', (177, 189), False, 'import random\n')] |
import unittest
from logics.classes.propositional import Inference, Formula
from logics.classes.propositional.proof_theories import NaturalDeductionStep, NaturalDeductionRule
from logics.utils.parsers import classical_parser
from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system
class TestClassicalNaturalDeductionSystem(unittest.TestCase):
def test_natural_deduction_rule(self):
"""Test overriding of index and len methods in NaturalDeductionRule"""
rule = NaturalDeductionRule([
'(...)',
NaturalDeductionStep(Formula(['→', ['A'], ['B']])),
'(...)',
NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])
])
self.assertEqual(rule.index(NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])), 1)
self.assertEqual(len(rule), 2)
def test_nd_system(self):
"""Test the method that tells if a step is a correct application of a rule"""
# A correct derivation
deriv = classical_parser.parse_derivation(
"""p; premise
(p → q); premise
q; E→; [1, 0]; []
p ∧ q; I∧; [0, 2]; []""",
natural_deduction=True)
# Check is application of the correct rule, and a different rule
self.assertTrue(nd_system.is_correct_application(deriv, 2, nd_system.rules['E→']))
self.assertFalse(nd_system.is_correct_application(deriv, 2, nd_system.rules['E∧2']))
self.assertTrue(nd_system.is_correct_application(deriv, 3, nd_system.rules['I∧']))
self.assertFalse(nd_system.is_correct_application(deriv, 3, nd_system.rules['E→']))
# Check is correct derivation of the correct and an incorrect inference
i = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['p'], ['q']])])
self.assertTrue(nd_system.is_correct_derivation(deriv, i))
i2 = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['q'], ['p']])])
self.assertFalse(nd_system.is_correct_derivation(deriv, i2))
# Repeating steps should not alter the outcome (should print a warning)
# deriv2_0 = classical_parser.parse_derivation(
# """p; supposition; []; [0]
# p; repetition; [0, 0]; [0]""",
# natural_deduction=True)
# self.assertTrue(nd_system.is_correct_application(deriv2_0, 1, nd_system.rules['repetition']))
# Test step in the future
deriv2_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [1]; [0]""",
natural_deduction=True)
deriv2_2 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [2]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv2_1, 1, nd_system.rules['repetition']))
self.assertFalse(nd_system.is_correct_application(deriv2_2, 1, nd_system.rules['repetition']))
# -------------------------------------------------
# Test incorrect use of suppositions
# Using a step in a closed supposition
deriv3_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; []
p; E→; [2, 0]; []""",
natural_deduction=True)
# Check correct application of rep and I→
self.assertTrue(nd_system.is_correct_application(deriv3_1, 1, nd_system.rules['repetition']))
self.assertTrue(nd_system.is_correct_application(deriv3_1, 2, nd_system.rules['I→']))
self.assertFalse(nd_system.is_correct_application(deriv3_1, 3, nd_system.rules['E→']))
# Closing a supposition with a rule that does not close
deriv3_2 = classical_parser.parse_derivation('''
p; premise
p; supposition; []; [1]
p; repetition; [0]; [1]
(p ∨ q); I∨1; [0]; []''',
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_2, 3, nd_system.rules['I∨1']))
# Closing two suppositions at once
deriv3_3 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; supposition; [0]; [0, 1]
(p → p); I→; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_3, 2, nd_system.rules['I→']))
# Not closing a supposition with a rule that does close
deriv3_4 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_4, 2, nd_system.rules['I→']))
# Incorrect opening of suppositions
deriv3_5 = classical_parser.parse_derivation(
"""p; supposition; []; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_5, None))
deriv3_6 = classical_parser.parse_derivation(
"""p; premise; []; []
q; supposition; []; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_6, None))
# -------------------------------------------------
# A correct derivation using all the rules
deriv4 = classical_parser.parse_derivation(
"""q; premise; []; []
~q; supposition; []; [1]
~q; repetition; [1]; [1]
(q ∧ ~q); I∧; [0, 2]; [1]
q; E∧1; [3]; [1]
⊥; E~; [1, 4]; [1]
p; EFSQ; [5]; [1]
⊥; repetition; [5]; [1]
~~q; I~; [1, 7]; []
q; ~~; [8]; []
q; supposition; []; [10]
q; repetition; [10]; [10]
(q → q); I→; [10, 11]; []
q; E→; [12, 9]; []
(q ∨ p); I∨1; [13]; []
(p → q); premise; []; []
q; E∨; [14, 12, 15]; []
""", natural_deduction=True)
i3 = Inference([Formula(['q']), Formula(['→', ['p'], ['q']])],
[Formula(['q'])])
self.assertTrue(nd_system.is_correct_derivation(deriv4, i3))
def test_rule_order(self):
# i1 is conjunction introduction
i1 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['p'], ['q']])])
# First derivation: standard one
deriv1_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv1_1, i1))
# Second derivation: reverse on_steps order
deriv1_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv1_2, i1))
i2 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['q'], ['p']])])
# Third derivation: reverse the conjuncts
deriv2_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv2_1, i2))
# Fourth derivation: reverse the conjuncts and the on_steps
deriv2_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv2_2, i2))
if __name__ == '__main__':
unittest.main()
| [
"logics.classes.propositional.Formula",
"logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application",
"unittest.main",
"logics.utils.parsers.classical_parser.parse_derivation",
"logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation"
]
| [((7981, 7996), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7994, 7996), False, 'import unittest\n'), ((1035, 1213), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; premise\n (p → q); premise\n q; E→; [1, 0]; []\n p ∧ q; I∧; [0, 2]; []"""'], {'natural_deduction': '(True)'}), '(\n """p; premise\n (p → q); premise\n q; E→; [1, 0]; []\n p ∧ q; I∧; [0, 2]; []"""\n , natural_deduction=True)\n', (1068, 1213), False, 'from logics.utils.parsers import classical_parser\n'), ((2555, 2688), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; supposition; []; [0]\n p; repetition; [1]; [0]"""'], {'natural_deduction': '(True)'}), '(\n """p; supposition; []; [0]\n p; repetition; [1]; [0]""",\n natural_deduction=True)\n', (2588, 2688), False, 'from logics.utils.parsers import classical_parser\n'), ((2724, 2857), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; supposition; []; [0]\n p; repetition; [2]; [0]"""'], {'natural_deduction': '(True)'}), '(\n """p; supposition; []; [0]\n p; repetition; [2]; [0]""",\n natural_deduction=True)\n', (2757, 2857), False, 'from logics.utils.parsers import classical_parser\n'), ((3253, 3453), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; supposition; []; [0]\n p; repetition; [0]; [0]\n (p → p); I→; [0, 1]; []\n p; E→; [2, 0]; []"""'], {'natural_deduction': '(True)'}), '(\n """p; supposition; []; [0]\n p; repetition; [0]; [0]\n (p → p); I→; [0, 1]; []\n p; E→; [2, 0]; []"""\n , natural_deduction=True)\n', (3286, 3453), False, 'from logics.utils.parsers import classical_parser\n'), ((3894, 4098), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""\n p; premise\n p; supposition; []; [1]\n p; repetition; [0]; [1]\n (p ∨ q); I∨1; [0]; []"""'], {'natural_deduction': '(True)'}), '(\n """\n p; premise\n p; supposition; []; [1]\n p; repetition; [0]; [1]\n (p ∨ q); I∨1; [0]; []"""\n , natural_deduction=True)\n', (3927, 4098), False, 'from logics.utils.parsers import classical_parser\n'), ((4260, 4434), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; supposition; []; [0]\n p; supposition; [0]; [0, 1]\n (p → p); I→; [0, 1]; []"""'], {'natural_deduction': '(True)'}), '(\n """p; supposition; []; [0]\n p; supposition; [0]; [0, 1]\n (p → p); I→; [0, 1]; []"""\n , natural_deduction=True)\n', (4293, 4434), False, 'from logics.utils.parsers import classical_parser\n'), ((4629, 4800), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; supposition; []; [0]\n p; repetition; [0]; [0]\n (p → p); I→; [0, 1]; [0]"""'], {'natural_deduction': '(True)'}), '(\n """p; supposition; []; [0]\n p; repetition; [0]; [0]\n (p → p); I→; [0, 1]; [0]"""\n , natural_deduction=True)\n', (4662, 4800), False, 'from logics.utils.parsers import classical_parser\n'), ((4975, 5062), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; supposition; []; []"""'], {'natural_deduction': '(True)'}), "('p; supposition; []; []',\n natural_deduction=True)\n", (5008, 5062), False, 'from logics.utils.parsers import classical_parser\n'), ((5181, 5309), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; premise; []; []\n q; supposition; []; 
[0]"""'], {'natural_deduction': '(True)'}), '(\n """p; premise; []; []\n q; supposition; []; [0]""",\n natural_deduction=True)\n', (5214, 5309), False, 'from logics.utils.parsers import classical_parser\n'), ((5530, 6185), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""q; premise; []; []\n ~q; supposition; []; [1]\n ~q; repetition; [1]; [1]\n (q ∧ ~q); I∧; [0, 2]; [1]\n q; E∧1; [3]; [1]\n ⊥; E~; [1, 4]; [1]\n p; EFSQ; [5]; [1]\n ⊥; repetition; [5]; [1]\n ~~q; I~; [1, 7]; []\n q; ~~; [8]; []\n q; supposition; []; [10]\n q; repetition; [10]; [10]\n (q → q); I→; [10, 11]; []\n q; E→; [12, 9]; []\n (q ∨ p); I∨1; [13]; []\n (p → q); premise; []; []\n q; E∨; [14, 12, 15]; []\n """'], {'natural_deduction': '(True)'}), '(\n """q; premise; []; []\n ~q; supposition; []; [1]\n ~q; repetition; [1]; [1]\n (q ∧ ~q); I∧; [0, 2]; [1]\n q; E∧1; [3]; [1]\n ⊥; E~; [1, 4]; [1]\n p; EFSQ; [5]; [1]\n ⊥; repetition; [5]; [1]\n ~~q; I~; [1, 7]; []\n q; ~~; [8]; []\n q; supposition; []; [10]\n q; repetition; [10]; [10]\n (q → q); I→; [10, 11]; []\n q; E→; [12, 9]; []\n (q ∨ p); I∨1; [13]; []\n (p → q); premise; []; []\n q; E∨; [14, 12, 15]; []\n """\n , natural_deduction=True)\n', (5563, 6185), False, 'from logics.utils.parsers import classical_parser\n'), ((6615, 6775), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; premise; []; []\n q; premise; []; []\n (p ∧ q); I∧; [0, 1]; []"""'], {'natural_deduction': '(True)'}), '(\n """p; premise; []; []\n q; premise; []; []\n (p ∧ q); I∧; [0, 1]; []"""\n , natural_deduction=True)\n', (6648, 6775), False, 'from logics.utils.parsers import classical_parser\n'), ((6934, 7094), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; premise; []; []\n q; premise; []; []\n (p ∧ q); I∧; [1, 0]; []"""'], {'natural_deduction': '(True)'}), '(\n """p; premise; []; []\n q; premise; []; []\n (p ∧ q); I∧; [1, 0]; []"""\n , natural_deduction=True)\n', (6967, 7094), False, 'from logics.utils.parsers import classical_parser\n'), ((7365, 7525), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; premise; []; []\n q; premise; []; []\n (q ∧ p); I∧; [1, 0]; []"""'], {'natural_deduction': '(True)'}), '(\n """p; premise; []; []\n q; premise; []; []\n (q ∧ p); I∧; [1, 0]; []"""\n , natural_deduction=True)\n', (7398, 7525), False, 'from logics.utils.parsers import classical_parser\n'), ((7700, 7860), 'logics.utils.parsers.classical_parser.parse_derivation', 'classical_parser.parse_derivation', (['"""p; premise; []; []\n q; premise; []; []\n (q ∧ p); I∧; [0, 1]; []"""'], {'natural_deduction': '(True)'}), '(\n """p; premise; []; []\n q; premise; []; []\n (q ∧ p); I∧; [0, 1]; []"""\n , natural_deduction=True)\n', (7733, 7860), False, 'from logics.utils.parsers import classical_parser\n'), ((1327, 1392), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv', '(2)', "nd_system.rules['E→']"], {}), "(deriv, 2, nd_system.rules['E→'])\n", (1359, 1392), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((1419, 1485), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv', '(2)', "nd_system.rules['E∧2']"], {}), "(deriv, 2, 
nd_system.rules['E∧2'])\n", (1451, 1485), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((1511, 1576), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv', '(3)', "nd_system.rules['I∧']"], {}), "(deriv, 3, nd_system.rules['I∧'])\n", (1543, 1576), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((1603, 1668), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv', '(3)', "nd_system.rules['E→']"], {}), "(deriv, 3, nd_system.rules['E→'])\n", (1635, 1668), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((1899, 1940), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv', 'i'], {}), '(deriv, i)\n', (1930, 1940), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((2092, 2134), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv', 'i2'], {}), '(deriv, i2)\n', (2123, 2134), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((2899, 2975), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv2_1', '(1)', "nd_system.rules['repetition']"], {}), "(deriv2_1, 1, nd_system.rules['repetition'])\n", (2931, 2975), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((3002, 3078), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv2_2', '(1)', "nd_system.rules['repetition']"], {}), "(deriv2_2, 1, nd_system.rules['repetition'])\n", (3034, 3078), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((3543, 3619), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv3_1', '(1)', "nd_system.rules['repetition']"], {}), "(deriv3_1, 1, nd_system.rules['repetition'])\n", (3575, 3619), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((3645, 3713), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv3_1', '(2)', "nd_system.rules['I→']"], {}), "(deriv3_1, 2, nd_system.rules['I→'])\n", (3677, 3713), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((3740, 3808), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv3_1', '(3)', "nd_system.rules['E→']"], {}), "(deriv3_1, 3, nd_system.rules['E→'])\n", (3772, 3808), True, 'from logics.instances.propositional.natural_deduction import 
classical_natural_deduction_system as nd_system\n'), ((4126, 4195), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv3_2', '(3)', "nd_system.rules['I∨1']"], {}), "(deriv3_2, 3, nd_system.rules['I∨1'])\n", (4158, 4195), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((4475, 4543), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv3_3', '(2)', "nd_system.rules['I→']"], {}), "(deriv3_3, 2, nd_system.rules['I→'])\n", (4507, 4543), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((4841, 4909), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_application', 'nd_system.is_correct_application', (['deriv3_4', '(2)', "nd_system.rules['I→']"], {}), "(deriv3_4, 2, nd_system.rules['I→'])\n", (4873, 4909), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((5113, 5160), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv3_5', 'None'], {}), '(deriv3_5, None)\n', (5144, 5160), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((5351, 5398), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv3_6', 'None'], {}), '(deriv3_6, None)\n', (5382, 5398), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((6325, 6368), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv4', 'i3'], {}), '(deriv4, i3)\n', (6356, 6368), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((6815, 6860), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv1_1', 'i1'], {}), '(deriv1_1, i1)\n', (6846, 6860), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((7135, 7180), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv1_2', 'i1'], {}), '(deriv1_2, i1)\n', (7166, 7180), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((7565, 7610), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv2_1', 'i2'], {}), '(deriv2_1, i2)\n', (7596, 7610), True, 'from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n'), ((7901, 7946), 'logics.instances.propositional.natural_deduction.classical_natural_deduction_system.is_correct_derivation', 'nd_system.is_correct_derivation', (['deriv2_2', 'i2'], {}), '(deriv2_2, i2)\n', (7932, 7946), True, 'from logics.instances.propositional.natural_deduction 
import classical_natural_deduction_system as nd_system\n'), ((1774, 1788), 'logics.classes.propositional.Formula', 'Formula', (["['p']"], {}), "(['p'])\n", (1781, 1788), False, 'from logics.classes.propositional import Inference, Formula\n'), ((1790, 1818), 'logics.classes.propositional.Formula', 'Formula', (["['→', ['p'], ['q']]"], {}), "(['→', ['p'], ['q']])\n", (1797, 1818), False, 'from logics.classes.propositional import Inference, Formula\n'), ((1844, 1872), 'logics.classes.propositional.Formula', 'Formula', (["['∧', ['p'], ['q']]"], {}), "(['∧', ['p'], ['q']])\n", (1851, 1872), False, 'from logics.classes.propositional import Inference, Formula\n'), ((1966, 1980), 'logics.classes.propositional.Formula', 'Formula', (["['p']"], {}), "(['p'])\n", (1973, 1980), False, 'from logics.classes.propositional import Inference, Formula\n'), ((1982, 2010), 'logics.classes.propositional.Formula', 'Formula', (["['→', ['p'], ['q']]"], {}), "(['→', ['p'], ['q']])\n", (1989, 2010), False, 'from logics.classes.propositional import Inference, Formula\n'), ((2036, 2064), 'logics.classes.propositional.Formula', 'Formula', (["['∧', ['q'], ['p']]"], {}), "(['∧', ['q'], ['p']])\n", (2043, 2064), False, 'from logics.classes.propositional import Inference, Formula\n'), ((6213, 6227), 'logics.classes.propositional.Formula', 'Formula', (["['q']"], {}), "(['q'])\n", (6220, 6227), False, 'from logics.classes.propositional import Inference, Formula\n'), ((6229, 6257), 'logics.classes.propositional.Formula', 'Formula', (["['→', ['p'], ['q']]"], {}), "(['→', ['p'], ['q']])\n", (6236, 6257), False, 'from logics.classes.propositional import Inference, Formula\n'), ((6284, 6298), 'logics.classes.propositional.Formula', 'Formula', (["['q']"], {}), "(['q'])\n", (6291, 6298), False, 'from logics.classes.propositional import Inference, Formula\n'), ((6467, 6481), 'logics.classes.propositional.Formula', 'Formula', (["['p']"], {}), "(['p'])\n", (6474, 6481), False, 'from logics.classes.propositional import Inference, Formula\n'), ((6483, 6497), 'logics.classes.propositional.Formula', 'Formula', (["['q']"], {}), "(['q'])\n", (6490, 6497), False, 'from logics.classes.propositional import Inference, Formula\n'), ((6523, 6551), 'logics.classes.propositional.Formula', 'Formula', (["['∧', ['p'], ['q']]"], {}), "(['∧', ['p'], ['q']])\n", (6530, 6551), False, 'from logics.classes.propositional import Inference, Formula\n'), ((7207, 7221), 'logics.classes.propositional.Formula', 'Formula', (["['p']"], {}), "(['p'])\n", (7214, 7221), False, 'from logics.classes.propositional import Inference, Formula\n'), ((7223, 7237), 'logics.classes.propositional.Formula', 'Formula', (["['q']"], {}), "(['q'])\n", (7230, 7237), False, 'from logics.classes.propositional import Inference, Formula\n'), ((7264, 7292), 'logics.classes.propositional.Formula', 'Formula', (["['∧', ['q'], ['p']]"], {}), "(['∧', ['q'], ['p']])\n", (7271, 7292), False, 'from logics.classes.propositional import Inference, Formula\n'), ((613, 641), 'logics.classes.propositional.Formula', 'Formula', (["['→', ['A'], ['B']]"], {}), "(['→', ['A'], ['B']])\n", (620, 641), False, 'from logics.classes.propositional import Inference, Formula\n'), ((698, 712), 'logics.classes.propositional.Formula', 'Formula', (["['B']"], {}), "(['B'])\n", (705, 712), False, 'from logics.classes.propositional import Inference, Formula\n'), ((796, 810), 'logics.classes.propositional.Formula', 'Formula', (["['B']"], {}), "(['B'])\n", (803, 810), False, 'from logics.classes.propositional import Inference, 
Formula\n')] |
from __future__ import absolute_import
from __future__ import unicode_literals
from compose import utils
class StreamOutputError(Exception):
pass
def stream_output(output, stream):
is_terminal = hasattr(stream, 'isatty') and stream.isatty()
stream = utils.get_output_stream(stream)
all_events = []
lines = {}
diff = 0
for event in utils.json_stream(output):
all_events.append(event)
is_progress_event = 'progress' in event or 'progressDetail' in event
if not is_progress_event:
print_output_event(event, stream, is_terminal)
stream.flush()
continue
if not is_terminal:
continue
# if it's a progress event and we have a terminal, then display the progress bars
image_id = event.get('id')
if not image_id:
continue
if image_id not in lines:
lines[image_id] = len(lines)
stream.write("\n")
diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
stream.write("%c[%dA" % (27, diff))
print_output_event(event, stream, is_terminal)
if 'id' in event:
# move cursor back down
stream.write("%c[%dB" % (27, diff))
stream.flush()
return all_events
def print_output_event(event, stream, is_terminal):
if 'errorDetail' in event:
raise StreamOutputError(event['errorDetail']['message'])
terminator = ''
if is_terminal and 'stream' not in event:
# erase current line
stream.write("%c[2K\r" % 27)
terminator = "\r"
elif 'progressDetail' in event:
return
if 'time' in event:
stream.write("[%s] " % event['time'])
if 'id' in event:
stream.write("%s: " % event['id'])
if 'from' in event:
stream.write("(from %s) " % event['from'])
status = event.get('status', '')
if 'progress' in event:
stream.write("%s %s%s" % (status, event['progress'], terminator))
elif 'progressDetail' in event:
detail = event['progressDetail']
total = detail.get('total')
if 'current' in detail and total:
percentage = float(detail['current']) / float(total) * 100
stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
else:
stream.write('%s%s' % (status, terminator))
elif 'stream' in event:
stream.write("%s%s" % (event['stream'], terminator))
else:
stream.write("%s%s\n" % (status, terminator))
def get_digest_from_pull(events):
for event in events:
status = event.get('status')
if not status or 'Digest' not in status:
continue
_, digest = status.split(':', 1)
return digest.strip()
return None
def get_digest_from_push(events):
for event in events:
digest = event.get('aux', {}).get('Digest')
if digest:
return digest
return None
| [
"compose.utils.json_stream",
"compose.utils.get_output_stream"
]
| [((267, 298), 'compose.utils.get_output_stream', 'utils.get_output_stream', (['stream'], {}), '(stream)\n', (290, 298), False, 'from compose import utils\n'), ((365, 390), 'compose.utils.json_stream', 'utils.json_stream', (['output'], {}), '(output)\n', (382, 390), False, 'from compose import utils\n')] |
import os
from bids_validator import BIDSValidator
def validate(bids_directory):
print('- Validate: init started.')
file_paths = []
result = []
validator = BIDSValidator()
for path, dirs, files in os.walk(bids_directory):
for filename in files:
if filename == '.bidsignore':
continue
if filename.endswith('_annotations.tsv'):
continue
if filename.endswith('_annotations.json'):
continue
temp = os.path.join(path, filename)
file_paths.append(temp[len(bids_directory):len(temp)])
result.append(validator.is_bids(temp[len(bids_directory):len(temp)]))
# print(validator.is_bids(temp[len(bids_directory):len(temp)]))
return file_paths, result
| [
"os.walk",
"os.path.join",
"bids_validator.BIDSValidator"
]
| [((174, 189), 'bids_validator.BIDSValidator', 'BIDSValidator', ([], {}), '()\n', (187, 189), False, 'from bids_validator import BIDSValidator\n'), ((219, 242), 'os.walk', 'os.walk', (['bids_directory'], {}), '(bids_directory)\n', (226, 242), False, 'import os\n'), ((523, 551), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (535, 551), False, 'import os\n')] |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ManagementUnit(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ManagementUnit - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'name': 'str',
'division': 'Division',
'business_unit': 'BusinessUnitReference',
'start_day_of_week': 'str',
'time_zone': 'str',
'settings': 'ManagementUnitSettingsResponse',
'metadata': 'WfmVersionedEntityMetadata',
'version': 'int',
'date_modified': 'datetime',
'modified_by': 'UserReference',
'self_uri': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'division': 'division',
'business_unit': 'businessUnit',
'start_day_of_week': 'startDayOfWeek',
'time_zone': 'timeZone',
'settings': 'settings',
'metadata': 'metadata',
'version': 'version',
'date_modified': 'dateModified',
'modified_by': 'modifiedBy',
'self_uri': 'selfUri'
}
self._id = None
self._name = None
self._division = None
self._business_unit = None
self._start_day_of_week = None
self._time_zone = None
self._settings = None
self._metadata = None
self._version = None
self._date_modified = None
self._modified_by = None
self._self_uri = None
@property
def id(self):
"""
Gets the id of this ManagementUnit.
The globally unique identifier for the object.
:return: The id of this ManagementUnit.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ManagementUnit.
The globally unique identifier for the object.
:param id: The id of this ManagementUnit.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this ManagementUnit.
:return: The name of this ManagementUnit.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ManagementUnit.
:param name: The name of this ManagementUnit.
:type: str
"""
self._name = name
@property
def division(self):
"""
Gets the division of this ManagementUnit.
The division to which this entity belongs.
:return: The division of this ManagementUnit.
:rtype: Division
"""
return self._division
@division.setter
def division(self, division):
"""
Sets the division of this ManagementUnit.
The division to which this entity belongs.
:param division: The division of this ManagementUnit.
:type: Division
"""
self._division = division
@property
def business_unit(self):
"""
Gets the business_unit of this ManagementUnit.
The business unit to which this management unit belongs
:return: The business_unit of this ManagementUnit.
:rtype: BusinessUnitReference
"""
return self._business_unit
@business_unit.setter
def business_unit(self, business_unit):
"""
Sets the business_unit of this ManagementUnit.
The business unit to which this management unit belongs
:param business_unit: The business_unit of this ManagementUnit.
:type: BusinessUnitReference
"""
self._business_unit = business_unit
@property
def start_day_of_week(self):
"""
Gets the start_day_of_week of this ManagementUnit.
Start day of week for scheduling and forecasting purposes. Moving to Business Unit
:return: The start_day_of_week of this ManagementUnit.
:rtype: str
"""
return self._start_day_of_week
@start_day_of_week.setter
def start_day_of_week(self, start_day_of_week):
"""
Sets the start_day_of_week of this ManagementUnit.
Start day of week for scheduling and forecasting purposes. Moving to Business Unit
:param start_day_of_week: The start_day_of_week of this ManagementUnit.
:type: str
"""
allowed_values = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
if start_day_of_week.lower() not in map(str.lower, allowed_values):
# print("Invalid value for start_day_of_week -> " + start_day_of_week)
self._start_day_of_week = "outdated_sdk_version"
else:
self._start_day_of_week = start_day_of_week
@property
def time_zone(self):
"""
Gets the time_zone of this ManagementUnit.
The time zone for the management unit in standard Olson format. Moving to Business Unit
:return: The time_zone of this ManagementUnit.
:rtype: str
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""
Sets the time_zone of this ManagementUnit.
The time zone for the management unit in standard Olson format. Moving to Business Unit
:param time_zone: The time_zone of this ManagementUnit.
:type: str
"""
self._time_zone = time_zone
@property
def settings(self):
"""
Gets the settings of this ManagementUnit.
The configuration settings for this management unit
:return: The settings of this ManagementUnit.
:rtype: ManagementUnitSettingsResponse
"""
return self._settings
@settings.setter
def settings(self, settings):
"""
Sets the settings of this ManagementUnit.
The configuration settings for this management unit
:param settings: The settings of this ManagementUnit.
:type: ManagementUnitSettingsResponse
"""
self._settings = settings
@property
def metadata(self):
"""
Gets the metadata of this ManagementUnit.
Version info metadata for this management unit. Deprecated, use settings.metadata
:return: The metadata of this ManagementUnit.
:rtype: WfmVersionedEntityMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this ManagementUnit.
Version info metadata for this management unit. Deprecated, use settings.metadata
:param metadata: The metadata of this ManagementUnit.
:type: WfmVersionedEntityMetadata
"""
self._metadata = metadata
@property
def version(self):
"""
Gets the version of this ManagementUnit.
The version of the underlying entity. Deprecated, use field from settings.metadata instead
:return: The version of this ManagementUnit.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this ManagementUnit.
The version of the underlying entity. Deprecated, use field from settings.metadata instead
:param version: The version of this ManagementUnit.
:type: int
"""
self._version = version
@property
def date_modified(self):
"""
Gets the date_modified of this ManagementUnit.
The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The date_modified of this ManagementUnit.
:rtype: datetime
"""
return self._date_modified
@date_modified.setter
def date_modified(self, date_modified):
"""
Sets the date_modified of this ManagementUnit.
The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param date_modified: The date_modified of this ManagementUnit.
:type: datetime
"""
self._date_modified = date_modified
@property
def modified_by(self):
"""
Gets the modified_by of this ManagementUnit.
The user who last modified this entity. Deprecated, use field from settings.metadata instead
:return: The modified_by of this ManagementUnit.
:rtype: UserReference
"""
return self._modified_by
@modified_by.setter
def modified_by(self, modified_by):
"""
Sets the modified_by of this ManagementUnit.
The user who last modified this entity. Deprecated, use field from settings.metadata instead
:param modified_by: The modified_by of this ManagementUnit.
:type: UserReference
"""
self._modified_by = modified_by
@property
def self_uri(self):
"""
Gets the self_uri of this ManagementUnit.
The URI for this object
:return: The self_uri of this ManagementUnit.
:rtype: str
"""
return self._self_uri
@self_uri.setter
def self_uri(self, self_uri):
"""
Sets the self_uri of this ManagementUnit.
The URI for this object
:param self_uri: The self_uri of this ManagementUnit.
:type: str
"""
self._self_uri = self_uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"six.iteritems"
]
| [((11071, 11100), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (11080, 11100), False, 'from six import iteritems\n')] |
import asyncio
from contextlib import suppress
from unittest import mock
import pytest
from aiohttp.base_protocol import BaseProtocol
async def test_loop() -> None:
loop = asyncio.get_event_loop()
asyncio.set_event_loop(None)
pr = BaseProtocol(loop)
assert pr._loop is loop
async def test_pause_writing() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop)
assert not pr._paused
pr.pause_writing()
assert pr._paused
async def test_resume_writing_no_waiters() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
pr.pause_writing()
assert pr._paused
pr.resume_writing()
assert not pr._paused
async def test_connection_made() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
assert pr.transport is None
pr.connection_made(tr)
assert pr.transport is not None
async def test_connection_lost_not_paused() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert not pr._connection_lost
pr.connection_lost(None)
assert pr.transport is None
assert pr._connection_lost
async def test_connection_lost_paused_without_waiter() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert not pr._connection_lost
pr.pause_writing()
pr.connection_lost(None)
assert pr.transport is None
assert pr._connection_lost
async def test_drain_lost() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.connection_lost(None)
with pytest.raises(ConnectionResetError):
await pr._drain_helper()
async def test_drain_not_paused() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert pr._drain_waiter is None
await pr._drain_helper()
assert pr._drain_waiter is None
async def test_resume_drain_waited() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
pr.resume_writing()
assert (await t) is None
assert pr._drain_waiter is None
async def test_lost_drain_waited_ok() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
pr.connection_lost(None)
assert (await t) is None
assert pr._drain_waiter is None
async def test_lost_drain_waited_exception() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
exc = RuntimeError()
pr.connection_lost(exc)
with pytest.raises(RuntimeError) as cm:
await t
assert cm.value is exc
assert pr._drain_waiter is None
async def test_lost_drain_cancelled() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
fut = loop.create_future()
async def wait():
fut.set_result(None)
await pr._drain_helper()
t = loop.create_task(wait())
await fut
t.cancel()
assert pr._drain_waiter is not None
pr.connection_lost(None)
with suppress(asyncio.CancelledError):
await t
assert pr._drain_waiter is None
async def test_resume_drain_cancelled() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
fut = loop.create_future()
async def wait():
fut.set_result(None)
await pr._drain_helper()
t = loop.create_task(wait())
await fut
t.cancel()
assert pr._drain_waiter is not None
pr.resume_writing()
with suppress(asyncio.CancelledError):
await t
assert pr._drain_waiter is None
| [
"unittest.mock.Mock",
"asyncio.sleep",
"pytest.raises",
"contextlib.suppress",
"aiohttp.base_protocol.BaseProtocol",
"asyncio.set_event_loop",
"asyncio.get_event_loop"
]
| [((180, 204), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (202, 204), False, 'import asyncio\n'), ((209, 237), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['None'], {}), '(None)\n', (231, 237), False, 'import asyncio\n'), ((247, 265), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', (['loop'], {}), '(loop)\n', (259, 265), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((347, 371), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (369, 371), False, 'import asyncio\n'), ((381, 399), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', (['loop'], {}), '(loop)\n', (393, 399), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((536, 560), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (558, 560), False, 'import asyncio\n'), ((570, 593), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (582, 593), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((744, 768), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (766, 768), False, 'import asyncio\n'), ((778, 801), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (790, 801), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((811, 822), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (820, 822), False, 'from unittest import mock\n'), ((984, 1008), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1006, 1008), False, 'import asyncio\n'), ((1018, 1041), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (1030, 1041), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((1051, 1062), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1060, 1062), False, 'from unittest import mock\n'), ((1294, 1318), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1316, 1318), False, 'import asyncio\n'), ((1328, 1351), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (1340, 1351), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((1361, 1372), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1370, 1372), False, 'from unittest import mock\n'), ((1600, 1624), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1622, 1624), False, 'import asyncio\n'), ((1634, 1657), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (1646, 1657), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((1667, 1678), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1676, 1678), False, 'from unittest import mock\n'), ((1870, 1894), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1892, 1894), False, 'import asyncio\n'), ((1904, 1927), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (1916, 1927), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((1937, 1948), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1946, 1948), False, 'from unittest import mock\n'), ((2136, 2160), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2158, 2160), False, 'import asyncio\n'), ((2170, 2193), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (2182, 2193), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((2203, 2214), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (2212, 
2214), False, 'from unittest import mock\n'), ((2528, 2552), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2550, 2552), False, 'import asyncio\n'), ((2562, 2585), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (2574, 2585), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((2595, 2606), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (2604, 2606), False, 'from unittest import mock\n'), ((2932, 2956), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2954, 2956), False, 'import asyncio\n'), ((2966, 2989), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (2978, 2989), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((2999, 3010), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3008, 3010), False, 'from unittest import mock\n'), ((3411, 3435), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3433, 3435), False, 'import asyncio\n'), ((3445, 3468), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (3457, 3468), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((3478, 3489), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3487, 3489), False, 'from unittest import mock\n'), ((3947, 3971), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3969, 3971), False, 'import asyncio\n'), ((3981, 4004), 'aiohttp.base_protocol.BaseProtocol', 'BaseProtocol', ([], {'loop': 'loop'}), '(loop=loop)\n', (3993, 4004), False, 'from aiohttp.base_protocol import BaseProtocol\n'), ((4014, 4025), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (4023, 4025), False, 'from unittest import mock\n'), ((1744, 1779), 'pytest.raises', 'pytest.raises', (['ConnectionResetError'], {}), '(ConnectionResetError)\n', (1757, 1779), False, 'import pytest\n'), ((2321, 2337), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (2334, 2337), False, 'import asyncio\n'), ((2713, 2729), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (2726, 2729), False, 'import asyncio\n'), ((3117, 3133), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (3130, 3133), False, 'import asyncio\n'), ((3237, 3264), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3250, 3264), False, 'import pytest\n'), ((3799, 3831), 'contextlib.suppress', 'suppress', (['asyncio.CancelledError'], {}), '(asyncio.CancelledError)\n', (3807, 3831), False, 'from contextlib import suppress\n'), ((4330, 4362), 'contextlib.suppress', 'suppress', (['asyncio.CancelledError'], {}), '(asyncio.CancelledError)\n', (4338, 4362), False, 'from contextlib import suppress\n')] |
# Helper code to plot binary losses.
#
# <NAME> (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
fig, ax = plt.subplots()
fig.set_tight_layout(True)
xs = np.linspace(-2, 2, 500)
# plot L0/1 loss
ax.plot(xs, np.where(xs < 0, np.ones_like(xs), np.zeros_like(xs)),
color='r', linewidth=2.0, label='$L_{01}$')
# plot square loss
ax.plot(xs, (xs - 1) ** 2, linestyle='-.', label='$L_2$')
# plot hinge loss
ax.plot(xs, np.maximum(np.zeros_like(xs), 1 - xs),
color='g', linewidth=2.0, label='$L_h$')
ax.grid(True)
plt.ylim((-1, 4))
ax.legend()
fig.savefig('loss.png', dpi=80)
plt.show()
| [
"numpy.ones_like",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
| [((247, 261), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (259, 261), True, 'import matplotlib.pyplot as plt\n'), ((303, 326), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(500)'], {}), '(-2, 2, 500)\n', (314, 326), True, 'import numpy as np\n'), ((717, 734), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1, 4)'], {}), '((-1, 4))\n', (725, 734), True, 'import matplotlib.pyplot as plt\n'), ((792, 802), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (800, 802), True, 'import matplotlib.pyplot as plt\n'), ((382, 398), 'numpy.ones_like', 'np.ones_like', (['xs'], {}), '(xs)\n', (394, 398), True, 'import numpy as np\n'), ((400, 417), 'numpy.zeros_like', 'np.zeros_like', (['xs'], {}), '(xs)\n', (413, 417), True, 'import numpy as np\n'), ((612, 629), 'numpy.zeros_like', 'np.zeros_like', (['xs'], {}), '(xs)\n', (625, 629), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
################################
# Development tool
# Auto-compiles style.less to style.css
#
# Requires lessc and less clean css to be installed:
# npm install -g less
# npm install -g less-plugin-clean-css
################################
import os, time
from os import path
from math import floor
from _helper import *
# Main application
class Main:
style_less = "style.less"
style_css = "style.css"
def __init__(self):
clear()
os.chdir("../")
header("Watching style.less for changes\nctrl+c to exit")
print()
while True:
if not os.path.exists(self.style_less):
print(self.style_less + " does not exist. Exiting.")
return
if not os.path.exists(self.style_css):
self.compile()
elif path.getmtime(self.style_less) > path.getmtime(self.style_css):
self.compile()
time.sleep(.2)
def compile(self):
start = time.time()
os.system("lessc " + self.style_less + " " + self.style_css + " --clean-css")
touch(self.style_css, path.getmtime(self.style_less))
print("Recompiled [" + str(floor((time.time() - start) * 100)) + " ms]")
print()
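# Editor's note (illustrative sketch, not part of the original tool): the
# polling loop above recompiles when style.css is missing or older than
# style.less. The same check as a standalone helper might look like this;
# `needs_rebuild` is a hypothetical name used only for illustration.
def needs_rebuild(src, dst):
    return not os.path.exists(dst) or path.getmtime(src) > path.getmtime(dst)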
# Run application
if __name__ == "__main__":
try:
app = Main()
except KeyboardInterrupt:
print("Exiting") | [
"os.path.exists",
"os.path.getmtime",
"time.sleep",
"os.chdir",
"os.system",
"time.time"
]
| [((466, 481), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (474, 481), False, 'import os, time\n'), ((895, 906), 'time.time', 'time.time', ([], {}), '()\n', (904, 906), False, 'import os, time\n'), ((909, 986), 'os.system', 'os.system', (["('lessc ' + self.style_less + ' ' + self.style_css + ' --clean-css')"], {}), "('lessc ' + self.style_less + ' ' + self.style_css + ' --clean-css')\n", (918, 986), False, 'import os, time\n'), ((845, 860), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (855, 860), False, 'import os, time\n'), ((1011, 1041), 'os.path.getmtime', 'path.getmtime', (['self.style_less'], {}), '(self.style_less)\n', (1024, 1041), False, 'from os import path\n'), ((579, 610), 'os.path.exists', 'os.path.exists', (['self.style_less'], {}), '(self.style_less)\n', (593, 610), False, 'import os, time\n'), ((695, 725), 'os.path.exists', 'os.path.exists', (['self.style_css'], {}), '(self.style_css)\n', (709, 725), False, 'import os, time\n'), ((754, 784), 'os.path.getmtime', 'path.getmtime', (['self.style_less'], {}), '(self.style_less)\n', (767, 784), False, 'from os import path\n'), ((787, 816), 'os.path.getmtime', 'path.getmtime', (['self.style_css'], {}), '(self.style_css)\n', (800, 816), False, 'from os import path\n'), ((1079, 1090), 'time.time', 'time.time', ([], {}), '()\n', (1088, 1090), False, 'import os, time\n')] |
# -- encoding: UTF-8 --
import json
import uuid
from admin_export_action import report
from admin_export_action.admin import export_selected_objects
from admin_export_action.config import default_config, get_config
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils.http import urlencode
from news.models import Attachment, Category, News, NewsTag, Video
from news.admin import NewsAdmin
class FakeDict(object):
def __getitem__(self, key):
return object()
class WS(object):
def __init__(self):
self.rows = []
self.cells = []
self.column_dimensions = FakeDict()
def cell(self, row, column):
pass
def append(self, row):
self.rows.append(row)
class FakeQueryset(object):
def __init__(self, num):
self.num = num
self.model = News
def values_list(self, field, flat=True):
return [i for i in range(1, self.num)]
class AdminExportActionTest(TestCase):
fixtures = ["tests.json"]
def test_config(self):
self.assertEqual(default_config.get('ENABLE_SITEWIDE'), True)
self.assertEqual(get_config('ENABLE_SITEWIDE'), False)
with self.settings(ADMIN_EXPORT_ACTION=None):
self.assertEqual(get_config('ENABLE_SITEWIDE'), True)
def test_export_selected_objects_session(self):
factory = RequestFactory()
request = factory.get('/news/admin/')
request.session = {}
modeladmin = NewsAdmin(model=News, admin_site=AdminSite())
qs = FakeQueryset(2000)
self.assertEqual(len(request.session), 0)
export_selected_objects(modeladmin, request, qs)
self.assertEqual(len(request.session), 1)
els = list(request.session.items())
self.assertEqual(els[0][1], qs.values_list('id'))
def test_get_field_verbose_name(self):
res = report.get_field_verbose_name(News.objects, 'tags__name')
assert res == 'all tags verbose name'
res = report.get_field_verbose_name(News.objects, 'share')
assert res == 'share'
def test_list_to_method_response_should_return_200_and_correct_values(
self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title', 'status'],
admin)
method = getattr(report, 'list_to_{}_response'.format('html'))
res = method(data)
assert res.status_code == 200
method = getattr(report, 'list_to_{}_response'.format('csv'))
res = method(data)
assert res.status_code == 200
assert res.content == b'1,<NAME>,published\r\n2,La mano de Dios,draft\r\n'
method = getattr(report, 'list_to_{}_response'.format('xlsx'))
res = method(data)
assert res.status_code == 200
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title', 'status'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "<NAME>"
assert d[0]['status'] == 'published'
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert d[1]['status'] == 'draft'
assert res.status_code == 200
data, messages = report.report_to_list(News.objects.all(),
['id', 'title', 'status'],
admin,
raw_choices=True)
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title', 'status'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "<NAME>"
assert d[0]['status'] == 2
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert d[1]['status'] == 1
assert res.status_code == 200
def test_list_to_csv_response_should_have_expected_content(self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title'], admin)
method = getattr(report, 'list_to_{}_response'.format('csv'))
res = method(data)
assert res.status_code == 200
assert res.content == b'1,<NAME>\r\n2,La mano de Dios\r\n'
def test_list_to_json_response_should_have_expected_content(self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title'], admin)
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "<NAME>"
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert res.status_code == 200
def test_admin_export_post_should_return_200(self):
for output_format in ['html', 'csv', 'xslx', 'json']:
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk)
for pk in News.objects.values_list('pk', flat=True))
}
data = {
"title": "on",
"__format": output_format,
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.post(url, data=data)
assert response.status_code == 200
def test_admin_export_get_should_return_200(self):
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk) for pk in News.objects.values_list('pk', flat=True))
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_related_get_should_return_200(self):
params = {
'related': True,
'model_ct': ContentType.objects.get_for_model(News).pk,
'field': 'category',
'path': 'category.name',
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_related_of_indirect_field_get_should_return_200(
self):
params = {
'related': True,
'model_ct': ContentType.objects.get_for_model(News).pk,
'field': 'newstag',
'path': 'newstag.id',
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_unregistered_model_should_raise_ValueError(
self):
params = {
'ct':
ContentType.objects.get_for_model(NewsTag).pk,
'ids':
','.join(
repr(pk)
for pk in NewsTag.objects.values_list('pk', flat=True))
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
try:
self.client.get(url)
self.fail()
except ValueError:
pass
def test_admin_action_should_redirect_to_export_view(self):
objects = News.objects.all()
ids = [repr(obj.pk) for obj in objects]
data = {
"action": "export_selected_objects",
"_selected_action": ids,
}
url = reverse('admin:news_news_changelist')
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.post(url, data=data)
expected_url = "{}?ct={ct}&ids={ids}".format(
reverse('admin_export_action:export'),
ct=ContentType.objects.get_for_model(News).pk,
ids=','.join(reversed(ids)))
assert response.status_code == 302
assert response.url.endswith(expected_url)
def test_export_with_related_should_return_200(self):
for output_format in ['html', 'csv', 'xslx', 'json']:
news = News.objects.all()
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk)
for pk in News.objects.values_list('pk', flat=True))
}
data = {
'id': 'on',
'title': 'on',
'status': 'on',
'category__name': 'on',
'tags__name': 'on',
'newstag__created_on': 'on',
"__format": output_format,
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.post(url, data=data)
assert response.status_code == 200
assert response.content
def test_build_sheet_convert_function(self):
data = [
['1', 5, 'convert', 9, {"foo": "bar"}, [1, 2], uuid.UUID("12345678123456781234567812345678")],
]
ws = WS()
report.build_sheet(data, ws, sheet_name='report', header=None, widths=None)
self.assertEqual(ws.rows, [['1', 5, 'converted', 9, "{'foo': 'bar'}", '[1, 2]', '12345678-1234-5678-1234-567812345678']])
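# Editor's note (illustrative sketch based on the test pattern above): the
# export URL exercised throughout these tests is the admin_export_action
# export view plus a content-type pk and a comma-separated id list. A small
# helper capturing that pattern could look like this; `build_export_url` is a
# hypothetical name, not part of the package under test.
def build_export_url(model, pks):
    params = {
        'ct': ContentType.objects.get_for_model(model).pk,
        'ids': ','.join(repr(pk) for pk in pks),
    }
    return "{}?{}".format(reverse('admin_export_action:export'), urlencode(params))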
| [
"django.test.RequestFactory",
"news.models.NewsTag.objects.values_list",
"json.loads",
"news.models.News.objects.all",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"uuid.UUID",
"admin_export_action.report.get_field_verbose_name",
"admin_export_action.admin.export_selected_objects",
"django.utils.http.urlencode",
"admin_export_action.config.get_config",
"admin_export_action.report.build_sheet",
"django.contrib.admin.sites.AdminSite",
"django.urls.reverse",
"news.models.News.objects.values_list",
"admin_export_action.config.default_config.get",
"django.contrib.auth.models.User.objects.get"
]
| [((1538, 1554), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (1552, 1554), False, 'from django.test import TestCase, RequestFactory\n'), ((1788, 1836), 'admin_export_action.admin.export_selected_objects', 'export_selected_objects', (['modeladmin', 'request', 'qs'], {}), '(modeladmin, request, qs)\n', (1811, 1836), False, 'from admin_export_action.admin import export_selected_objects\n'), ((2047, 2104), 'admin_export_action.report.get_field_verbose_name', 'report.get_field_verbose_name', (['News.objects', '"""tags__name"""'], {}), "(News.objects, 'tags__name')\n", (2076, 2104), False, 'from admin_export_action import report\n'), ((2165, 2217), 'admin_export_action.report.get_field_verbose_name', 'report.get_field_verbose_name', (['News.objects', '"""share"""'], {}), "(News.objects, 'share')\n", (2194, 2217), False, 'from admin_export_action import report\n'), ((2359, 2381), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (2375, 2381), False, 'from django.contrib.auth.models import User\n'), ((3215, 3238), 'json.loads', 'json.loads', (['res.content'], {}), '(res.content)\n', (3225, 3238), False, 'import json\n'), ((3922, 3945), 'json.loads', 'json.loads', (['res.content'], {}), '(res.content)\n', (3932, 3945), False, 'import json\n'), ((4294, 4316), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (4310, 4316), False, 'from django.contrib.auth.models import User\n'), ((4747, 4769), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (4763, 4769), False, 'from django.contrib.auth.models import User\n'), ((5043, 5066), 'json.loads', 'json.loads', (['res.content'], {}), '(res.content)\n', (5053, 5066), False, 'import json\n'), ((8408, 8426), 'news.models.News.objects.all', 'News.objects.all', ([], {}), '()\n', (8424, 8426), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((8603, 8640), 'django.urls.reverse', 'reverse', (['"""admin:news_news_changelist"""'], {}), "('admin:news_news_changelist')\n", (8610, 8640), False, 'from django.urls import reverse\n'), ((10326, 10401), 'admin_export_action.report.build_sheet', 'report.build_sheet', (['data', 'ws'], {'sheet_name': '"""report"""', 'header': 'None', 'widths': 'None'}), "(data, ws, sheet_name='report', header=None, widths=None)\n", (10344, 10401), False, 'from admin_export_action import report\n'), ((1238, 1275), 'admin_export_action.config.default_config.get', 'default_config.get', (['"""ENABLE_SITEWIDE"""'], {}), "('ENABLE_SITEWIDE')\n", (1256, 1275), False, 'from admin_export_action.config import default_config, get_config\n'), ((1308, 1337), 'admin_export_action.config.get_config', 'get_config', (['"""ENABLE_SITEWIDE"""'], {}), "('ENABLE_SITEWIDE')\n", (1318, 1337), False, 'from admin_export_action.config import default_config, get_config\n'), ((2429, 2447), 'news.models.News.objects.all', 'News.objects.all', ([], {}), '()\n', (2445, 2447), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((3564, 3582), 'news.models.News.objects.all', 'News.objects.all', ([], {}), '()\n', (3580, 3582), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((4364, 4382), 'news.models.News.objects.all', 'News.objects.all', ([], {}), '()\n', (4380, 4382), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((4817, 4835), 'news.models.News.objects.all', 'News.objects.all', 
([], {}), '()\n', (4833, 4835), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((6361, 6398), 'django.urls.reverse', 'reverse', (['"""admin_export_action:export"""'], {}), "('admin_export_action:export')\n", (6368, 6398), False, 'from django.urls import reverse\n'), ((6429, 6446), 'django.utils.http.urlencode', 'urlencode', (['params'], {}), '(params)\n', (6438, 6446), False, 'from django.utils.http import urlencode\n'), ((6892, 6929), 'django.urls.reverse', 'reverse', (['"""admin_export_action:export"""'], {}), "('admin_export_action:export')\n", (6899, 6929), False, 'from django.urls import reverse\n'), ((6960, 6977), 'django.utils.http.urlencode', 'urlencode', (['params'], {}), '(params)\n', (6969, 6977), False, 'from django.utils.http import urlencode\n'), ((7450, 7487), 'django.urls.reverse', 'reverse', (['"""admin_export_action:export"""'], {}), "('admin_export_action:export')\n", (7457, 7487), False, 'from django.urls import reverse\n'), ((7518, 7535), 'django.utils.http.urlencode', 'urlencode', (['params'], {}), '(params)\n', (7527, 7535), False, 'from django.utils.http import urlencode\n'), ((8056, 8093), 'django.urls.reverse', 'reverse', (['"""admin_export_action:export"""'], {}), "('admin_export_action:export')\n", (8063, 8093), False, 'from django.urls import reverse\n'), ((8124, 8141), 'django.utils.http.urlencode', 'urlencode', (['params'], {}), '(params)\n', (8133, 8141), False, 'from django.utils.http import urlencode\n'), ((8827, 8864), 'django.urls.reverse', 'reverse', (['"""admin_export_action:export"""'], {}), "('admin_export_action:export')\n", (8834, 8864), False, 'from django.urls import reverse\n'), ((9200, 9218), 'news.models.News.objects.all', 'News.objects.all', ([], {}), '()\n', (9216, 9218), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((1430, 1459), 'admin_export_action.config.get_config', 'get_config', (['"""ENABLE_SITEWIDE"""'], {}), "('ENABLE_SITEWIDE')\n", (1440, 1459), False, 'from admin_export_action.config import default_config, get_config\n'), ((1684, 1695), 'django.contrib.admin.sites.AdminSite', 'AdminSite', ([], {}), '()\n', (1693, 1695), False, 'from django.contrib.admin.sites import AdminSite\n'), ((5789, 5826), 'django.urls.reverse', 'reverse', (['"""admin_export_action:export"""'], {}), "('admin_export_action:export')\n", (5796, 5826), False, 'from django.urls import reverse\n'), ((5861, 5878), 'django.utils.http.urlencode', 'urlencode', (['params'], {}), '(params)\n', (5870, 5878), False, 'from django.utils.http import urlencode\n'), ((6159, 6198), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['News'], {}), '(News)\n', (6192, 6198), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((6739, 6778), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['News'], {}), '(News)\n', (6772, 6778), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((7301, 7340), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['News'], {}), '(News)\n', (7334, 7340), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((7832, 7874), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['NewsTag'], {}), '(NewsTag)\n', (7865, 7874), False, 'from django.contrib.contenttypes.models import ContentType\n'), 
((9813, 9850), 'django.urls.reverse', 'reverse', (['"""admin_export_action:export"""'], {}), "('admin_export_action:export')\n", (9820, 9850), False, 'from django.urls import reverse\n'), ((9885, 9902), 'django.utils.http.urlencode', 'urlencode', (['params'], {}), '(params)\n', (9894, 9902), False, 'from django.utils.http import urlencode\n'), ((10240, 10285), 'uuid.UUID', 'uuid.UUID', (['"""12345678123456781234567812345678"""'], {}), "('12345678123456781234567812345678')\n", (10249, 10285), False, 'import uuid\n'), ((5438, 5477), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['News'], {}), '(News)\n', (5471, 5477), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((8881, 8920), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['News'], {}), '(News)\n', (8914, 8920), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((9281, 9320), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['News'], {}), '(News)\n', (9314, 9320), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((6279, 6320), 'news.models.News.objects.values_list', 'News.objects.values_list', (['"""pk"""'], {'flat': '(True)'}), "('pk', flat=True)\n", (6303, 6320), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((7971, 8015), 'news.models.NewsTag.objects.values_list', 'NewsTag.objects.values_list', (['"""pk"""'], {'flat': '(True)'}), "('pk', flat=True)\n", (7998, 8015), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((5590, 5631), 'news.models.News.objects.values_list', 'News.objects.values_list', (['"""pk"""'], {'flat': '(True)'}), "('pk', flat=True)\n", (5614, 5631), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n'), ((9433, 9474), 'news.models.News.objects.values_list', 'News.objects.values_list', (['"""pk"""'], {'flat': '(True)'}), "('pk', flat=True)\n", (9457, 9474), False, 'from news.models import Attachment, Category, News, NewsTag, Video\n')] |
import base64
import datetime
from abc import ABC, abstractmethod
from .conditions import AnyValue
from .errors import FieldError, FormError
__all__ = [
'Field', 'StringField', 'IntegerField', 'FloatField', 'BooleanField',
'DateTimeField', 'DateField', 'TimeField', 'ListField','SetField', 'EnumField', 'BytesField'
]
class Field(ABC):
_default = None
def __new__(cls, *args, **kwargs):
if 'init' in kwargs:
kwargs.pop('init')
return super().__new__(cls)
return UnboundField(cls, *args, **kwargs)
def __init__(self,
condition=AnyValue(),
optional: bool = False,
default=None,
init=False):
self.condition = condition
self.optional = optional
self.default = default or self._default
self._data = None
self.is_empty = False
@property
def data(self):
return self._data
def mark_empty(self):
if not self.optional:
raise FieldError('cannot be blank')
self.is_empty = True
if callable(self.default):
self._data = self.default()
else:
self._data = self.default
@abstractmethod
def process_data(self, value):
self.condition.check(self)
class UnboundField:
def __init__(self, field_cls, *args, **kwargs):
self.field_cls = field_cls
self.args = args
self.kwargs = kwargs
self.kwargs['init'] = True
def bind(self):
return self.field_cls(*self.args, **self.kwargs)
class StringField(Field):
_default = ''
def process_data(self, value):
if not isinstance(value, str):
raise FieldError('invalid string')
self._data = value
super().process_data(value)
class IntegerField(Field):
_default = 0
def process_data(self, value):
if not isinstance(value, int):
raise FieldError('invalid integer')
self._data = value
super().process_data(value)
class FloatField(Field):
_default = 0.0
def process_data(self, value):
if not isinstance(value, float):
raise FieldError('invalid float')
self._data = value
super().process_data(value)
class BooleanField(Field):
def process_data(self, value):
if not isinstance(value, bool):
raise FieldError('invalid boolean')
self._data = value
super().process_data(value)
class DateTimeField(Field):
def __init__(self, pattern='%Y-%m-%dT%H:%M:%S', **kwargs):
super().__init__(**kwargs)
self.pattern = pattern
def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern)
except ValueError:
raise FieldError('invalid datetime')
super().process_data(value)
class DateField(DateTimeField):
def __init__(self, pattern='%Y-%m-%d', **kwargs):
super().__init__(pattern, **kwargs)
def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern).date()
except ValueError:
raise FieldError('invalid date')
super().process_data(value)
class TimeField(DateTimeField):
def __init__(self, pattern='%H:%M:%S', **kwargs):
super().__init__(pattern, **kwargs)
    def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern).time()
except ValueError:
raise FieldError('invalid time')
super().process_data(value)
class EnumField(Field):
def __init__(self, enum_class, **kwargs):
super().__init__(**kwargs)
self.enum_class = enum_class
def process_data(self, value):
try:
enum_obj = self.enum_class[value]
except KeyError:
raise FieldError('invalid enum')
self._data = enum_obj
super().process_data(value)
class BytesField(Field):
def __init__(self, length, **kwargs):
super().__init__(**kwargs)
self.length = length
def process_data(self, value):
try:
            self._data = base64.decodebytes(value)  # assign the backing attribute; data is a read-only property
except (ValueError, TypeError):
raise FieldError('invalid base64 string')
if len(self.data) != self.length:
raise FieldError('invalid length')
super().process_data(value)
class ListField(Field):
def __init__(self, field, default=list, **kwargs):
self.field = field
self.data_ = None
super().__init__(default=default, **kwargs)
@property
def data(self):
if not self.data_:
self.data_ = [field.data for field in self._data]
return self.data_
def process_data(self, value):
if not isinstance(value, list):
raise FieldError('invalid list')
self._data = list()
e = FieldError()
for i, val in enumerate(value):
field = self.field.bind()
try:
field.process_data(val)
except FieldError as e_:
e[i] = e_.error
self._data.append(field)
if e:
raise e
super().process_data(value)
class SetField(Field):
def __init__(self, field, default=set, **kwargs):
self.field = field
self.data_ = None
super().__init__(default=default, **kwargs)
@property
def data(self):
if not self.data_:
self.data_ = {field.data for field in self._data}
return self.data_
def process_data(self, value):
if not isinstance(value, list):
raise FieldError('invalid list')
self._data = set()
e = FieldError()
for i, val in enumerate(set(value)):
field = self.field.bind()
try:
field.process_data(val)
except FieldError as e_:
e[i] = e_.error
self._data.add(field)
if e:
raise e
super().process_data(value)
class SubForm(Field):
def __init__(self, form, **kwargs):
self.form = form
kwargs.pop('condition', None)
super().__init__(**kwargs)
def process_data(self, value):
try:
self.form.process(jsondata=value)
except FormError as e_:
e = FieldError()
if e_.error:
e['error'] = e_.error
if e_.f_errors:
e['f_errors'] = e_.f_errors
raise e
self._data = {name: self.form[name] for name in self.form.fields}
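# Editor's note (illustrative usage sketch, not part of the original module):
# constructing a field outside a form returns an UnboundField, so bind() is
# called first, mirroring how ListField/SetField use their inner field. This
# assumes the default AnyValue() condition accepts any value, as its name
# suggests; `_example_usage` is a hypothetical function.
def _example_usage():
    field = IntegerField(optional=True).bind()
    field.process_data(42)
    assert field.data == 42
    bad = StringField().bind()
    try:
        bad.process_data(123)
    except FieldError:
        pass  # raised as 'invalid string'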
| [
"datetime.datetime.strptime",
"base64.decodebytes"
]
| [((2728, 2775), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['value', 'self.pattern'], {}), '(value, self.pattern)\n', (2754, 2775), False, 'import datetime\n'), ((4211, 4236), 'base64.decodebytes', 'base64.decodebytes', (['value'], {}), '(value)\n', (4229, 4236), False, 'import base64\n'), ((3094, 3141), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['value', 'self.pattern'], {}), '(value, self.pattern)\n', (3120, 3141), False, 'import datetime\n'), ((3467, 3514), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['value', 'self.pattern'], {}), '(value, self.pattern)\n', (3493, 3514), False, 'import datetime\n')] |
import numpy as np
import cv2
#define a canvas of size 300x300 px, with 3 channels (R,G,B) and data type as 8 bit unsigned integer
canvas = np.zeros((300,300,3), dtype ="uint8")
#define color
#draw a circle
#arguments are canvas/image, midpoint, radius, color, thickness(optional)
#display in cv2 window
green = (0,255,0)
cv2.circle(canvas,(100,100), 10, green)
cv2.imshow("Single circle", canvas)
cv2.waitKey(0)
# draw concentric white circles
# calculate the center point of canvas
# generate circles using for loop
# clearing the canvas
canvas = np.zeros((300,300,3), dtype ="uint8")
white = (255,255,255)
(centerX, centerY) = (canvas.shape[1]//2, canvas.shape[0]//2)
for r in range(0,175,25):
cv2.circle(canvas, (centerX,centerY), r, white)
cv2.imshow("concentric circles", canvas)
cv2.waitKey(0)
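# Editor's note (illustrative addition): the optional thickness argument noted
# above defaults to an outline; passing -1 fills the circle, which the random
# circles below also rely on. `demo` is a throwaway canvas used only here.
demo = np.zeros((300,300,3), dtype ="uint8")
cv2.circle(demo, (150,150), 50, green, -1)
cv2.imshow("filled circle", demo)
cv2.waitKey(0)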
# generate random radius, center point, color
# draw circles in for loop
canvas = np.zeros((300,300,3), dtype ="uint8")
for i in range(0, 25):
radius = np.random.randint(5, high = 200)
color = np.random.randint(0, high = 256, size = (3,)).tolist()
pt = np.random.randint(0, high = 300, size = (2,))
cv2.circle(canvas, tuple(pt), radius, color, -1)
cv2.imshow("Canvas", canvas)
cv2.waitKey(0)
| [
"cv2.imshow",
"cv2.circle",
"numpy.zeros",
"numpy.random.randint",
"cv2.waitKey"
]
| [((141, 179), 'numpy.zeros', 'np.zeros', (['(300, 300, 3)'], {'dtype': '"""uint8"""'}), "((300, 300, 3), dtype='uint8')\n", (149, 179), True, 'import numpy as np\n'), ((324, 365), 'cv2.circle', 'cv2.circle', (['canvas', '(100, 100)', '(10)', 'green'], {}), '(canvas, (100, 100), 10, green)\n', (334, 365), False, 'import cv2\n'), ((364, 399), 'cv2.imshow', 'cv2.imshow', (['"""Single circle"""', 'canvas'], {}), "('Single circle', canvas)\n", (374, 399), False, 'import cv2\n'), ((400, 414), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (411, 414), False, 'import cv2\n'), ((553, 591), 'numpy.zeros', 'np.zeros', (['(300, 300, 3)'], {'dtype': '"""uint8"""'}), "((300, 300, 3), dtype='uint8')\n", (561, 591), True, 'import numpy as np\n'), ((755, 795), 'cv2.imshow', 'cv2.imshow', (['"""concentric circles"""', 'canvas'], {}), "('concentric circles', canvas)\n", (765, 795), False, 'import cv2\n'), ((796, 810), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (807, 810), False, 'import cv2\n'), ((895, 933), 'numpy.zeros', 'np.zeros', (['(300, 300, 3)'], {'dtype': '"""uint8"""'}), "((300, 300, 3), dtype='uint8')\n", (903, 933), True, 'import numpy as np\n'), ((1177, 1205), 'cv2.imshow', 'cv2.imshow', (['"""Canvas"""', 'canvas'], {}), "('Canvas', canvas)\n", (1187, 1205), False, 'import cv2\n'), ((1206, 1220), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1217, 1220), False, 'import cv2\n'), ((706, 754), 'cv2.circle', 'cv2.circle', (['canvas', '(centerX, centerY)', 'r', 'white'], {}), '(canvas, (centerX, centerY), r, white)\n', (716, 754), False, 'import cv2\n'), ((969, 999), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'high': '(200)'}), '(5, high=200)\n', (986, 999), True, 'import numpy as np\n'), ((1078, 1119), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(300)', 'size': '(2,)'}), '(0, high=300, size=(2,))\n', (1095, 1119), True, 'import numpy as np\n'), ((1014, 1055), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(256)', 'size': '(3,)'}), '(0, high=256, size=(3,))\n', (1031, 1055), True, 'import numpy as np\n')] |
import io
from PIL import Image as PILImage
from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String
from resources.models.ModelBase import Base
class Image(Base):
# If this is used then the image is stored in the database
image = Column(LargeBinary(length=16777215), default=None)
# If this is used then the image is remote and this is the url
url = Column(String(500))
caption = Column(String(100))
# This is a PhotoHash of the image for assistance in deduping
signature = Column(String(50))
artistId = Column(Integer, ForeignKey("artist.id"), index=True)
releaseId = Column(Integer, ForeignKey("release.id"), index=True)
def averageHash(self):
try:
hash_size = 8
# Open the image, resize it and convert it to black & white.
image = PILImage.open(io.BytesIO(self.image)).resize((hash_size, hash_size), PILImage.ANTIALIAS).convert(
'L')
pixels = list(image.getdata())
# Compute the hash based on each pixels value compared to the average.
avg = sum(pixels) / len(pixels)
bits = "".join(map(lambda pixel: '1' if pixel > avg else '0', pixels))
hashformat = "0{hashlength}x".format(hashlength=hash_size ** 2 // 4)
return int(bits, 2).__format__(hashformat)
except:
return None
def __unicode__(self):
return self.caption
def __str__(self):
return self.caption or self.signature
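    # Editor's note (illustrative sketch, not part of the model): signatures
    # produced by averageHash() are typically compared with a Hamming distance
    # for near-duplicate detection; `hammingDistance` below is a hypothetical
    # helper, not an existing attribute of this class.
    @staticmethod
    def hammingDistance(signatureA, signatureB):
        diff = int(signatureA, 16) ^ int(signatureB, 16)
        return bin(diff).count("1")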
| [
"sqlalchemy.String",
"sqlalchemy.LargeBinary",
"sqlalchemy.ForeignKey",
"io.BytesIO"
]
| [((271, 299), 'sqlalchemy.LargeBinary', 'LargeBinary', ([], {'length': '(16777215)'}), '(length=16777215)\n', (282, 299), False, 'from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String\n'), ((399, 410), 'sqlalchemy.String', 'String', (['(500)'], {}), '(500)\n', (405, 410), False, 'from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String\n'), ((433, 444), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (439, 444), False, 'from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String\n'), ((535, 545), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (541, 545), False, 'from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String\n'), ((579, 602), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""artist.id"""'], {}), "('artist.id')\n", (589, 602), False, 'from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String\n'), ((648, 672), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""release.id"""'], {}), "('release.id')\n", (658, 672), False, 'from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String\n'), ((860, 882), 'io.BytesIO', 'io.BytesIO', (['self.image'], {}), '(self.image)\n', (870, 882), False, 'import io\n')] |
import os
import sys
import argparse
import json
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from arch2vec.models.pretraining_nasbench101 import configs
from arch2vec.utils import load_json, preprocessing, one_hot_darts
from arch2vec.preprocessing.gen_isomorphism_graphs import process
from arch2vec.models.model import Model
from torch.distributions import MultivariateNormal
from arch2vec.darts.cnn.train_search import Train
class Env(object):
def __init__(self, name, seed, cfg, data_path=None, save=False):
self.name = name
self.seed = seed
self.model = Model(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args.dim,
num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.dropout, **cfg['GAE']).cuda()
self.dir_name = 'pretrained/dim-{}'.format(args.dim)
if not os.path.exists(os.path.join(self.dir_name, 'model-darts.pt')):
exit()
self.model.load_state_dict(torch.load(os.path.join(self.dir_name, 'model-darts.pt').format(args.dim))['model_state'])
self.visited = {}
self.features = []
self.genotype = []
self.embedding = {}
self._reset(data_path, save)
def _reset(self, data_path, save):
if not save:
print("extract arch2vec on DARTS search space ...")
dataset = load_json(data_path)
print("length of the dataset: {}".format(len(dataset)))
self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt')
if os.path.exists(self.f_path):
print('{} is already saved'.format(self.f_path))
exit()
print('save to {}'.format(self.f_path))
counter = 0
self.model.eval()
for k, v in dataset.items():
adj = torch.Tensor(v[0]).unsqueeze(0).cuda()
ops = torch.Tensor(one_hot_darts(v[1])).unsqueeze(0).cuda()
adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep'])
with torch.no_grad():
x, _ = self.model._encoder(ops, adj)
self.embedding[counter] = {'feature': x.squeeze(0).mean(dim=0).cpu(), 'genotype': process(v[2])}
print("{}/{}".format(counter, len(dataset)))
counter += 1
torch.save(self.embedding, self.f_path)
print("finished arch2vec extraction")
exit()
else:
self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt')
print("load arch2vec from: {}".format(self.f_path))
self.embedding = torch.load(self.f_path)
for ind in range(len(self.embedding)):
self.features.append(self.embedding[ind]['feature'])
self.genotype.append(self.embedding[ind]['genotype'])
self.features = torch.stack(self.features, dim=0)
print('loading finished. pretrained embeddings shape: {}'.format(self.features.shape))
def get_init_state(self):
"""
:return: 1 x dim
"""
        rand_indices = random.randint(0, self.features.shape[0] - 1)  # randint is inclusive on both ends
self.visited[rand_indices] = True
return self.features[rand_indices], self.genotype[rand_indices]
def step(self, action):
"""
action: 1 x dim
self.features. N x dim
"""
dist = torch.norm(self.features - action.cpu(), dim=1)
knn = (-1 * dist).topk(dist.shape[0])
min_dist, min_idx = knn.values, knn.indices
count = 0
while True:
if len(self.visited) == dist.shape[0]:
print("CANNOT FIND IN THE DATASET!")
exit()
if min_idx[count].item() not in self.visited:
self.visited[min_idx[count].item()] = True
break
count += 1
return self.features[min_idx[count].item()], self.genotype[min_idx[count].item()]
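    # Editor's note (worked example of the lookup above, for illustration):
    # with features [[0, 0], [1, 1], [2, 2]] and action [0.9, 0.9], the L2
    # distances are roughly [1.27, 0.14, 1.56], so index 1 is returned first;
    # if index 1 has already been visited, the next nearest unvisited index
    # (here 0) is used instead.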
class Policy(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy, self).__init__()
self.fc1 = nn.Linear(hidden_dim1, hidden_dim2)
self.fc2 = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
def forward(self, input):
x = F.relu(self.fc1(input))
out = self.fc2(x)
return out
class Policy_LSTM(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy_LSTM, self).__init__()
self.lstm = torch.nn.LSTMCell(input_size=hidden_dim1, hidden_size=hidden_dim2)
self.fc = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
self.hx = None
self.cx = None
def forward(self, input):
if self.hx is None and self.cx is None:
self.hx, self.cx = self.lstm(input)
else:
self.hx, self.cx = self.lstm(input, (self.hx, self.cx))
mean = self.fc(self.hx)
return mean
def select_action(state, policy):
"""
MVN based action selection.
:param state: 1 x dim
:param policy: policy network
:return: selected action: 1 x dim
"""
mean = policy(state.view(1, state.shape[0]))
mvn = MultivariateNormal(mean, torch.eye(state.shape[0]).cuda())
action = mvn.sample()
policy.saved_log_probs.append(torch.mean(mvn.log_prob(action)))
return action
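# Editor's note (illustrative, CPU-only sketch of the sampling used above,
# with a made-up dimension): the policy outputs a mean vector and the action
# is a draw from N(mean, I); its log-probability is what finish_episode()
# later scales by the reward. `_demo_action_sampling` is a hypothetical name.
def _demo_action_sampling(dim=4):
    mean = torch.zeros(1, dim)
    mvn = MultivariateNormal(mean, torch.eye(dim))
    action = mvn.sample()
    return action, mvn.log_prob(action)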
def finish_episode(policy, optimizer):
R = 0
policy_loss = []
returns = []
for r in policy.rewards:
R = r + args.gamma * R
returns.append(R)
returns = torch.Tensor(policy.rewards)
val, indices = torch.sort(returns)
print("sorted validation reward:", val)
returns = returns - args.objective
for log_prob, R in zip(policy.saved_log_probs, returns):
policy_loss.append(-log_prob * R)
optimizer.zero_grad()
policy_loss = torch.mean(torch.stack(policy_loss, dim=0))
print("average reward: {}, policy loss: {}".format(sum(policy.rewards)/len(policy.rewards), policy_loss.item()))
policy_loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_log_probs[:]
policy.hx = None
policy.cx = None
def query(counter, seed, genotype, epochs):
trainer = Train()
rewards, rewards_test = trainer.main(counter, seed, genotype, epochs=epochs, train_portion=args.train_portion, save=args.logging_path)
val_sum = 0
for epoch, val_acc in rewards:
val_sum += val_acc
val_avg = val_sum / len(rewards)
return val_avg / 100. , rewards_test[-1][-1] / 100.
def reinforce_search(env):
""" implementation of arch2vec-RL on DARTS Search Space """
policy = Policy_LSTM(args.dim, 128).cuda()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
counter = 0
MAX_BUDGET = args.max_budgets
state, genotype = env.get_init_state()
CURR_BEST_VALID = 0
CURR_BEST_TEST = 0
CURR_BEST_GENOTYPE = None
test_trace = []
valid_trace = []
genotype_trace = []
counter_trace = []
while counter < MAX_BUDGET:
for c in range(args.bs):
state = state.cuda()
action = select_action(state, policy)
state, genotype = env.step(action)
reward, reward_test = query(counter=counter, seed=args.seed, genotype=genotype, epochs=args.inner_epochs)
policy.rewards.append(reward)
counter += 1
print('counter: {}, validation reward: {}, test reward: {}, genotype: {}'.format(counter, reward, reward_test, genotype))
if reward > CURR_BEST_VALID:
CURR_BEST_VALID = reward
CURR_BEST_TEST = reward_test
CURR_BEST_GENOTYPE = genotype
valid_trace.append(float(CURR_BEST_VALID))
test_trace.append(float(CURR_BEST_TEST))
genotype_trace.append(CURR_BEST_GENOTYPE)
counter_trace.append(counter)
if counter >= MAX_BUDGET:
break
finish_episode(policy, optimizer)
res = dict()
res['validation_acc'] = valid_trace
res['test_acc'] = test_trace
res['genotype'] = genotype_trace
res['counter'] = counter_trace
save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim))
if not os.path.exists(save_path):
os.mkdir(save_path)
print('save to {}'.format(save_path))
fh = open(os.path.join(save_path, 'run_{}_arch2vec_model_darts.json'.format(args.seed)), 'w')
json.dump(res, fh)
fh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="arch2vec-REINFORCE")
parser.add_argument("--gamma", type=float, default=0.8, help="discount factor (default 0.99)")
parser.add_argument("--seed", type=int, default=3, help="random seed")
parser.add_argument('--cfg', type=int, default=4, help='configuration (default: 4)')
parser.add_argument('--bs', type=int, default=16, help='batch size')
parser.add_argument('--objective', type=float, default=0.95, help='rl baseline')
parser.add_argument('--max_budgets', type=int, default=100, help='number of queries')
parser.add_argument('--inner_epochs', type=int, default=50, help='inner loop epochs')
parser.add_argument('--train_portion', type=float, default=0.9, help='train/validation split portion')
parser.add_argument('--output_path', type=str, default='rl', help='rl/bo (default: rl)')
parser.add_argument('--logging_path', type=str, default='', help='search logging path')
parser.add_argument('--saved_arch2vec', action="store_true", default=False)
parser.add_argument('--input_dim', type=int, default=11)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--dim', type=int, default=16,
help='feature dimension (default: 16)')
parser.add_argument('--hops', type=int, default=5)
parser.add_argument('--mlps', type=int, default=2)
parser.add_argument('--dropout', type=float, default=0.3)
args = parser.parse_args()
cfg = configs[args.cfg]
env = Env('REINFORCE', args.seed, cfg, data_path='data/data_darts_counter600000.json', save=args.saved_arch2vec)
torch.manual_seed(args.seed)
reinforce_search(env)
| [
"os.path.exists",
"argparse.ArgumentParser",
"torch.eye",
"arch2vec.utils.one_hot_darts",
"os.mkdir",
"arch2vec.models.model.Model",
"random.randint",
"torch.sort",
"arch2vec.preprocessing.gen_isomorphism_graphs.process",
"torch.Tensor",
"torch.save",
"arch2vec.utils.load_json",
"torch.manual_seed",
"torch.nn.LSTMCell",
"torch.load",
"torch.stack",
"os.path.join",
"arch2vec.utils.preprocessing",
"torch.nn.Linear",
"arch2vec.darts.cnn.train_search.Train",
"torch.no_grad",
"json.dump"
]
| [((5645, 5673), 'torch.Tensor', 'torch.Tensor', (['policy.rewards'], {}), '(policy.rewards)\n', (5657, 5673), False, 'import torch\n'), ((5693, 5712), 'torch.sort', 'torch.sort', (['returns'], {}), '(returns)\n', (5703, 5712), False, 'import torch\n'), ((6314, 6321), 'arch2vec.darts.cnn.train_search.Train', 'Train', ([], {}), '()\n', (6319, 6321), False, 'from arch2vec.darts.cnn.train_search import Train\n'), ((8531, 8549), 'json.dump', 'json.dump', (['res', 'fh'], {}), '(res, fh)\n', (8540, 8549), False, 'import json\n'), ((8607, 8664), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""arch2vec-REINFORCE"""'}), "(description='arch2vec-REINFORCE')\n", (8630, 8664), False, 'import argparse\n'), ((10233, 10261), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (10250, 10261), False, 'import torch\n'), ((3167, 3208), 'random.randint', 'random.randint', (['(0)', 'self.features.shape[0]'], {}), '(0, self.features.shape[0])\n', (3181, 3208), False, 'import random\n'), ((4145, 4180), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim1', 'hidden_dim2'], {}), '(hidden_dim1, hidden_dim2)\n', (4154, 4180), True, 'import torch.nn as nn\n'), ((4200, 4235), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim2', 'hidden_dim1'], {}), '(hidden_dim2, hidden_dim1)\n', (4209, 4235), True, 'import torch.nn as nn\n'), ((4554, 4620), 'torch.nn.LSTMCell', 'torch.nn.LSTMCell', ([], {'input_size': 'hidden_dim1', 'hidden_size': 'hidden_dim2'}), '(input_size=hidden_dim1, hidden_size=hidden_dim2)\n', (4571, 4620), False, 'import torch\n'), ((4639, 4674), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim2', 'hidden_dim1'], {}), '(hidden_dim2, hidden_dim1)\n', (4648, 4674), True, 'import torch.nn as nn\n'), ((5955, 5986), 'torch.stack', 'torch.stack', (['policy_loss'], {'dim': '(0)'}), '(policy_loss, dim=0)\n', (5966, 5986), False, 'import torch\n'), ((8332, 8357), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (8346, 8357), False, 'import os\n'), ((8367, 8386), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (8375, 8386), False, 'import os\n'), ((1423, 1443), 'arch2vec.utils.load_json', 'load_json', (['data_path'], {}), '(data_path)\n', (1432, 1443), False, 'from arch2vec.utils import load_json, preprocessing, one_hot_darts\n'), ((1538, 1586), 'os.path.join', 'os.path.join', (['self.dir_name', '"""arch2vec-darts.pt"""'], {}), "(self.dir_name, 'arch2vec-darts.pt')\n", (1550, 1586), False, 'import os\n'), ((1602, 1629), 'os.path.exists', 'os.path.exists', (['self.f_path'], {}), '(self.f_path)\n', (1616, 1629), False, 'import os\n'), ((2397, 2436), 'torch.save', 'torch.save', (['self.embedding', 'self.f_path'], {}), '(self.embedding, self.f_path)\n', (2407, 2436), False, 'import torch\n'), ((2546, 2594), 'os.path.join', 'os.path.join', (['self.dir_name', '"""arch2vec-darts.pt"""'], {}), "(self.dir_name, 'arch2vec-darts.pt')\n", (2558, 2594), False, 'import os\n'), ((2688, 2711), 'torch.load', 'torch.load', (['self.f_path'], {}), '(self.f_path)\n', (2698, 2711), False, 'import torch\n'), ((2930, 2963), 'torch.stack', 'torch.stack', (['self.features'], {'dim': '(0)'}), '(self.features, dim=0)\n', (2941, 2963), False, 'import torch\n'), ((654, 826), 'arch2vec.models.model.Model', 'Model', ([], {'input_dim': 'args.input_dim', 'hidden_dim': 'args.hidden_dim', 'latent_dim': 'args.dim', 'num_hops': 'args.hops', 'num_mlp_layers': 'args.mlps', 'dropout': 'args.dropout'}), "(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args\n 
.dim, num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.\n dropout, **cfg['GAE'])\n", (659, 826), False, 'from arch2vec.models.model import Model\n'), ((938, 983), 'os.path.join', 'os.path.join', (['self.dir_name', '"""model-darts.pt"""'], {}), "(self.dir_name, 'model-darts.pt')\n", (950, 983), False, 'import os\n'), ((2044, 2082), 'arch2vec.utils.preprocessing', 'preprocessing', (['adj', 'ops'], {}), "(adj, ops, **cfg['prep'])\n", (2057, 2082), False, 'from arch2vec.utils import load_json, preprocessing, one_hot_darts\n'), ((5310, 5335), 'torch.eye', 'torch.eye', (['state.shape[0]'], {}), '(state.shape[0])\n', (5319, 5335), False, 'import torch\n'), ((2104, 2119), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2117, 2119), False, 'import torch\n'), ((2280, 2293), 'arch2vec.preprocessing.gen_isomorphism_graphs.process', 'process', (['v[2]'], {}), '(v[2])\n', (2287, 2293), False, 'from arch2vec.preprocessing.gen_isomorphism_graphs import process\n'), ((1051, 1096), 'os.path.join', 'os.path.join', (['self.dir_name', '"""model-darts.pt"""'], {}), "(self.dir_name, 'model-darts.pt')\n", (1063, 1096), False, 'import os\n'), ((1888, 1906), 'torch.Tensor', 'torch.Tensor', (['v[0]'], {}), '(v[0])\n', (1900, 1906), False, 'import torch\n'), ((1962, 1981), 'arch2vec.utils.one_hot_darts', 'one_hot_darts', (['v[1]'], {}), '(v[1])\n', (1975, 1981), False, 'from arch2vec.utils import load_json, preprocessing, one_hot_darts\n')] |
"""Test AdaNet estimator single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import logging
from absl.testing import parameterized
from adanet import replay
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.core.report_materializer import ReportMaterializer
from adanet.distributed.placement import RoundRobinStrategy
from adanet.ensemble import AllStrategy
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import GrowStrategy
from adanet.ensemble import MixtureWeightType
from adanet.ensemble import SoloStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import Generator
from adanet.subnetwork import MaterializedReport
from adanet.subnetwork import Report
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
from adanet.subnetwork import TrainOpSpec
import numpy as np
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.tools import saved_model_utils
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
from tensorflow_estimator.python.estimator.head import regression_head
logging.set_verbosity(logging.INFO)
XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
XOR_LABELS = [[1.], [0.], [1.], [0.]]
class _DNNBuilder(Builder):
"""A simple DNN subnetwork builder."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
subnetwork_chief_hooks=None,
subnetwork_hooks=None,
mixture_weight_chief_hooks=None,
mixture_weight_hooks=None,
seed=13):
self._name = name
self._learning_rate = learning_rate
self._mixture_weight_learning_rate = mixture_weight_learning_rate
self._return_penultimate_layer = return_penultimate_layer
self._layer_size = layer_size
self._subnetwork_chief_hooks = subnetwork_chief_hooks
self._subnetwork_hooks = subnetwork_hooks
self._mixture_weight_chief_hooks = mixture_weight_chief_hooks
self._mixture_weight_hooks = mixture_weight_hooks
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("dnn"):
persisted_tensors = {}
with tf_compat.v1.variable_scope("hidden_layer"):
w = tf_compat.v1.get_variable(
shape=[2, self._layer_size],
initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed),
name="weight")
disjoint_op = tf.constant([1], name="disjoint_op")
with tf_compat.v1.colocate_with(disjoint_op): # tests b/118865235
hidden_layer = tf.matmul(features["x"], w)
if previous_ensemble:
other_hidden_layer = previous_ensemble.weighted_subnetworks[
-1].subnetwork.persisted_tensors["hidden_layer"]
hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)
# Use a leaky-relu activation so that gradients can flow even when
# outputs are negative. Leaky relu has a non-zero slope when x < 0.
# Otherwise success at learning is completely dependent on random seed.
hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)
persisted_tensors["hidden_layer"] = hidden_layer
if training:
# This change will only be in the next iteration if
# `freeze_training_graph` is `True`.
persisted_tensors["hidden_layer"] = 2 * hidden_layer
last_layer = hidden_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
hidden_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
summary.scalar("scalar", 3)
batch_size = features["x"].get_shape().as_list()[0]
summary.image("image", tf.ones([batch_size, 3, 3, 1]))
with tf_compat.v1.variable_scope("nested"):
summary.scalar("scalar", 5)
return Subnetwork(
last_layer=last_layer if self._return_penultimate_layer else logits,
logits=logits,
complexity=3,
persisted_tensors=persisted_tensors,
shared=persisted_tensors)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._subnetwork_hooks:
return train_op
return TrainOpSpec(train_op, self._subnetwork_chief_hooks,
self._subnetwork_hooks)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._mixture_weight_learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._mixture_weight_hooks:
return train_op
return TrainOpSpec(train_op, self._mixture_weight_chief_hooks,
self._mixture_weight_hooks)
def build_subnetwork_report(self):
return Report(
hparams={"layer_size": self._layer_size},
attributes={"complexity": tf.constant(3, dtype=tf.int32)},
metrics={
"moo": (tf.constant(3,
dtype=tf.int32), tf.constant(3, dtype=tf.int32))
})
class _SimpleBuilder(Builder):
"""A simple subnetwork builder that takes feature_columns."""
def __init__(self, name, feature_columns, seed=42):
self._name = name
self._feature_columns = feature_columns
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("simple"):
input_layer = tf_compat.v1.feature_column.input_layer(
features=features, feature_columns=self._feature_columns)
last_layer = input_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
last_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
return Subnetwork(
last_layer=last_layer,
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
class _NanLossBuilder(Builder):
"""A subnetwork builder always produces a NaN loss."""
@property
def name(self):
return "nan"
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=42)) * np.nan
return Subnetwork(last_layer=logits, logits=logits, complexity=0)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
return tf.no_op()
class _LinearBuilder(Builder):
"""A simple linear subnetwork builder."""
def __init__(self, name, mixture_weight_learning_rate=.001, seed=42):
self._name = name
self._mixture_weight_learning_rate = mixture_weight_learning_rate
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=self._seed))
return Subnetwork(
last_layer=features["x"],
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._mixture_weight_learning_rate)
return optimizer.minimize(loss, var_list=var_list)
class _FakeGenerator(Generator):
"""Generator that exposed generate_candidates' arguments."""
def __init__(self, spy_fn, subnetwork_builders):
"""Checks the arguments passed to generate_candidates.
Args:
spy_fn: (iteration_number, previous_ensemble_reports, all_reports) -> ().
Spies on the arguments passed to generate_candidates whenever it is
called.
subnetwork_builders: List of `Builder`s to return in every call to
generate_candidates.
"""
self._spy_fn = spy_fn
self._subnetwork_builders = subnetwork_builders
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports):
"""Spys on arguments passed in, then returns a fixed list of candidates."""
del previous_ensemble # unused
self._spy_fn(iteration_number, previous_ensemble_reports, all_reports)
return self._subnetwork_builders
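# A minimal usage sketch (spy_fn and builder values are illustrative only):
#
#   calls = []
#   generator = _FakeGenerator(
#       spy_fn=lambda iteration, prev_reports, all_reports: calls.append(
#           (iteration, prev_reports, all_reports)),
#       subnetwork_builders=[_DNNBuilder("dnn")])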
class _WidthLimitingDNNBuilder(_DNNBuilder):
"""Limits the width of the previous_ensemble."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
width_limit=None,
seed=13):
if width_limit is not None and width_limit == 0:
raise ValueError("width_limit must be at least 1 or None.")
super(_WidthLimitingDNNBuilder,
self).__init__(name, learning_rate, mixture_weight_learning_rate,
return_penultimate_layer, layer_size, seed)
self._width_limit = width_limit
def prune_previous_ensemble(self, previous_ensemble):
indices = range(len(previous_ensemble.weighted_subnetworks))
if self._width_limit is None:
return indices
if self._width_limit == 1:
return []
return indices[-self._width_limit + 1:] # pylint: disable=invalid-unary-operand-type
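# A worked example of the slice above (illustrative sizes only): with three
# previous weighted subnetworks, width_limit=None keeps all of them,
# width_limit=1 prunes everything, and width_limit=2 keeps indices[-1:], i.e.
# only the most recent subnetwork.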
class _FakeEvaluator(object):
"""Fakes an `adanet.Evaluator`."""
def __init__(self, input_fn):
self._input_fn = input_fn
@property
def input_fn(self):
"""Return the input_fn."""
return self._input_fn
@property
def steps(self):
"""Return the number of evaluation steps."""
return 1
@property
def metric_name(self):
"""Returns the name of the metric being optimized."""
return "adanet_loss"
@property
def objective_fn(self):
"""Always returns the minimize objective."""
return np.nanargmin
def evaluate(self, sess, ensemble_metrics):
"""Abstract method to be overridden in subclasses."""
del sess, ensemble_metrics # Unused.
raise NotImplementedError
class _AlwaysLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-1] = 0.
return losses
class _AlwaysSecondToLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the second to last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-2] = 0.
return losses
class _EarlyStoppingHook(tf_compat.SessionRunHook):
"""Hook that immediately requests training to stop."""
def after_run(self, run_context, run_values):
run_context.request_stop()
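# Requesting a stop in after_run halts a subnetwork's training after its first
# step; the "early_stopping_subnetwork" test case below relies on this to
# exercise uneven per-subnetwork step counts.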
class EstimatorTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "one_step",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": None,
"want_loss": 0.49899703,
"want_iteration": 0,
"want_global_step": 1,
},
{
"testcase_name": "none_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": None,
"steps": 300,
"max_steps": None,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"steps": 300,
"max_steps": None,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_two_max_iteration_fewer_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_iterations": 2,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_no_bias",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"use_bias": False,
"want_loss": 0.496736,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name":
"single_builder_subnetwork_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
subnetwork_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
subnetwork_hooks=[tu.ModifierSessionRunHook("hook_var")])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_mixture_weight_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
mixture_weight_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
mixture_weight_hooks=[
tu.ModifierSessionRunHook("hook_var")
])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_scalar_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.SCALAR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_vector_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.VECTOR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name": "single_builder_replicate_ensemble_in_training",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"replicate_ensemble_in_training": True,
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420215,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_with_hook",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"hooks": [tu.ModifierSessionRunHook()],
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "high_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 500,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name":
"two_builders",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", seed=99)]),
"max_iteration_steps":
200,
"want_loss":
0.27713922,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_different_layer_sizes",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_one_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
None,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_one_max_iteration_two_hundred_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
300,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_two_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
2,
"steps":
None,
"max_steps":
None,
"want_loss":
0.26503286,
"want_iteration":
1,
"want_global_step":
400,
},
{
"testcase_name":
"two_builders_different_layer_sizes_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"two_dnn_export_subnetworks",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
"export_subnetworks":
True,
},
{
"testcase_name":
"width_limiting_builder_no_pruning",
"subnetwork_generator":
SimpleGenerator([_WidthLimitingDNNBuilder("no_pruning")]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_some_pruning",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("some_pruning", width_limit=2)]),
"max_iteration_steps":
75,
"want_loss":
0.38592532,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_prune_all",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("prune_all", width_limit=1)]),
"max_iteration_steps":
75,
"want_loss":
0.43492866,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_mixed",
"subnetwork_generator":
SimpleGenerator([
_WidthLimitingDNNBuilder("no_pruning"),
_WidthLimitingDNNBuilder("some_pruning", width_limit=2),
_WidthLimitingDNNBuilder("prune_all", width_limit=1)
]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_good_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.36189985,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_bad_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[1.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.31389591,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_second_to_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysSecondToLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.32487726,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"report_materializer",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"report_materializer":
ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.29196805,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy_multiple_ensemblers",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"ensemblers": [
ComplexityRegularizedEnsembler(),
ComplexityRegularizedEnsembler(use_bias=True, name="with_bias")
],
"max_iteration_steps":
200,
"want_loss":
0.23053232,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.35249719,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.36163166,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"multi_ensemble_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies":
[AllStrategy(), GrowStrategy(),
SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.24838975,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"dataset_train_input_fn",
"subnetwork_generator":
SimpleGenerator([_DNNBuilder("dnn")]),
# pylint: disable=g-long-lambda
"train_input_fn":
lambda: tf.data.Dataset.from_tensors(({
"x": XOR_FEATURES
}, XOR_LABELS)).repeat(),
# pylint: enable=g-long-lambda
"max_iteration_steps":
100,
"want_loss":
0.32219219,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"early_stopping_subnetwork",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", subnetwork_hooks=[_EarlyStoppingHook()])
]),
"max_iteration_steps":
100,
"max_steps":
200,
"want_loss":
0.2958503,
# Since one subnetwork stops after 1 step and global step is the
# mean of iteration steps, global step will be incremented at half
# the rate.
"want_iteration":
3,
"want_global_step":
200,
})
def test_lifecycle(self,
subnetwork_generator,
want_loss,
want_iteration,
want_global_step,
max_iteration_steps,
mixture_weight_type=MixtureWeightType.MATRIX,
evaluator=None,
use_bias=True,
replicate_ensemble_in_training=False,
hooks=None,
ensemblers=None,
ensemble_strategies=None,
max_steps=300,
steps=None,
report_materializer=None,
train_input_fn=None,
max_iterations=None,
export_subnetworks=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
def _metric_fn(predictions):
mean = tf.keras.metrics.Mean()
mean.update_state(predictions["predictions"])
return {"keras_mean": mean}
default_ensembler_kwargs = {
"mixture_weight_type": mixture_weight_type,
"mixture_weight_initializer": tf_compat.v1.zeros_initializer(),
"warm_start_mixture_weights": True,
"use_bias": use_bias,
}
if ensemblers:
default_ensembler_kwargs = {}
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=max_iteration_steps,
evaluator=evaluator,
ensemblers=ensemblers,
ensemble_strategies=ensemble_strategies,
report_materializer=report_materializer,
replicate_ensemble_in_training=replicate_ensemble_in_training,
metric_fn=_metric_fn,
model_dir=self.test_subdirectory,
config=run_config,
max_iterations=max_iterations,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks,
**default_ensembler_kwargs)
if not train_input_fn:
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(
input_fn=train_input_fn, steps=steps, max_steps=max_steps, hooks=hooks)
# Evaluate.
eval_results = estimator.evaluate(
input_fn=train_input_fn, steps=10, hooks=hooks)
logging.info("%s", eval_results)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
self.assertEqual(want_global_step, eval_results["global_step"])
self.assertEqual(want_iteration, eval_results["iteration"])
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
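# export_saved_model is not available on older Estimator versions, so fall
# back to the deprecated export_savedmodel when it is missing.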
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits", export_signature_def.keys())
self.assertIn("subnetwork_last_layer", export_signature_def.keys())
@parameterized.named_parameters(
{
"testcase_name":
"hash_bucket_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)))
),
}, {
"testcase_name":
"vocab_list_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)))),
}, {
"testcase_name":
"hash_bucket_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)),
dimension=2)),
}, {
"testcase_name":
"vocab_list_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)),
dimension=2)),
})
def test_categorical_columns(self, feature_column):
def train_input_fn():
input_features = {
"human_names": tf.constant([["alice"], ["bob"]], name="human_names")
}
input_labels = tf.constant([[1.], [0.]], name="starts_with_a")
return input_features, input_labels
report_materializer = ReportMaterializer(input_fn=train_input_fn, steps=1)
estimator = Estimator(
head=regression_head.RegressionHead(),
subnetwork_generator=SimpleGenerator(
[_SimpleBuilder(name="simple", feature_columns=[feature_column])]),
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory)
estimator.train(input_fn=train_input_fn, max_steps=3)
@parameterized.named_parameters(
{
"testcase_name": "no_subnetwork_generator",
"subnetwork_generator": None,
"max_iteration_steps": 100,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 0,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": 0,
"want_error": ValueError,
},
{
"testcase_name": "steps_and_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": 1,
"want_error": ValueError,
},
{
"testcase_name": "zero_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 0,
"max_steps": None,
"want_error": ValueError,
},
{
"testcase_name": "nan_loss_builder",
"subnetwork_generator": SimpleGenerator([_NanLossBuilder()]),
"max_iteration_steps": 1,
"max_steps": None,
"want_error": tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_first",
"subnetwork_generator":
SimpleGenerator([
_NanLossBuilder(),
_DNNBuilder("dnn"),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
)
def test_train_error(self,
subnetwork_generator,
max_iteration_steps,
want_error,
steps=None,
max_steps=10,
max_iterations=None):
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
with self.assertRaises(want_error):
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
max_iterations=max_iterations,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, steps=steps, max_steps=max_steps)
def test_binary_head_asserts_are_disabled(self):
"""Tests b/140267630."""
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
])
estimator = Estimator(
head=binary_class_head_v1(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
model_dir=self.test_subdirectory)
eval_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class KerasCNNBuilder(Builder):
"""Builds a CNN subnetwork for AdaNet."""
def __init__(self, learning_rate, seed=42):
"""Initializes a `SimpleCNNBuilder`.
Args:
learning_rate: The float learning rate to use.
seed: The random seed.
Returns:
An instance of `KerasCNNBuilder`.
"""
self._learning_rate = learning_rate
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
images = list(features.values())[0]
images = tf.reshape(images, [-1, 2, 2, 1])
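# Treat the four XOR features as a 2x2 single-channel image so the Conv2D
# layer below has a spatial input to convolve over.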
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = tf.keras.layers.Conv2D(
filters=3,
kernel_size=1,
padding="same",
activation="relu",
kernel_initializer=kernel_initializer)(
images)
x = tf.keras.layers.MaxPool2D(pool_size=2, strides=1)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(
units=3, activation="relu", kernel_initializer=kernel_initializer)(
x)
logits = tf_compat.v1.layers.Dense(
units=1, activation=None, kernel_initializer=kernel_initializer)(
x)
complexity = tf.constant(1)
return Subnetwork(
last_layer=x,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
return tf.no_op()
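# Returning a no-op here leaves the mixture weights at their initial values;
# only the subnetwork itself is trained by this builder.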
@property
def name(self):
return "simple_cnn"
class EstimatorKerasLayersTest(tu.AdanetTestCase):
def test_lifecycle(self):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=SimpleGenerator(
[KerasCNNBuilder(learning_rate=.001)]),
max_iteration_steps=3,
evaluator=Evaluator(
input_fn=tu.dummy_input_fn([[1., 1., .1, .1]], [[0.]]), steps=3),
model_dir=self.test_subdirectory,
config=run_config)
xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
[1., 1., 1., 1.]]
xor_labels = [[1.], [0.], [1.], [0.]]
train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=9)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
logging.info("%s", eval_results)
want_loss = 0.16915826
if tf_compat.version_greater_or_equal("1.10.0"):
# After TF v1.10.0, the loss computed from a neural network using Keras
# layers changed; the reason for the change is unclear.
want_loss = 0.26195815
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_saved_model_fn(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
class MultiHeadBuilder(Builder):
"""Builds a subnetwork for AdaNet that uses dict labels."""
def __init__(self, learning_rate=.001, split_logits=False, seed=42):
"""Initializes a `LabelsDictBuilder`.
Args:
learning_rate: The float learning rate to use.
split_logits: Whether to return a dict of logits or a single concatenated
logits `Tensor`.
seed: The random seed.
Returns:
An instance of `MultiHeadBuilder`.
"""
self._learning_rate = learning_rate
self._split_logits = split_logits
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = features["x"]
logits = tf_compat.v1.layers.dense(
x,
units=logits_dimension,
activation=None,
kernel_initializer=kernel_initializer)
if self._split_logits:
# Return different logits, one for each head.
logits1, logits2 = tf.split(logits, [1, 1], 1)
logits = {
"head1": logits1,
"head2": logits2,
}
complexity = tf.constant(1)
return Subnetwork(
last_layer=logits,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
@property
def name(self):
return "multi_head"
class EstimatorMultiHeadTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "concatenated_logits",
"builders": [MultiHeadBuilder()],
"want_loss": 3.218,
}, {
"testcase_name": "split_logits_with_export_subnetworks",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
"export_subnetworks": True,
}, {
"testcase_name": "split_logits",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
})
def test_lifecycle(self, builders, want_loss, export_subnetworks=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
[1., 1., 1., 1.]]
xor_labels = [[1.], [0.], [1.], [0.]]
def train_input_fn():
return {
"x": tf.constant(xor_features)
}, {
"head1": tf.constant(xor_labels),
"head2": tf.constant(xor_labels)
}
estimator = Estimator(
head=multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="head1", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
regression_head.RegressionHead(
name="head2", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
subnetwork_generator=SimpleGenerator(builders),
max_iteration_steps=3,
evaluator=Evaluator(input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory,
config=run_config,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=9)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction[("head1", "predictions")])
self.assertIsNotNone(prediction[("head2", "predictions")])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits_head1", export_signature_def.keys())
self.assertIn("subnetwork_logits_head2", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head1", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head2", export_signature_def.keys())
class EstimatorCallingModelFnDirectlyTest(tu.AdanetTestCase):
"""Tests b/112108745. Warn users not to call model_fn directly."""
def test_calling_model_fn_directly(self):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
with self.assertRaises(UserWarning):
model_fn(
features=features,
mode=tf.estimator.ModeKeys.TRAIN,
labels=labels,
config={})
def test_calling_model_fn_directly_for_predict(self):
with context.graph_mode():
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
model_fn(
features=features,
mode=tf.estimator.ModeKeys.PREDICT,
labels=labels,
config=tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=3,
model_dir=self.test_subdirectory,
))
class EstimatorCheckpointTest(tu.AdanetTestCase):
"""Tests estimator checkpoints."""
@parameterized.named_parameters(
{
"testcase_name": "single_iteration",
"max_iteration_steps": 3,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "single_iteration_keep_one",
"max_iteration_steps": 3,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
}, {
"testcase_name": "three_iterations",
"max_iteration_steps": 1,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "three_iterations_keep_one",
"max_iteration_steps": 1,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
})
def test_checkpoints(self,
max_iteration_steps,
keep_checkpoint_max,
want_num_checkpoints,
max_steps=3):
config = tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=keep_checkpoint_max,
)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
config=config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
checkpoints = tf.io.gfile.glob(
os.path.join(self.test_subdirectory, "*.meta"))
self.assertEqual(want_num_checkpoints, len(checkpoints))
def _check_eventfile_for_keyword(keyword, dir_):
"""Checks event files for the keyword."""
tf_compat.v1.summary.FileWriterCache.clear()
if not tf.io.gfile.exists(dir_):
raise ValueError("Directory '{}' not found.".format(dir_))
# Get last `Event` written.
filenames = os.path.join(dir_, "events*")
event_paths = tf.io.gfile.glob(filenames)
if not event_paths:
raise ValueError("Path '{}' not found.".format(filenames))
for last_event in tf_compat.v1.train.summary_iterator(event_paths[-1]):
if last_event.summary is not None:
for value in last_event.summary.value:
if keyword == value.tag:
if value.HasField("simple_value"):
return value.simple_value
if value.HasField("image"):
return (value.image.height, value.image.width,
value.image.colorspace)
if value.HasField("tensor"):
return value.tensor.string_val
raise ValueError("Keyword '{}' not found in path '{}'.".format(
keyword, filenames))
class _FakeMetric(object):
"""A fake metric."""
def __init__(self, value, dtype):
self._value = value
self._dtype = dtype
def to_metric(self):
tensor = tf.convert_to_tensor(value=self._value, dtype=self._dtype)
return (tensor, tensor)
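# Returning the same tensor twice above mimics the (value_op, update_op) pair
# that tf.metrics-style evaluation metrics are expected to provide.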
class _EvalMetricsHead(object):
"""A fake head with the given evaluation metrics."""
def __init__(self, fake_metrics):
self._fake_metrics = fake_metrics
@property
def logits_dimension(self):
return 1
def create_estimator_spec(self,
features,
mode,
logits,
labels=None,
train_op_fn=None):
del features # Unused
metric_ops = None
if self._fake_metrics:
metric_ops = {}
for k, fake_metric in self._fake_metrics.items():
metric_ops[k] = fake_metric.to_metric()
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=logits,
loss=tf.reduce_mean(input_tensor=labels - logits),
eval_metric_ops=metric_ops,
train_op=train_op_fn(1))
def _mean_keras_metric(value):
"""Returns the mean of given value as a Keras metric."""
mean = tf.keras.metrics.Mean()
mean.update_state(value)
return mean
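# Keras Metric objects can be returned from a metric_fn directly; the
# "keras_metrics_fn" test case below exercises this alongside the
# tf.compat.v1 metric tuples used by the "metrics_fn" case.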
class EstimatorSummaryWriterTest(tu.AdanetTestCase):
"""Test that Tensorboard summaries get written correctly."""
@tf_compat.skip_for_tf2
def test_summaries(self):
"""Tests that summaries are written to candidate directory."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
self.assertAlmostEqual(
3., _check_eventfile_for_keyword("scalar", subnetwork_subdir), places=3)
self.assertEqual((3, 3, 1),
_check_eventfile_for_keyword("image/image/0",
subnetwork_subdir))
self.assertAlmostEqual(
5.,
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
places=3)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
places=3)
@tf_compat.skip_for_tf2
def test_disable_summaries(self):
"""Tests that summaries can be disabled for ensembles and subnetworks."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory,
enable_ensemble_summaries=False,
enable_subnetwork_summaries=False,
)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("scalar", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("image/image/0", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name": "none_metrics",
"head": _EvalMetricsHead(None),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": tf_compat.v1.metrics.mean(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name":
"keras_metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": _mean_keras_metric(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name": "empty_metrics",
"head": _EvalMetricsHead({}),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"evaluation_name",
"head":
_EvalMetricsHead({}),
"evaluation_name":
"continuous",
"want_summaries": [],
"want_loss":
-1.791,
"global_subdir":
"eval_continuous",
"subnetwork_subdir":
"subnetwork/t0_dnn/eval_continuous",
"ensemble_subdir":
"ensemble/t0_dnn_grow_complexity_regularized/eval_continuous",
}, {
"testcase_name":
"regression_head",
"head":
regression_head.RegressionHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"want_summaries": ["average_loss"],
"want_loss":
.256,
}, {
"testcase_name":
"binary_classification_head",
"head":
binary_class_head.BinaryClassHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"learning_rate":
.6,
"want_summaries": ["average_loss", "accuracy", "recall"],
"want_loss":
0.122,
}, {
"testcase_name":
"all_metrics",
"head":
_EvalMetricsHead({
"float32":
_FakeMetric(1., tf.float32),
"float64":
_FakeMetric(1., tf.float64),
"serialized_summary":
_FakeMetric(
tf_compat.v1.Summary(value=[
tf_compat.v1.Summary.Value(
tag="summary_tag", simple_value=1.)
]).SerializeToString(), tf.string),
}),
"want_summaries": [
"float32",
"float64",
"serialized_summary/0",
],
"want_loss":
-1.791,
})
# pylint: enable=g-long-lambda
def test_eval_metrics(
self,
head,
want_loss,
want_summaries,
evaluation_name=None,
metric_fn=None,
learning_rate=.01,
global_subdir="eval",
subnetwork_subdir="subnetwork/t0_dnn/eval",
ensemble_subdir="ensemble/t0_dnn_grow_complexity_regularized/eval"):
"""Test that AdaNet evaluation metrics get persisted correctly."""
seed = 42
run_config = tf.estimator.RunConfig(tf_random_seed=seed)
subnetwork_generator = SimpleGenerator([
_DNNBuilder(
"dnn",
learning_rate=learning_rate,
mixture_weight_learning_rate=0.,
layer_size=8,
seed=seed)
])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
metric_fn=metric_fn,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
estimator.train(input_fn=train_input_fn, max_steps=100)
metrics = estimator.evaluate(
input_fn=train_input_fn, steps=1, name=evaluation_name)
self.assertAlmostEqual(want_loss, metrics["loss"], places=3)
global_subdir = os.path.join(self.test_subdirectory, global_subdir)
subnetwork_subdir = os.path.join(self.test_subdirectory, subnetwork_subdir)
ensemble_subdir = os.path.join(self.test_subdirectory, ensemble_subdir)
self.assertAlmostEqual(
want_loss,
_check_eventfile_for_keyword("loss", subnetwork_subdir),
places=3)
for metric in want_summaries:
self.assertIsNotNone(
_check_eventfile_for_keyword(metric, subnetwork_subdir),
msg="{} should be under 'eval'.".format(metric))
for dir_ in [global_subdir, ensemble_subdir]:
self.assertAlmostEqual(metrics["loss"],
_check_eventfile_for_keyword("loss", dir_))
self.assertEqual([b"| dnn |"],
_check_eventfile_for_keyword(
"architecture/adanet/ensembles/0", dir_))
for metric in want_summaries:
self.assertTrue(
_check_eventfile_for_keyword(metric, dir_) > 0.,
msg="{} should be under 'eval'.".format(metric))
class EstimatorMembersOverrideTest(tu.AdanetTestCase):
"""Tests b/77494544 fix."""
def test_assert_members_are_not_overridden(self):
"""Assert that AdaNet estimator does not break other estimators."""
config = tf.estimator.RunConfig()
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
adanet = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=config)
self.assertIsNotNone(adanet)
if hasattr(tf.estimator, "LinearEstimator"):
estimator_fn = tf.estimator.LinearEstimator
else:
estimator_fn = tf.contrib.estimator.LinearEstimator
linear = estimator_fn(
head=tu.head(), feature_columns=[tf.feature_column.numeric_column("x")])
self.assertIsNotNone(linear)
def _dummy_feature_dict_input_fn(features, labels):
"""Returns an input_fn that returns feature and labels `Tensors`."""
def _input_fn():
input_features = {}
for key, feature in features.items():
input_features[key] = tf.constant(feature, name=key)
input_labels = tf.constant(labels, name="labels")
return input_features, input_labels
return _input_fn
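# A minimal usage sketch mirroring how the tests below call this helper
# (values are illustrative only):
#
#   input_fn = _dummy_feature_dict_input_fn({"x": [[1., 0.]]}, [[1.]])
#   features, labels = input_fn()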
class EstimatorDifferentFeaturesPerModeTest(tu.AdanetTestCase):
"""Tests b/109751254."""
@parameterized.named_parameters(
{
"testcase_name": "extra_train_features",
"train_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_eval_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_predict_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
})
def test_different_features_per_mode(self, train_features, eval_features,
predict_features):
"""Tests tests different numbers of features per mode."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(train_features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Evaluate.
eval_input_fn = _dummy_feature_dict_input_fn(eval_features, labels)
estimator.evaluate(input_fn=eval_input_fn, steps=1)
# Predict.
predict_input_fn = _dummy_feature_dict_input_fn(predict_features, None)
estimator.predict(input_fn=predict_input_fn)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
features = {}
for key, value in predict_features.items():
features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_saved_model_fn(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
class EstimatorExportSavedModelTest(tu.AdanetTestCase):
def test_export_saved_model_for_predict(self):
"""Tests SavedModel exporting functionality for predict (b/110435640)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
for key, value in features.items():
features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
@test_util.run_in_graph_and_eager_modes
def test_export_saved_model_for_eval(self):
"""Tests SavedModel exporting functionality for eval (b/110991908)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", layer_size=8, learning_rate=1.)])
estimator = Estimator(
head=binary_class_head.BinaryClassHead(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=300)
metrics = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertAlmostEqual(.067, metrics["average_loss"], places=3)
self.assertAlmostEqual(1., metrics["recall"], places=3)
self.assertAlmostEqual(1., metrics["accuracy"], places=3)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return export.SupervisedInputReceiver(
features={"x": tf.constant(XOR_FEATURES)},
labels=tf.constant(XOR_LABELS),
receiver_tensors=serialized_example)
export_dir_base = os.path.join(self.test_subdirectory, "export")
try:
estimator.export_saved_model(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
try:
tf.contrib.estimator.export_saved_model_for_mode(
estimator,
export_dir_base=export_dir_base,
input_receiver_fn=serving_input_fn,
mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
subdir = tf.io.gfile.listdir(export_dir_base)[0]
with context.graph_mode(), self.test_session() as sess:
meta_graph_def = tf_compat.v1.saved_model.loader.load(
sess, ["eval"], os.path.join(export_dir_base, subdir))
signature_def = meta_graph_def.signature_def.get("eval")
# Read the metric before running any update op; it should still be zero.
self.assertAlmostEqual(
0.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
# Run metric update op.
sess.run((tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/update_op"])))
# Read metric again; it should no longer be zero.
self.assertAlmostEqual(
0.067,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/value"])),
places=3)
def test_export_saved_model_always_uses_replication_placement(self):
"""Tests b/137675014."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn1"), _DNNBuilder("dnn2")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config,
experimental_placement_strategy=RoundRobinStrategy())
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
tensor_features = {}
for key, value in features.items():
tensor_features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=tensor_features, receiver_tensors=serialized_example)
# Fake the number of PS replicas so RoundRobinStrategy will be used.
estimator._config._num_ps_replicas = 2
# If we're still using RoundRobinStrategy, this call will fail by trying
# to place ops on non-existent devices.
# Check all three export methods.
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
try:
estimator.export_savedmodel(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
except AttributeError as error:
# Log deprecation errors.
logging.warning("Testing estimator#export_savedmodel: %s", error)
estimator.experimental_export_all_saved_models(
export_dir_base=self.test_subdirectory,
input_receiver_fn_map={
tf.estimator.ModeKeys.PREDICT: serving_input_fn,
})
class EstimatorReportTest(tu.AdanetTestCase):
"""Tests report generation and usage."""
def compare_report_lists(self, report_list1, report_list2):
# Essentially assertEqual(report_list1, report_list2), but compares the
# reports field by field: hparams, attributes, included_in_final_ensemble,
# and metric values.
def make_qualified_name(iteration_number, name):
return "iteration_{}/{}".format(iteration_number, name)
report_dict_1 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list1
}
report_dict_2 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list2
}
self.assertEqual(len(report_list1), len(report_list2))
for qualified_name in report_dict_1.keys():
report_1 = report_dict_1[qualified_name]
report_2 = report_dict_2[qualified_name]
self.assertEqual(
report_1.hparams,
report_2.hparams,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.attributes,
report_2.attributes,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.included_in_final_ensemble,
report_2.included_in_final_ensemble,
msg="{} vs. {}".format(report_1, report_2))
for metric_key, metric_value in report_1.metrics.items():
self.assertEqual(
metric_value,
report_2.metrics[metric_key],
msg="{} vs. {}".format(report_1, report_2))
@parameterized.named_parameters(
{
"testcase_name": "one_iteration_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name": "one_iteration_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# Fix the match so that dnn_3 wins.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name":
"three_iterations_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
)
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
{
"testcase_name":
"three_iterations_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# fixing the match for dnn_3 to win in every iteration.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
)
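  # Note: from iteration 1 onward the expected reports above also include a
  # "previous_ensemble" entry, because the previous iteration's ensemble is
  # itself evaluated as a candidate (and reported) alongside the new
  # subnetworks.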
def test_report_generation_and_usage(self, subnetwork_builders,
num_iterations,
want_materialized_iteration_reports,
want_previous_ensemble_reports,
want_all_reports):
    # _spy_fn stores the iteration_number, previous_ensemble_reports and
    # all_reports arguments in the local spied_iteration_reports dictionary,
    # keyed by iteration number, so the values passed for each iteration can
    # be inspected after training.
spied_iteration_reports = {}
def _spy_fn(iteration_number, previous_ensemble_reports, all_reports):
spied_iteration_reports[iteration_number] = {
"previous_ensemble_reports": previous_ensemble_reports,
"all_reports": all_reports,
}
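    # _FakeGenerator (defined earlier in this file) is assumed to invoke
    # _spy_fn from its generate_candidates method, so the spy records the
    # arguments the framework passed to the generator at each iteration.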
subnetwork_generator = _FakeGenerator(
spy_fn=_spy_fn, subnetwork_builders=subnetwork_builders)
max_iteration_steps = 5
max_steps = max_iteration_steps * num_iterations + 1
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
report_materializer=ReportMaterializer(
input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory)
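    # The ReportMaterializer evaluates each candidate subnetwork's report
    # (hparams, attributes and metrics such as "adanet_loss") for one step on
    # train_input_fn so the reports can be persisted and read back through
    # the report accessor below.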
report_accessor = estimator._report_accessor
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
materialized_iteration_reports = list(
report_accessor.read_iteration_reports())
self.assertEqual(num_iterations, len(materialized_iteration_reports))
for i in range(num_iterations):
want_materialized_reports = (want_materialized_iteration_reports[i])
materialized_reports = materialized_iteration_reports[i]
self.compare_report_lists(want_materialized_reports, materialized_reports)
      # Find the index of the subnetwork report with the smallest adanet_loss.
argmin_adanet_loss = 0
smallest_known_adanet_loss = float("inf")
for j, materialized_subnetwork_report in enumerate(materialized_reports):
if (smallest_known_adanet_loss >
materialized_subnetwork_report.metrics["adanet_loss"]):
smallest_known_adanet_loss = (
materialized_subnetwork_report.metrics["adanet_loss"])
argmin_adanet_loss = j
# Check that the subnetwork with the lowest adanet loss is the one
# that is included in the final ensemble.
      for j, materialized_subnetwork_report in enumerate(materialized_reports):
        self.assertEqual(j == argmin_adanet_loss,
                         materialized_subnetwork_report.included_in_final_ensemble)
# Check the arguments passed into the generate_candidates method of the
# Generator.
iteration_report = spied_iteration_reports[num_iterations - 1]
self.compare_report_lists(want_previous_ensemble_reports,
iteration_report["previous_ensemble_reports"])
self.compare_report_lists(want_all_reports, iteration_report["all_reports"])
class EstimatorForceGrowTest(tu.AdanetTestCase):
"""Tests the force_grow override.
  Uses linear subnetworks with the same seed. They will produce identical
  outputs, so none of the new subnetworks can improve the AdaNet objective;
  unless the `force_grow` override is set, AdaNet will not add them to the
  ensemble.
"""
@parameterized.named_parameters(
{
"testcase_name": "one_builder_no_force_grow",
"builders":
[_LinearBuilder("linear", mixture_weight_learning_rate=0.)],
"force_grow": False,
"want_subnetworks": 1,
}, {
"testcase_name": "one_builder",
"builders":
[_LinearBuilder("linear", mixture_weight_learning_rate=0.)],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name": "two_builders",
"builders": [
_LinearBuilder("linear", mixture_weight_learning_rate=0.),
_LinearBuilder("linear2", mixture_weight_learning_rate=0.)
],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name":
"two_builders_with_evaluator",
"builders": [
_LinearBuilder("linear", mixture_weight_learning_rate=0.),
_LinearBuilder("linear2", mixture_weight_learning_rate=0.)
],
"force_grow":
True,
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"want_subnetworks":
3,
})
def test_force_grow(self,
builders,
force_grow,
want_subnetworks,
evaluator=None):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(builders)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
evaluator=evaluator,
force_grow=force_grow,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    # Train for three iterations.
estimator.train(input_fn=train_input_fn, max_steps=3)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
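    # The ensemble architecture string names every member subnetwork, so
    # counting " linear " occurrences gives the number of linear subnetworks
    # in the final ensemble.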
self.assertEqual(
want_subnetworks,
str(eval_results["architecture/adanet/ensembles"]).count(" linear "))
class EstimatorDebugTest(tu.AdanetTestCase):
"""Tests b/125483534. Detect NaNs in input_fns."""
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name":
"nan_features",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.math.log([[1., 0.]])
}, tf.zeros([1, 1]))
}, {
"testcase_name":
"nan_label",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, tf.math.log([[0.]]))
}, {
"testcase_name":
"nan_labels_dict",
"head":
multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, {
"y": tf.math.log([[0.]])
})
})
# pylint: enable=g-long-lambda
def test_nans_from_input_fn(self, head, input_fn):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=3,
model_dir=self.test_subdirectory,
debug=True)
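    # debug=True makes the estimator check features and labels for NaNs and
    # Infs, so the non-finite values produced by tf.math.log above should
    # surface as an InvalidArgumentError during training.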
with self.assertRaises(tf.errors.InvalidArgumentError):
estimator.train(input_fn=input_fn, max_steps=3)
class EstimatorEvaluateDuringTrainHookTest(tu.AdanetTestCase):
"""Tests b/129000842 with a hook that calls estimator.evaluate()."""
def test_train(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
class EvalTrainHook(tf.estimator.SessionRunHook):
def end(self, session):
estimator.evaluate(input_fn=train_input_fn, steps=1)
# This should not infinite loop.
estimator.train(
input_fn=train_input_fn, max_steps=3, hooks=[EvalTrainHook()])
class CheckpointSaverHookDuringTrainingTest(tu.AdanetTestCase):
"""Tests b/139057887."""
def test_checkpoint_saver_hooks_not_decorated_during_training(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
saver_hook = tf_compat.v1.train.CheckpointSaverHook(
checkpoint_dir=self.test_subdirectory, save_steps=10)
listener = tf_compat.v1.train.CheckpointSaverListener()
estimator.train(
input_fn=train_input_fn,
max_steps=3,
hooks=[saver_hook],
saving_listeners=[listener])
# If CheckpointSaverHook was not recognized during training then all
# saving_listeners would be attached to a default CheckpointSaverHook that
# Estimator creates.
self.assertLen(saver_hook._listeners, 1)
self.assertIs(saver_hook._listeners[0], listener)
class EstimatorTFLearnRunConfigTest(tu.AdanetTestCase):
"""Tests b/129483642 for tf.contrib.learn.RunConfig.
Checks that TF_CONFIG is overwritten correctly when no cluster is specified
in the RunConfig and the only task is of type chief.
"""
def test_train(self):
try:
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
# Removed in TF 1.15 (nightly). See
# https://travis-ci.org/tensorflow/adanet/jobs/583471908
_ = run_config._session_creation_timeout_secs
except AttributeError:
self.skipTest("There is no tf.contrib in TF 2.0.")
try:
tf_config = {
"task": {
"type": "chief",
"index": 0
},
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
run_config._is_chief = True # pylint: disable=protected-access
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Will fail if TF_CONFIG is not overwritten correctly in
# Estimator#prepare_next_iteration.
estimator.train(input_fn=train_input_fn, max_steps=3)
finally:
      # Unset TF_CONFIG so it does not leak into and break other tests.
del os.environ["TF_CONFIG"]
class EstimatorReplayTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_evaluator",
"evaluator": None,
"replay_evaluator": None,
"want_architecture": " dnn3 | dnn3 | dnn ",
}, {
"testcase_name":
"evaluator",
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS),
steps=1),
"replay_evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[0., 0.], [0., 0], [0., 0.],
[0., 0.]], [[0], [0], [0], [0]]),
steps=1),
"want_architecture":
" dnn3 | dnn3 | dnn ",
})
def test_replay(self, evaluator, replay_evaluator, want_architecture):
"""Train entire estimator lifecycle using Replay."""
original_model_dir = os.path.join(self.test_subdirectory, "original")
run_config = tf.estimator.RunConfig(
tf_random_seed=42, model_dir=original_model_dir)
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
_DNNBuilder("dnn3", layer_size=5),
])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=evaluator,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train for three iterations.
estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
replay_run_config = tf.estimator.RunConfig(
tf_random_seed=42,
model_dir=os.path.join(self.test_subdirectory, "replayed"))
# Use different features and labels to represent a shift in the data
# distribution.
different_features = [[0., 0.], [0., 0], [0., 0.], [0., 0.]]
different_labels = [[0], [0], [0], [0]]
replay_estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=replay_evaluator,
config=replay_run_config,
replay_config=replay.Config(best_ensemble_indices=[2, 3, 1]))
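    # replay.Config replays the recorded best-ensemble index for each of the
    # three iterations, so the replayed estimator is expected to reproduce
    # the same architecture even though it trains on shifted data.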
train_input_fn = tu.dummy_input_fn(different_features, different_labels)
# Train for three iterations.
replay_estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = replay_estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
if __name__ == "__main__":
tf.test.main()
| [
"adanet.tf_compat.v1.placeholder",
"tensorflow_estimator.python.estimator.head.binary_class_head.BinaryClassHead",
"tensorflow.math.log",
"adanet.tf_compat.v1.metrics.mean",
"tensorflow.split",
"absl.logging.info",
"tensorflow.io.gfile.listdir",
"adanet.tf_compat.v1.variable_scope",
"adanet.tf_compat.v1.glorot_uniform_initializer",
"adanet.distributed.placement.RoundRobinStrategy",
"tensorflow.keras.layers.Dense",
"adanet.tf_compat.v1.Summary.Value",
"adanet.ensemble.SoloStrategy",
"adanet.core.testing_utils.dataset_input_fn",
"tensorflow.reduce_mean",
"adanet.core.testing_utils.ModifierSessionRunHook",
"adanet.subnetwork.Subnetwork",
"adanet.subnetwork.MaterializedReport",
"adanet.replay.Config",
"adanet.tf_compat.v1.train.CheckpointSaverHook",
"tensorflow.io.gfile.glob",
"tensorflow.keras.layers.Conv2D",
"adanet.ensemble.ComplexityRegularizedEnsembler",
"json.dumps",
"adanet.tf_compat.v1.train.GradientDescentOptimizer",
"adanet.core.testing_utils.dummy_input_fn",
"adanet.core.estimator.Estimator",
"adanet.tf_compat.v1.layers.Dense",
"tensorflow.contrib.learn.RunConfig",
"adanet.tf_compat.v1.summary.FileWriterCache.clear",
"tensorflow.concat",
"tensorflow.estimator.export.ServingInputReceiver",
"adanet.tf_compat.v1.train.CheckpointSaverListener",
"adanet.core.testing_utils.head",
"adanet.tf_compat.v1.feature_column.input_layer",
"adanet.tf_compat.v1.keras.initializers.he_normal",
"tensorflow.convert_to_tensor",
"adanet.subnetwork.SimpleGenerator",
"adanet.core.evaluator.Evaluator",
"tensorflow.matmul",
"tensorflow.io.gfile.exists",
"tensorflow.zeros",
"adanet.tf_compat.version_greater_or_equal",
"tensorflow.data.Dataset.from_tensors",
"adanet.core.report_materializer.ReportMaterializer",
"tensorflow.keras.metrics.Mean",
"adanet.ensemble.GrowStrategy",
"adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info",
"adanet.tf_compat.v1.train.summary_iterator",
"tensorflow.nn.leaky_relu",
"adanet.tf_compat.v1.zeros_initializer",
"absl.logging.warning",
"tensorflow.feature_column.numeric_column",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.reshape",
"absl.logging.set_verbosity",
"tensorflow.keras.layers.Flatten",
"adanet.ensemble.AllStrategy",
"adanet.subnetwork.TrainOpSpec",
"tensorflow.feature_column.categorical_column_with_vocabulary_list",
"tensorflow.keras.layers.MaxPool2D",
"adanet.tf_compat.v1.colocate_with",
"tensorflow.estimator.RunConfig",
"tensorflow.no_op",
"tensorflow.ones",
"tensorflow_estimator.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss",
"os.path.join",
"absl.testing.parameterized.named_parameters",
"tensorflow.test.main",
"tensorflow.contrib.estimator.export_saved_model_for_mode",
"tensorflow_estimator.python.estimator.head.regression_head.RegressionHead",
"adanet.tf_compat.v1.layers.dense",
"tensorflow.constant",
"adanet.tf_compat.v1.train.create_global_step",
"tensorflow.feature_column.categorical_column_with_hash_bucket"
]
| [((2394, 2429), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (2415, 2429), False, 'from absl import logging\n'), ((56220, 56772), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["{'testcase_name': 'single_iteration', 'max_iteration_steps': 3,\n 'keep_checkpoint_max': 3, 'want_num_checkpoints': 3}", "{'testcase_name': 'single_iteration_keep_one', 'max_iteration_steps': 3,\n 'keep_checkpoint_max': 1, 'want_num_checkpoints': 1}", "{'testcase_name': 'three_iterations', 'max_iteration_steps': 1,\n 'keep_checkpoint_max': 3, 'want_num_checkpoints': 3}", "{'testcase_name': 'three_iterations_keep_one', 'max_iteration_steps': 1,\n 'keep_checkpoint_max': 1, 'want_num_checkpoints': 1}"], {}), "({'testcase_name': 'single_iteration',\n 'max_iteration_steps': 3, 'keep_checkpoint_max': 3,\n 'want_num_checkpoints': 3}, {'testcase_name':\n 'single_iteration_keep_one', 'max_iteration_steps': 3,\n 'keep_checkpoint_max': 1, 'want_num_checkpoints': 1}, {'testcase_name':\n 'three_iterations', 'max_iteration_steps': 1, 'keep_checkpoint_max': 3,\n 'want_num_checkpoints': 3}, {'testcase_name':\n 'three_iterations_keep_one', 'max_iteration_steps': 1,\n 'keep_checkpoint_max': 1, 'want_num_checkpoints': 1})\n", (56250, 56772), False, 'from absl.testing import parameterized\n'), ((58274, 58318), 'adanet.tf_compat.v1.summary.FileWriterCache.clear', 'tf_compat.v1.summary.FileWriterCache.clear', ([], {}), '()\n', (58316, 58318), False, 'from adanet import tf_compat\n'), ((58463, 58492), 'os.path.join', 'os.path.join', (['dir_', '"""events*"""'], {}), "(dir_, 'events*')\n", (58475, 58492), False, 'import os\n'), ((58509, 58536), 'tensorflow.io.gfile.glob', 'tf.io.gfile.glob', (['filenames'], {}), '(filenames)\n', (58525, 58536), True, 'import tensorflow as tf\n'), ((58643, 58695), 'adanet.tf_compat.v1.train.summary_iterator', 'tf_compat.v1.train.summary_iterator', (['event_paths[-1]'], {}), '(event_paths[-1])\n', (58678, 58695), False, 'from adanet import tf_compat\n'), ((60445, 60468), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (60466, 60468), True, 'import tensorflow as tf\n'), ((72915, 73533), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["{'testcase_name': 'extra_train_features', 'train_features': {'x': [[1.0, \n 0.0]], 'extra': [[1.0, 0.0]]}, 'eval_features': {'x': [[1.0, 0.0]]},\n 'predict_features': {'x': [[1.0, 0.0]]}}", "{'testcase_name': 'extra_eval_features', 'train_features': {'x': [[1.0, 0.0\n ]]}, 'eval_features': {'x': [[1.0, 0.0]], 'extra': [[1.0, 0.0]]},\n 'predict_features': {'x': [[1.0, 0.0]]}}", "{'testcase_name': 'extra_predict_features', 'train_features': {'x': [[1.0, \n 0.0]]}, 'eval_features': {'x': [[1.0, 0.0]]}, 'predict_features': {'x':\n [[1.0, 0.0]], 'extra': [[1.0, 0.0]]}}"], {}), "({'testcase_name': 'extra_train_features',\n 'train_features': {'x': [[1.0, 0.0]], 'extra': [[1.0, 0.0]]},\n 'eval_features': {'x': [[1.0, 0.0]]}, 'predict_features': {'x': [[1.0, \n 0.0]]}}, {'testcase_name': 'extra_eval_features', 'train_features': {\n 'x': [[1.0, 0.0]]}, 'eval_features': {'x': [[1.0, 0.0]], 'extra': [[1.0,\n 0.0]]}, 'predict_features': {'x': [[1.0, 0.0]]}}, {'testcase_name':\n 'extra_predict_features', 'train_features': {'x': [[1.0, 0.0]]},\n 'eval_features': {'x': [[1.0, 0.0]]}, 'predict_features': {'x': [[1.0, \n 0.0]], 'extra': [[1.0, 0.0]]}})\n", (72945, 73533), False, 'from absl.testing import parameterized\n'), ((115088, 115102), 
'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (115100, 115102), True, 'import tensorflow as tf\n'), ((5684, 5864), 'adanet.subnetwork.Subnetwork', 'Subnetwork', ([], {'last_layer': '(last_layer if self._return_penultimate_layer else logits)', 'logits': 'logits', 'complexity': '(3)', 'persisted_tensors': 'persisted_tensors', 'shared': 'persisted_tensors'}), '(last_layer=last_layer if self._return_penultimate_layer else\n logits, logits=logits, complexity=3, persisted_tensors=\n persisted_tensors, shared=persisted_tensors)\n', (5694, 5864), False, 'from adanet.subnetwork import Subnetwork\n'), ((6065, 6143), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', ([], {'learning_rate': 'self._learning_rate'}), '(learning_rate=self._learning_rate)\n', (6108, 6143), False, 'from adanet import tf_compat\n'), ((6280, 6355), 'adanet.subnetwork.TrainOpSpec', 'TrainOpSpec', (['train_op', 'self._subnetwork_chief_hooks', 'self._subnetwork_hooks'], {}), '(train_op, self._subnetwork_chief_hooks, self._subnetwork_hooks)\n', (6291, 6355), False, 'from adanet.subnetwork import TrainOpSpec\n'), ((6534, 6632), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', ([], {'learning_rate': 'self._mixture_weight_learning_rate'}), '(learning_rate=self.\n _mixture_weight_learning_rate)\n', (6577, 6632), False, 'from adanet import tf_compat\n'), ((6768, 6856), 'adanet.subnetwork.TrainOpSpec', 'TrainOpSpec', (['train_op', 'self._mixture_weight_chief_hooks', 'self._mixture_weight_hooks'], {}), '(train_op, self._mixture_weight_chief_hooks, self.\n _mixture_weight_hooks)\n', (6779, 6856), False, 'from adanet.subnetwork import TrainOpSpec\n'), ((8331, 8419), 'adanet.subnetwork.Subnetwork', 'Subnetwork', ([], {'last_layer': 'last_layer', 'logits': 'logits', 'complexity': '(1)', 'persisted_tensors': '{}'}), '(last_layer=last_layer, logits=logits, complexity=1,\n persisted_tensors={})\n', (8341, 8419), False, 'from adanet.subnetwork import Subnetwork\n'), ((8623, 8687), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (8666, 8687), False, 'from adanet import tf_compat\n'), ((8897, 8961), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (8940, 8961), False, 'from adanet import tf_compat\n'), ((9610, 9668), 'adanet.subnetwork.Subnetwork', 'Subnetwork', ([], {'last_layer': 'logits', 'logits': 'logits', 'complexity': '(0)'}), '(last_layer=logits, logits=logits, complexity=0)\n', (9620, 9668), False, 'from adanet.subnetwork import Subnetwork\n'), ((9832, 9842), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (9840, 9842), True, 'import tensorflow as tf\n'), ((10616, 10707), 'adanet.subnetwork.Subnetwork', 'Subnetwork', ([], {'last_layer': "features['x']", 'logits': 'logits', 'complexity': '(1)', 'persisted_tensors': '{}'}), "(last_layer=features['x'], logits=logits, complexity=1,\n persisted_tensors={})\n", (10626, 10707), False, 'from adanet.subnetwork import Subnetwork\n'), ((10911, 10975), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (10954, 10975), False, 'from adanet import tf_compat\n'), ((11185, 11283), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 
'tf_compat.v1.train.GradientDescentOptimizer', ([], {'learning_rate': 'self._mixture_weight_learning_rate'}), '(learning_rate=self.\n _mixture_weight_learning_rate)\n', (11228, 11283), False, 'from adanet import tf_compat\n'), ((33447, 33488), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (33469, 33488), True, 'import tensorflow as tf\n'), ((34929, 34961), 'absl.logging.info', 'logging.info', (['"""%s"""', 'eval_results'], {}), "('%s', eval_results)\n", (34941, 34961), False, 'from absl import logging\n'), ((35997, 36043), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""export"""'], {}), "(self.test_subdirectory, 'export')\n", (36009, 36043), False, 'import os\n'), ((38488, 38540), 'adanet.core.report_materializer.ReportMaterializer', 'ReportMaterializer', ([], {'input_fn': 'train_input_fn', 'steps': '(1)'}), '(input_fn=train_input_fn, steps=1)\n', (38506, 38540), False, 'from adanet.core.report_materializer import ReportMaterializer\n'), ((43354, 43394), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (43371, 43394), True, 'from adanet.core import testing_utils as tu\n'), ((44291, 44324), 'tensorflow.reshape', 'tf.reshape', (['images', '[-1, 2, 2, 1]'], {}), '(images, [-1, 2, 2, 1])\n', (44301, 44324), True, 'import tensorflow as tf\n'), ((44350, 44402), 'adanet.tf_compat.v1.keras.initializers.he_normal', 'tf_compat.v1.keras.initializers.he_normal', ([], {'seed': 'seed'}), '(seed=seed)\n', (44391, 44402), False, 'from adanet import tf_compat\n'), ((44962, 44976), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (44973, 44976), True, 'import tensorflow as tf\n'), ((44988, 45076), 'adanet.subnetwork.Subnetwork', 'Subnetwork', ([], {'last_layer': 'x', 'logits': 'logits', 'complexity': 'complexity', 'persisted_tensors': '{}'}), '(last_layer=x, logits=logits, complexity=complexity,\n persisted_tensors={})\n', (44998, 45076), False, 'from adanet.subnetwork import Subnetwork\n'), ((45471, 45535), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', (['self._learning_rate'], {}), '(self._learning_rate)\n', (45514, 45535), False, 'from adanet import tf_compat\n'), ((45746, 45756), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (45754, 45756), True, 'import tensorflow as tf\n'), ((45974, 46015), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (45996, 46015), True, 'import tensorflow as tf\n'), ((46548, 46591), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['xor_features', 'xor_labels'], {}), '(xor_features, xor_labels)\n', (46565, 46591), True, 'from adanet.core import testing_utils as tu\n'), ((46757, 46789), 'absl.logging.info', 'logging.info', (['"""%s"""', 'eval_results'], {}), "('%s', eval_results)\n", (46769, 46789), False, 'from absl import logging\n'), ((46824, 46868), 'adanet.tf_compat.version_greater_or_equal', 'tf_compat.version_greater_or_equal', (['"""1.10.0"""'], {}), "('1.10.0')\n", (46858, 46868), False, 'from adanet import tf_compat\n'), ((49056, 49108), 'adanet.tf_compat.v1.keras.initializers.he_normal', 'tf_compat.v1.keras.initializers.he_normal', ([], {'seed': 'seed'}), '(seed=seed)\n', (49097, 49108), False, 'from adanet import tf_compat\n'), ((49144, 49256), 'adanet.tf_compat.v1.layers.dense', 'tf_compat.v1.layers.dense', (['x'], {'units': 
'logits_dimension', 'activation': 'None', 'kernel_initializer': 'kernel_initializer'}), '(x, units=logits_dimension, activation=None,\n kernel_initializer=kernel_initializer)\n', (49169, 49256), False, 'from adanet import tf_compat\n'), ((49517, 49531), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (49528, 49531), True, 'import tensorflow as tf\n'), ((49543, 49636), 'adanet.subnetwork.Subnetwork', 'Subnetwork', ([], {'last_layer': 'logits', 'logits': 'logits', 'complexity': 'complexity', 'persisted_tensors': '{}'}), '(last_layer=logits, logits=logits, complexity=complexity,\n persisted_tensors={})\n', (49553, 49636), False, 'from adanet.subnetwork import Subnetwork\n'), ((50031, 50095), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', (['self._learning_rate'], {}), '(self._learning_rate)\n', (50074, 50095), False, 'from adanet import tf_compat\n'), ((50311, 50375), 'adanet.tf_compat.v1.train.GradientDescentOptimizer', 'tf_compat.v1.train.GradientDescentOptimizer', (['self._learning_rate'], {}), '(self._learning_rate)\n', (50354, 50375), False, 'from adanet import tf_compat\n'), ((51226, 51267), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (51248, 51267), True, 'import tensorflow as tf\n'), ((53425, 53471), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""export"""'], {}), "(self.test_subdirectory, 'export')\n", (53437, 53471), False, 'import os\n'), ((54818, 54858), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (54835, 54858), True, 'from adanet.core import testing_utils as tu\n'), ((54860, 54899), 'adanet.tf_compat.v1.train.create_global_step', 'tf_compat.v1.train.create_global_step', ([], {}), '()\n', (54897, 54899), False, 'from adanet import tf_compat\n'), ((57156, 57250), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'save_checkpoints_steps': '(1)', 'keep_checkpoint_max': 'keep_checkpoint_max'}), '(save_checkpoints_steps=1, keep_checkpoint_max=\n keep_checkpoint_max)\n', (57178, 57250), True, 'import tensorflow as tf\n'), ((57918, 57958), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (57935, 57958), True, 'from adanet.core import testing_utils as tu\n'), ((58329, 58353), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['dir_'], {}), '(dir_)\n', (58347, 58353), True, 'import tensorflow as tf\n'), ((59388, 59446), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'self._value', 'dtype': 'self._dtype'}), '(value=self._value, dtype=self._dtype)\n', (59408, 59446), True, 'import tensorflow as tf\n'), ((60768, 60859), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)', 'log_step_count_steps': '(2)', 'save_summary_steps': '(2)'}), '(tf_random_seed=42, log_step_count_steps=2,\n save_summary_steps=2)\n', (60790, 60859), True, 'import tensorflow as tf\n'), ((61545, 61585), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (61562, 61585), True, 'from adanet.core import testing_utils as tu\n'), ((62099, 62156), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""subnetwork/t0_dnn"""'], {}), "(self.test_subdirectory, 'subnetwork/t0_dnn')\n", (62111, 62156), False, 'import os\n'), ((62627, 62714), 
'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""ensemble/t0_dnn_grow_complexity_regularized"""'], {}), "(self.test_subdirectory,\n 'ensemble/t0_dnn_grow_complexity_regularized')\n", (62639, 62714), False, 'import os\n'), ((63472, 63563), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)', 'log_step_count_steps': '(2)', 'save_summary_steps': '(2)'}), '(tf_random_seed=42, log_step_count_steps=2,\n save_summary_steps=2)\n', (63494, 63563), True, 'import tensorflow as tf\n'), ((64339, 64379), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (64356, 64379), True, 'from adanet.core import testing_utils as tu\n'), ((64893, 64950), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""subnetwork/t0_dnn"""'], {}), "(self.test_subdirectory, 'subnetwork/t0_dnn')\n", (64905, 64950), False, 'import os\n'), ((65337, 65424), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""ensemble/t0_dnn_grow_complexity_regularized"""'], {}), "(self.test_subdirectory,\n 'ensemble/t0_dnn_grow_complexity_regularized')\n", (65349, 65424), False, 'import os\n'), ((69430, 69473), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': 'seed'}), '(tf_random_seed=seed)\n', (69452, 69473), True, 'import tensorflow as tf\n'), ((69717, 69887), 'adanet.core.estimator.Estimator', 'Estimator', ([], {'head': 'head', 'subnetwork_generator': 'subnetwork_generator', 'max_iteration_steps': '(100)', 'metric_fn': 'metric_fn', 'config': 'run_config', 'model_dir': 'self.test_subdirectory'}), '(head=head, subnetwork_generator=subnetwork_generator,\n max_iteration_steps=100, metric_fn=metric_fn, config=run_config,\n model_dir=self.test_subdirectory)\n', (69726, 69887), False, 'from adanet.core.estimator import Estimator\n'), ((69950, 69993), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (69967, 69993), True, 'from adanet.core import testing_utils as tu\n'), ((70239, 70290), 'os.path.join', 'os.path.join', (['self.test_subdirectory', 'global_subdir'], {}), '(self.test_subdirectory, global_subdir)\n', (70251, 70290), False, 'import os\n'), ((70315, 70370), 'os.path.join', 'os.path.join', (['self.test_subdirectory', 'subnetwork_subdir'], {}), '(self.test_subdirectory, subnetwork_subdir)\n', (70327, 70370), False, 'import os\n'), ((70393, 70446), 'os.path.join', 'os.path.join', (['self.test_subdirectory', 'ensemble_subdir'], {}), '(self.test_subdirectory, ensemble_subdir)\n', (70405, 70446), False, 'import os\n'), ((71502, 71526), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {}), '()\n', (71524, 71526), True, 'import tensorflow as tf\n'), ((72723, 72757), 'tensorflow.constant', 'tf.constant', (['labels'], {'name': '"""labels"""'}), "(labels, name='labels')\n", (72734, 72757), True, 'import tensorflow as tf\n'), ((74128, 74169), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (74150, 74169), True, 'import tensorflow as tf\n'), ((76230, 76271), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (76252, 76271), True, 'import tensorflow as tf\n'), ((77910, 77951), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (77932, 77951), True, 'import tensorflow 
as tf\n'), ((78310, 78353), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (78327, 78353), True, 'from adanet.core import testing_utils as tu\n'), ((79146, 79192), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""export"""'], {}), "(self.test_subdirectory, 'export')\n", (79158, 79192), False, 'import os\n'), ((81585, 81626), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (81607, 81626), True, 'import tensorflow as tf\n'), ((102190, 102230), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (102207, 102230), True, 'from adanet.core import testing_utils as tu\n'), ((106239, 106280), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (106261, 106280), True, 'import tensorflow as tf\n'), ((106308, 106333), 'adanet.subnetwork.SimpleGenerator', 'SimpleGenerator', (['builders'], {}), '(builders)\n', (106323, 106333), False, 'from adanet.subnetwork import SimpleGenerator\n'), ((106618, 106661), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (106635, 106661), True, 'from adanet.core import testing_utils as tu\n'), ((108424, 108560), 'adanet.core.estimator.Estimator', 'Estimator', ([], {'head': 'head', 'subnetwork_generator': 'subnetwork_generator', 'max_iteration_steps': '(3)', 'model_dir': 'self.test_subdirectory', 'debug': '(True)'}), '(head=head, subnetwork_generator=subnetwork_generator,\n max_iteration_steps=3, model_dir=self.test_subdirectory, debug=True)\n', (108433, 108560), False, 'from adanet.core.estimator import Estimator\n'), ((108890, 108931), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (108912, 108931), True, 'import tensorflow as tf\n'), ((109221, 109264), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (109238, 109264), True, 'from adanet.core import testing_utils as tu\n'), ((109724, 109765), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (109746, 109765), True, 'import tensorflow as tf\n'), ((110054, 110097), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (110071, 110097), True, 'from adanet.core import testing_utils as tu\n'), ((110116, 110213), 'adanet.tf_compat.v1.train.CheckpointSaverHook', 'tf_compat.v1.train.CheckpointSaverHook', ([], {'checkpoint_dir': 'self.test_subdirectory', 'save_steps': '(10)'}), '(checkpoint_dir=self.\n test_subdirectory, save_steps=10)\n', (110154, 110213), False, 'from adanet import tf_compat\n'), ((110233, 110277), 'adanet.tf_compat.v1.train.CheckpointSaverListener', 'tf_compat.v1.train.CheckpointSaverListener', ([], {}), '()\n', (110275, 110277), False, 'from adanet import tf_compat\n'), ((113187, 113235), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""original"""'], {}), "(self.test_subdirectory, 'original')\n", (113199, 113235), False, 'import os\n'), ((113253, 113324), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'tf_random_seed': '(42)', 'model_dir': 'original_model_dir'}), 
'(tf_random_seed=42, model_dir=original_model_dir)\n', (113275, 113324), True, 'import tensorflow as tf\n'), ((113712, 113755), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (113729, 113755), True, 'from adanet.core import testing_utils as tu\n'), ((114697, 114752), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['different_features', 'different_labels'], {}), '(different_features, different_labels)\n', (114714, 114752), True, 'from adanet.core import testing_utils as tu\n'), ((3932, 3966), 'adanet.tf_compat.v1.variable_scope', 'tf_compat.v1.variable_scope', (['"""dnn"""'], {}), "('dnn')\n", (3959, 3966), False, 'from adanet import tf_compat\n'), ((4906, 4947), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['hidden_layer'], {'alpha': '(0.2)'}), '(hidden_layer, alpha=0.2)\n', (4922, 4947), True, 'import tensorflow as tf\n'), ((5228, 5265), 'adanet.tf_compat.v1.variable_scope', 'tf_compat.v1.variable_scope', (['"""logits"""'], {}), "('logits')\n", (5255, 5265), False, 'from adanet import tf_compat\n'), ((5558, 5588), 'tensorflow.ones', 'tf.ones', (['[batch_size, 3, 3, 1]'], {}), '([batch_size, 3, 3, 1])\n', (5565, 5588), True, 'import tensorflow as tf\n'), ((5599, 5636), 'adanet.tf_compat.v1.variable_scope', 'tf_compat.v1.variable_scope', (['"""nested"""'], {}), "('nested')\n", (5626, 5636), False, 'from adanet import tf_compat\n'), ((7898, 7935), 'adanet.tf_compat.v1.variable_scope', 'tf_compat.v1.variable_scope', (['"""simple"""'], {}), "('simple')\n", (7925, 7935), False, 'from adanet import tf_compat\n'), ((7957, 8059), 'adanet.tf_compat.v1.feature_column.input_layer', 'tf_compat.v1.feature_column.input_layer', ([], {'features': 'features', 'feature_columns': 'self._feature_columns'}), '(features=features, feature_columns=\n self._feature_columns)\n', (7996, 8059), False, 'from adanet import tf_compat\n'), ((8107, 8144), 'adanet.tf_compat.v1.variable_scope', 'tf_compat.v1.variable_scope', (['"""logits"""'], {}), "('logits')\n", (8134, 8144), False, 'from adanet import tf_compat\n'), ((33536, 33559), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (33557, 33559), True, 'import tensorflow as tf\n'), ((33770, 33802), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (33800, 33802), False, 'from adanet import tf_compat\n'), ((34654, 34697), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (34671, 34697), True, 'from adanet.core import testing_utils as tu\n'), ((35532, 35617), 'adanet.tf_compat.v1.placeholder', 'tf_compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': 'None', 'name': '"""serialized_example"""'}), "(dtype=tf.string, shape=None, name='serialized_example'\n )\n", (35556, 35617), False, 'from adanet import tf_compat\n'), ((38371, 38420), 'tensorflow.constant', 'tf.constant', (['[[1.0], [0.0]]'], {'name': '"""starts_with_a"""'}), "([[1.0], [0.0]], name='starts_with_a')\n", (38382, 38420), True, 'import tensorflow as tf\n'), ((42837, 42877), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (42854, 42877), True, 'from adanet.core import testing_utils as tu\n'), ((44411, 44538), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(3)', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 
'kernel_initializer': 'kernel_initializer'}), "(filters=3, kernel_size=1, padding='same', activation\n ='relu', kernel_initializer=kernel_initializer)\n", (44433, 44538), True, 'import tensorflow as tf\n'), ((44604, 44653), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(1)'}), '(pool_size=2, strides=1)\n', (44629, 44653), True, 'import tensorflow as tf\n'), ((44665, 44690), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (44688, 44690), True, 'import tensorflow as tf\n'), ((44702, 44795), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(3)', 'activation': '"""relu"""', 'kernel_initializer': 'kernel_initializer'}), "(units=3, activation='relu', kernel_initializer=\n kernel_initializer)\n", (44723, 44795), True, 'import tensorflow as tf\n'), ((44829, 44924), 'adanet.tf_compat.v1.layers.Dense', 'tf_compat.v1.layers.Dense', ([], {'units': '(1)', 'activation': 'None', 'kernel_initializer': 'kernel_initializer'}), '(units=1, activation=None, kernel_initializer=\n kernel_initializer)\n', (44854, 44924), False, 'from adanet import tf_compat\n'), ((47475, 47560), 'adanet.tf_compat.v1.placeholder', 'tf_compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': 'None', 'name': '"""serialized_example"""'}), "(dtype=tf.string, shape=None, name='serialized_example'\n )\n", (47499, 47560), False, 'from adanet import tf_compat\n'), ((49390, 49417), 'tensorflow.split', 'tf.split', (['logits', '[1, 1]', '(1)'], {}), '(logits, [1, 1], 1)\n', (49398, 49417), True, 'import tensorflow as tf\n'), ((52952, 53037), 'adanet.tf_compat.v1.placeholder', 'tf_compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': 'None', 'name': '"""serialized_example"""'}), "(dtype=tf.string, shape=None, name='serialized_example'\n )\n", (52976, 53037), False, 'from adanet import tf_compat\n'), ((55182, 55202), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (55200, 55202), False, 'from tensorflow.python.eager import context\n'), ((55706, 55746), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 0.0]]', '[[1.0]]'], {}), '([[1.0, 0.0]], [[1.0]])\n', (55723, 55746), True, 'from adanet.core import testing_utils as tu\n'), ((55750, 55789), 'adanet.tf_compat.v1.train.create_global_step', 'tf_compat.v1.train.create_global_step', ([], {}), '()\n', (55787, 55789), False, 'from adanet import tf_compat\n'), ((58067, 58113), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""*.meta"""'], {}), "(self.test_subdirectory, '*.meta')\n", (58079, 58113), False, 'import os\n'), ((67620, 67696), 'tensorflow_estimator.python.estimator.head.regression_head.RegressionHead', 'regression_head.RegressionHead', ([], {'loss_reduction': 'tf_compat.SUM_OVER_BATCH_SIZE'}), '(loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE)\n', (67650, 67696), False, 'from tensorflow_estimator.python.estimator.head import regression_head\n'), ((67920, 67999), 'tensorflow_estimator.python.estimator.head.binary_class_head.BinaryClassHead', 'binary_class_head.BinaryClassHead', ([], {'loss_reduction': 'tf_compat.SUM_OVER_BATCH_SIZE'}), '(loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE)\n', (67953, 67999), False, 'from tensorflow_estimator.python.estimator.head import binary_class_head\n'), ((72673, 72703), 'tensorflow.constant', 'tf.constant', (['feature'], {'name': 'key'}), '(feature, name=key)\n', (72684, 72703), True, 'import tensorflow as tf\n'), ((75393, 75478), 
'adanet.tf_compat.v1.placeholder', 'tf_compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': 'None', 'name': '"""serialized_example"""'}), "(dtype=tf.string, shape=None, name='serialized_example'\n )\n", (75417, 75478), False, 'from adanet import tf_compat\n'), ((75613, 75713), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', ([], {'features': 'features', 'receiver_tensors': 'serialized_example'}), '(features=features,\n receiver_tensors=serialized_example)\n', (75653, 75713), True, 'import tensorflow as tf\n'), ((77236, 77321), 'adanet.tf_compat.v1.placeholder', 'tf_compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': 'None', 'name': '"""serialized_example"""'}), "(dtype=tf.string, shape=None, name='serialized_example'\n )\n", (77260, 77321), False, 'from adanet import tf_compat\n'), ((77428, 77528), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', ([], {'features': 'features', 'receiver_tensors': 'serialized_example'}), '(features=features,\n receiver_tensors=serialized_example)\n', (77468, 77528), True, 'import tensorflow as tf\n'), ((78842, 78927), 'adanet.tf_compat.v1.placeholder', 'tf_compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': 'None', 'name': '"""serialized_example"""'}), "(dtype=tf.string, shape=None, name='serialized_example'\n )\n", (78866, 78927), False, 'from adanet import tf_compat\n'), ((79445, 79616), 'tensorflow.contrib.estimator.export_saved_model_for_mode', 'tf.contrib.estimator.export_saved_model_for_mode', (['estimator'], {'export_dir_base': 'export_dir_base', 'input_receiver_fn': 'serving_input_fn', 'mode': 'tf.estimator.ModeKeys.EVAL'}), '(estimator, export_dir_base\n =export_dir_base, input_receiver_fn=serving_input_fn, mode=tf.estimator\n .ModeKeys.EVAL)\n', (79493, 79616), True, 'import tensorflow as tf\n'), ((79700, 79736), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', (['export_dir_base'], {}), '(export_dir_base)\n', (79719, 79736), True, 'import tensorflow as tf\n'), ((79750, 79770), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (79768, 79770), False, 'from tensorflow.python.eager import context\n'), ((82337, 82422), 'adanet.tf_compat.v1.placeholder', 'tf_compat.v1.placeholder', ([], {'dtype': 'tf.string', 'shape': 'None', 'name': '"""serialized_example"""'}), "(dtype=tf.string, shape=None, name='serialized_example'\n )\n", (82361, 82422), False, 'from adanet import tf_compat\n'), ((82563, 82670), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', ([], {'features': 'tensor_features', 'receiver_tensors': 'serialized_example'}), '(features=tensor_features,\n receiver_tensors=serialized_example)\n', (82603, 82670), True, 'import tensorflow as tf\n'), ((107236, 107327), 'tensorflow_estimator.python.estimator.head.regression_head.RegressionHead', 'regression_head.RegressionHead', ([], {'name': '"""y"""', 'loss_reduction': 'tf_compat.SUM_OVER_BATCH_SIZE'}), "(name='y', loss_reduction=tf_compat.\n SUM_OVER_BATCH_SIZE)\n", (107266, 107327), False, 'from tensorflow_estimator.python.estimator.head import regression_head\n'), ((107569, 107660), 'tensorflow_estimator.python.estimator.head.regression_head.RegressionHead', 'regression_head.RegressionHead', ([], {'name': '"""y"""', 'loss_reduction': 'tf_compat.SUM_OVER_BATCH_SIZE'}), "(name='y', loss_reduction=tf_compat.\n SUM_OVER_BATCH_SIZE)\n", (107599, 107660), False, 'from 
tensorflow_estimator.python.estimator.head import regression_head\n'), ((111001, 111046), 'tensorflow.contrib.learn.RunConfig', 'tf.contrib.learn.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (111027, 111046), True, 'import tensorflow as tf\n'), ((111447, 111468), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (111457, 111468), False, 'import json\n'), ((111488, 111533), 'tensorflow.contrib.learn.RunConfig', 'tf.contrib.learn.RunConfig', ([], {'tf_random_seed': '(42)'}), '(tf_random_seed=42)\n', (111514, 111533), True, 'import tensorflow as tf\n'), ((111909, 111952), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (111926, 111952), True, 'from adanet.core import testing_utils as tu\n'), ((4008, 4051), 'adanet.tf_compat.v1.variable_scope', 'tf_compat.v1.variable_scope', (['"""hidden_layer"""'], {}), "('hidden_layer')\n", (4035, 4051), False, 'from adanet import tf_compat\n'), ((4258, 4294), 'tensorflow.constant', 'tf.constant', (['[1]'], {'name': '"""disjoint_op"""'}), "([1], name='disjoint_op')\n", (4269, 4294), True, 'import tensorflow as tf\n'), ((4605, 4658), 'tensorflow.concat', 'tf.concat', (['[hidden_layer, other_hidden_layer]'], {'axis': '(1)'}), '([hidden_layer, other_hidden_layer], axis=1)\n', (4614, 4658), True, 'import tensorflow as tf\n'), ((10533, 10589), 'adanet.tf_compat.v1.glorot_uniform_initializer', 'tf_compat.v1.glorot_uniform_initializer', ([], {'seed': 'self._seed'}), '(seed=self._seed)\n', (10572, 10589), False, 'from adanet import tf_compat\n'), ((33979, 33988), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (33986, 33988), True, 'from adanet.core import testing_utils as tu\n'), ((35234, 35287), 'adanet.core.testing_utils.dataset_input_fn', 'tu.dataset_input_fn', ([], {'features': '[0.0, 0.0]', 'labels': 'None'}), '(features=[0.0, 0.0], labels=None)\n', (35253, 35287), True, 'from adanet.core import testing_utils as tu\n'), ((19835, 19862), 'adanet.core.testing_utils.ModifierSessionRunHook', 'tu.ModifierSessionRunHook', ([], {}), '()\n', (19860, 19862), True, 'from adanet.core import testing_utils as tu\n'), ((28863, 28876), 'adanet.ensemble.AllStrategy', 'AllStrategy', ([], {}), '()\n', (28874, 28876), False, 'from adanet.ensemble import AllStrategy\n'), ((29362, 29375), 'adanet.ensemble.AllStrategy', 'AllStrategy', ([], {}), '()\n', (29373, 29375), False, 'from adanet.ensemble import AllStrategy\n'), ((29418, 29450), 'adanet.ensemble.ComplexityRegularizedEnsembler', 'ComplexityRegularizedEnsembler', ([], {}), '()\n', (29448, 29450), False, 'from adanet.ensemble import ComplexityRegularizedEnsembler\n'), ((29466, 29529), 'adanet.ensemble.ComplexityRegularizedEnsembler', 'ComplexityRegularizedEnsembler', ([], {'use_bias': '(True)', 'name': '"""with_bias"""'}), "(use_bias=True, name='with_bias')\n", (29496, 29529), False, 'from adanet.ensemble import ComplexityRegularizedEnsembler\n'), ((30007, 30021), 'adanet.ensemble.SoloStrategy', 'SoloStrategy', ([], {}), '()\n', (30019, 30021), False, 'from adanet.ensemble import SoloStrategy\n'), ((30505, 30519), 'adanet.ensemble.SoloStrategy', 'SoloStrategy', ([], {}), '()\n', (30517, 30519), False, 'from adanet.ensemble import SoloStrategy\n'), ((31010, 31023), 'adanet.ensemble.AllStrategy', 'AllStrategy', ([], {}), '()\n', (31021, 31023), False, 'from adanet.ensemble import AllStrategy\n'), ((31025, 31039), 'adanet.ensemble.GrowStrategy', 'GrowStrategy', ([], {}), '()\n', (31037, 
31039), False, 'from adanet.ensemble import GrowStrategy\n'), ((31056, 31070), 'adanet.ensemble.SoloStrategy', 'SoloStrategy', ([], {}), '()\n', (31068, 31070), False, 'from adanet.ensemble import SoloStrategy\n'), ((38288, 38341), 'tensorflow.constant', 'tf.constant', (["[['alice'], ['bob']]"], {'name': '"""human_names"""'}), "([['alice'], ['bob']], name='human_names')\n", (38299, 38341), True, 'import tensorflow as tf\n'), ((38581, 38613), 'tensorflow_estimator.python.estimator.head.regression_head.RegressionHead', 'regression_head.RegressionHead', ([], {}), '()\n', (38611, 38613), False, 'from tensorflow_estimator.python.estimator.head import regression_head\n'), ((38879, 38911), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (38909, 38911), False, 'from adanet import tf_compat\n'), ((42236, 42276), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (42253, 42276), True, 'from adanet.core import testing_utils as tu\n'), ((43185, 43207), 'tensorflow_estimator.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss', 'binary_class_head_v1', ([], {}), '()\n', (43205, 43207), True, 'from tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1\n'), ((46056, 46065), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (46063, 46065), True, 'from adanet.core import testing_utils as tu\n'), ((47169, 47232), 'adanet.core.testing_utils.dataset_input_fn', 'tu.dataset_input_fn', ([], {'features': '[0.0, 0.0, 0.0, 0.0]', 'labels': 'None'}), '(features=[0.0, 0.0, 0.0, 0.0], labels=None)\n', (47188, 47232), True, 'from adanet.core import testing_utils as tu\n'), ((51969, 51994), 'adanet.subnetwork.SimpleGenerator', 'SimpleGenerator', (['builders'], {}), '(builders)\n', (51984, 51994), False, 'from adanet.subnetwork import SimpleGenerator\n'), ((52045, 52088), 'adanet.core.evaluator.Evaluator', 'Evaluator', ([], {'input_fn': 'train_input_fn', 'steps': '(1)'}), '(input_fn=train_input_fn, steps=1)\n', (52054, 52088), False, 'from adanet.core.evaluator import Evaluator\n'), ((52570, 52633), 'adanet.core.testing_utils.dataset_input_fn', 'tu.dataset_input_fn', ([], {'features': '[0.0, 0.0, 0.0, 0.0]', 'labels': 'None'}), '(features=[0.0, 0.0, 0.0, 0.0], labels=None)\n', (52589, 52633), True, 'from adanet.core import testing_utils as tu\n'), ((54468, 54508), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (54485, 54508), True, 'from adanet.core import testing_utils as tu\n'), ((54556, 54565), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (54563, 54565), True, 'from adanet.core import testing_utils as tu\n'), ((57397, 57437), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (57414, 57437), True, 'from adanet.core import testing_utils as tu\n'), ((57485, 57494), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (57492, 57494), True, 'from adanet.core import testing_utils as tu\n'), ((57685, 57717), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (57715, 57717), False, 'from adanet import tf_compat\n'), ((60228, 60272), 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': '(labels - logits)'}), '(input_tensor=labels - logits)\n', 
(60242, 60272), True, 'import tensorflow as tf\n'), ((61037, 61077), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (61054, 61077), True, 'from adanet.core import testing_utils as tu\n'), ((61125, 61134), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (61132, 61134), True, 'from adanet.core import testing_utils as tu\n'), ((61325, 61357), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (61355, 61357), False, 'from adanet import tf_compat\n'), ((63741, 63781), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (63758, 63781), True, 'from adanet.core import testing_utils as tu\n'), ((63829, 63838), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (63836, 63838), True, 'from adanet.core import testing_utils as tu\n'), ((64029, 64061), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (64059, 64061), False, 'from adanet import tf_compat\n'), ((71655, 71695), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (71672, 71695), True, 'from adanet.core import testing_utils as tu\n'), ((71740, 71749), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (71747, 71749), True, 'from adanet.core import testing_utils as tu\n'), ((71940, 71972), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (71970, 71972), False, 'from adanet import tf_compat\n'), ((72333, 72342), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (72340, 72342), True, 'from adanet.core import testing_utils as tu\n'), ((74298, 74338), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (74315, 74338), True, 'from adanet.core import testing_utils as tu\n'), ((74386, 74395), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (74393, 74395), True, 'from adanet.core import testing_utils as tu\n'), ((74586, 74618), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (74616, 74618), False, 'from adanet import tf_compat\n'), ((75581, 75599), 'tensorflow.constant', 'tf.constant', (['value'], {}), '(value)\n', (75592, 75599), True, 'import tensorflow as tf\n'), ((76400, 76440), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (76417, 76440), True, 'from adanet.core import testing_utils as tu\n'), ((76488, 76497), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (76495, 76497), True, 'from adanet.core import testing_utils as tu\n'), ((76688, 76720), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (76718, 76720), False, 'from adanet import tf_compat\n'), ((77396, 77414), 'tensorflow.constant', 'tf.constant', (['value'], {}), '(value)\n', (77407, 77414), True, 'import tensorflow as tf\n'), ((78098, 78133), 'tensorflow_estimator.python.estimator.head.binary_class_head.BinaryClassHead', 'binary_class_head.BinaryClassHead', ([], {}), '()\n', (78131, 78133), False, 'from tensorflow_estimator.python.estimator.head import binary_class_head\n'), ((79888, 79925), 'os.path.join', 'os.path.join', (['export_dir_base', 'subdir'], {}), '(export_dir_base, subdir)\n', (79900, 79925), 
False, 'import os\n'), ((81763, 81772), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (81770, 81772), True, 'from adanet.core import testing_utils as tu\n'), ((81965, 81985), 'adanet.distributed.placement.RoundRobinStrategy', 'RoundRobinStrategy', ([], {}), '()\n', (81983, 81985), False, 'from adanet.distributed.placement import RoundRobinStrategy\n'), ((82531, 82549), 'tensorflow.constant', 'tf.constant', (['value'], {}), '(value)\n', (82542, 82549), True, 'import tensorflow as tf\n'), ((83367, 83432), 'absl.logging.warning', 'logging.warning', (['"""Testing estimator#export_savedmodel: %s"""', 'error'], {}), "('Testing estimator#export_savedmodel: %s', error)\n", (83382, 83432), False, 'from absl import logging\n'), ((102268, 102277), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (102275, 102277), True, 'from adanet.core import testing_utils as tu\n'), ((102419, 102451), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (102449, 102451), False, 'from adanet import tf_compat\n'), ((102594, 102646), 'adanet.core.report_materializer.ReportMaterializer', 'ReportMaterializer', ([], {'input_fn': 'train_input_fn', 'steps': '(1)'}), '(input_fn=train_input_fn, steps=1)\n', (102612, 102646), False, 'from adanet.core.report_materializer import ReportMaterializer\n'), ((90294, 90463), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (90312, 90463), False, 'from adanet.subnetwork import MaterializedReport\n'), ((90681, 90850), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=1, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (90699, 90850), False, 'from adanet.subnetwork import MaterializedReport\n'), ((91113, 91282), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (91131, 91282), False, 'from adanet.subnetwork import MaterializedReport\n'), ((91500, 91641), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""previous_ensemble"""', 'hparams': '{}', 'attributes': '{}', 'metrics': '{}', 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='previous_ensemble', hparams={},\n attributes={}, metrics={}, included_in_final_ensemble=False)\n", (91518, 91641), False, 'from adanet.subnetwork import MaterializedReport\n'), ((91778, 91947), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': 
'(True)'}), "(iteration_number=1, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (91796, 91947), False, 'from adanet.subnetwork import MaterializedReport\n'), ((97731, 97901), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (97749, 97901), False, 'from adanet.subnetwork import MaterializedReport\n'), ((98120, 98290), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=1, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (98138, 98290), False, 'from adanet.subnetwork import MaterializedReport\n'), ((98554, 98725), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_1"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=0, name='dnn_1', hparams={'layer_size':\n 1}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (98572, 98725), False, 'from adanet.subnetwork import MaterializedReport\n'), ((98944, 99115), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_2"""', 'hparams': "{'layer_size': 2}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=0, name='dnn_2', hparams={'layer_size':\n 2}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (98962, 99115), False, 'from adanet.subnetwork import MaterializedReport\n'), ((99334, 99504), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (99352, 99504), False, 'from adanet.subnetwork import MaterializedReport\n'), ((99723, 99864), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""previous_ensemble"""', 'hparams': '{}', 'attributes': '{}', 'metrics': '{}', 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='previous_ensemble', hparams={},\n attributes={}, metrics={}, included_in_final_ensemble=False)\n", (99741, 99864), False, 'from adanet.subnetwork import MaterializedReport\n'), ((100001, 100172), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn_1"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='dnn_1', hparams={'layer_size':\n 1}, 
attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (100019, 100172), False, 'from adanet.subnetwork import MaterializedReport\n'), ((100391, 100562), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn_2"""', 'hparams': "{'layer_size': 2}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='dnn_2', hparams={'layer_size':\n 2}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (100409, 100562), False, 'from adanet.subnetwork import MaterializedReport\n'), ((100781, 100951), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=1, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (100799, 100951), False, 'from adanet.subnetwork import MaterializedReport\n'), ((106374, 106383), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (106381, 106383), True, 'from adanet.core import testing_utils as tu\n'), ((109037, 109046), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (109044, 109046), True, 'from adanet.core import testing_utils as tu\n'), ((109871, 109880), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (109878, 109880), True, 'from adanet.core import testing_utils as tu\n'), ((113540, 113549), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (113547, 113549), True, 'from adanet.core import testing_utils as tu\n'), ((114140, 114188), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""replayed"""'], {}), "(self.test_subdirectory, 'replayed')\n", (114152, 114188), False, 'import os\n'), ((114441, 114450), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (114448, 114450), True, 'from adanet.core import testing_utils as tu\n'), ((114627, 114673), 'adanet.replay.Config', 'replay.Config', ([], {'best_ensemble_indices': '[2, 3, 1]'}), '(best_ensemble_indices=[2, 3, 1])\n', (114640, 114673), False, 'from adanet import replay\n'), ((4308, 4347), 'adanet.tf_compat.v1.colocate_with', 'tf_compat.v1.colocate_with', (['disjoint_op'], {}), '(disjoint_op)\n', (4334, 4347), False, 'from adanet import tf_compat\n'), ((4395, 4422), 'tensorflow.matmul', 'tf.matmul', (["features['x']", 'w'], {}), "(features['x'], w)\n", (4404, 4422), True, 'import tensorflow as tf\n'), ((5390, 5440), 'adanet.tf_compat.v1.glorot_uniform_initializer', 'tf_compat.v1.glorot_uniform_initializer', ([], {'seed': 'seed'}), '(seed=seed)\n', (5429, 5440), False, 'from adanet import tf_compat\n'), ((7016, 7046), 'tensorflow.constant', 'tf.constant', (['(3)'], {'dtype': 'tf.int32'}), '(3, dtype=tf.int32)\n', (7027, 7046), True, 'import tensorflow as tf\n'), ((8267, 8317), 'adanet.tf_compat.v1.glorot_uniform_initializer', 'tf_compat.v1.glorot_uniform_initializer', ([], {'seed': 'seed'}), '(seed=seed)\n', (8306, 8317), False, 'from adanet import tf_compat\n'), ((9527, 9575), 'adanet.tf_compat.v1.glorot_uniform_initializer', 'tf_compat.v1.glorot_uniform_initializer', ([], {'seed': '(42)'}), '(seed=42)\n', (9566, 9575), False, 'from adanet import tf_compat\n'), ((26034, 26074), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 
1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (26051, 26074), True, 'from adanet.core import testing_utils as tu\n'), ((26594, 26634), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[1.0]]'], {}), '([[1.0, 1.0]], [[1.0]])\n', (26611, 26634), True, 'from adanet.core import testing_utils as tu\n'), ((27182, 27222), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (27199, 27222), True, 'from adanet.core import testing_utils as tu\n'), ((27780, 27820), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (27797, 27820), True, 'from adanet.core import testing_utils as tu\n'), ((28351, 28391), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (28368, 28391), True, 'from adanet.core import testing_utils as tu\n'), ((36797, 36910), 'tensorflow.feature_column.categorical_column_with_hash_bucket', 'tf.feature_column.categorical_column_with_hash_bucket', ([], {'key': '"""human_names"""', 'hash_bucket_size': '(4)', 'dtype': 'tf.string'}), "(key='human_names',\n hash_bucket_size=4, dtype=tf.string)\n", (36850, 36910), True, 'import tensorflow as tf\n'), ((37160, 37291), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'tf.feature_column.categorical_column_with_vocabulary_list', ([], {'key': '"""human_names"""', 'vocabulary_list': "['alice', 'bob']", 'dtype': 'tf.string'}), "(key='human_names',\n vocabulary_list=['alice', 'bob'], dtype=tf.string)\n", (37217, 37291), True, 'import tensorflow as tf\n'), ((37559, 37672), 'tensorflow.feature_column.categorical_column_with_hash_bucket', 'tf.feature_column.categorical_column_with_hash_bucket', ([], {'key': '"""human_names"""', 'hash_bucket_size': '(4)', 'dtype': 'tf.string'}), "(key='human_names',\n hash_bucket_size=4, dtype=tf.string)\n", (37612, 37672), True, 'import tensorflow as tf\n'), ((37922, 38053), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'tf.feature_column.categorical_column_with_vocabulary_list', ([], {'key': '"""human_names"""', 'vocabulary_list': "['alice', 'bob']", 'dtype': 'tf.string'}), "(key='human_names',\n vocabulary_list=['alice', 'bob'], dtype=tf.string)\n", (37979, 38053), True, 'import tensorflow as tf\n'), ((42368, 42377), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (42375, 42377), True, 'from adanet.core import testing_utils as tu\n'), ((42576, 42608), 'adanet.tf_compat.v1.zeros_initializer', 'tf_compat.v1.zeros_initializer', ([], {}), '()\n', (42606, 42608), False, 'from adanet import tf_compat\n'), ((51480, 51505), 'tensorflow.constant', 'tf.constant', (['xor_features'], {}), '(xor_features)\n', (51491, 51505), True, 'import tensorflow as tf\n'), ((51536, 51559), 'tensorflow.constant', 'tf.constant', (['xor_labels'], {}), '(xor_labels)\n', (51547, 51559), True, 'import tensorflow as tf\n'), ((51580, 51603), 'tensorflow.constant', 'tf.constant', (['xor_labels'], {}), '(xor_labels)\n', (51591, 51603), True, 'import tensorflow as tf\n'), ((55338, 55378), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (55355, 55378), True, 'from adanet.core import testing_utils as tu\n'), ((55430, 55439), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (55437, 55439), True, 'from adanet.core import testing_utils as 
tu\n'), ((55965, 56074), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'save_checkpoints_steps': '(1)', 'keep_checkpoint_max': '(3)', 'model_dir': 'self.test_subdirectory'}), '(save_checkpoints_steps=1, keep_checkpoint_max=3,\n model_dir=self.test_subdirectory)\n', (55987, 56074), True, 'import tensorflow as tf\n'), ((66371, 66409), 'adanet.tf_compat.v1.metrics.mean', 'tf_compat.v1.metrics.mean', (['predictions'], {}), '(predictions)\n', (66396, 66409), False, 'from adanet import tf_compat\n'), ((72361, 72398), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""x"""'], {}), "('x')\n", (72393, 72398), True, 'import tensorflow as tf\n'), ((79051, 79074), 'tensorflow.constant', 'tf.constant', (['XOR_LABELS'], {}), '(XOR_LABELS)\n', (79062, 79074), True, 'import tensorflow as tf\n'), ((80095, 80211), 'adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', 'tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', (["signature_def.outputs['metrics/average_loss/value']"], {}), "(signature_def.\n outputs['metrics/average_loss/value'])\n", (80153, 80211), False, 'from adanet import tf_compat\n'), ((80295, 80415), 'adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', 'tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', (["signature_def.outputs['metrics/average_loss/update_op']"], {}), "(signature_def.\n outputs['metrics/average_loss/update_op'])\n", (80353, 80415), False, 'from adanet import tf_compat\n'), ((80439, 80555), 'adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', 'tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', (["signature_def.outputs['metrics/accuracy/update_op']"], {}), "(signature_def.\n outputs['metrics/accuracy/update_op'])\n", (80497, 80555), False, 'from adanet import tf_compat\n'), ((80589, 80703), 'adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', 'tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', (["signature_def.outputs['metrics/recall/update_op']"], {}), "(signature_def.\n outputs['metrics/recall/update_op'])\n", (80647, 80703), False, 'from adanet import tf_compat\n'), ((80860, 80976), 'adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', 'tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', (["signature_def.outputs['metrics/average_loss/value']"], {}), "(signature_def.\n outputs['metrics/average_loss/value'])\n", (80918, 80976), False, 'from adanet import tf_compat\n'), ((81091, 81201), 'adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', 'tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', (["signature_def.outputs['metrics/recall/value']"], {}), "(signature_def.\n outputs['metrics/recall/value'])\n", (81149, 81201), False, 'from adanet import tf_compat\n'), ((81317, 81429), 'adanet.tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', 'tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info', (["signature_def.outputs['metrics/accuracy/value']"], {}), "(signature_def.\n outputs['metrics/accuracy/value'])\n", (81375, 81429), False, 'from adanet import tf_compat\n'), ((85418, 85587), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n 
included_in_final_ensemble=True)\n", (85436, 85587), False, 'from adanet.subnetwork import MaterializedReport\n'), ((86688, 86859), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_1"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=0, name='dnn_1', hparams={'layer_size':\n 1}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (86706, 86859), False, 'from adanet.subnetwork import MaterializedReport\n'), ((87078, 87249), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_2"""', 'hparams': "{'layer_size': 2}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=0, name='dnn_2', hparams={'layer_size':\n 2}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (87096, 87249), False, 'from adanet.subnetwork import MaterializedReport\n'), ((87468, 87638), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (87486, 87638), False, 'from adanet.subnetwork import MaterializedReport\n'), ((88232, 88401), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (88250, 88401), False, 'from adanet.subnetwork import MaterializedReport\n'), ((88699, 88840), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""previous_ensemble"""', 'hparams': '{}', 'attributes': '{}', 'metrics': '{}', 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='previous_ensemble', hparams={},\n attributes={}, metrics={}, included_in_final_ensemble=False)\n", (88717, 88840), False, 'from adanet.subnetwork import MaterializedReport\n'), ((89009, 89178), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=1, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (89027, 89178), False, 'from adanet.subnetwork import MaterializedReport\n'), ((89477, 89618), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(2)', 'name': '"""previous_ensemble"""', 'hparams': '{}', 'attributes': '{}', 'metrics': '{}', 'included_in_final_ensemble': '(False)'}), "(iteration_number=2, name='previous_ensemble', hparams={},\n attributes={}, metrics={}, included_in_final_ensemble=False)\n", (89495, 89618), False, 'from adanet.subnetwork import MaterializedReport\n'), ((89787, 89956), 
'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(2)', 'name': '"""dnn"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=2, name='dnn', hparams={'layer_size': 1\n }, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (89805, 89956), False, 'from adanet.subnetwork import MaterializedReport\n'), ((93034, 93205), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_1"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=0, name='dnn_1', hparams={'layer_size':\n 1}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (93052, 93205), False, 'from adanet.subnetwork import MaterializedReport\n'), ((93472, 93643), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_2"""', 'hparams': "{'layer_size': 2}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=0, name='dnn_2', hparams={'layer_size':\n 2}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (93490, 93643), False, 'from adanet.subnetwork import MaterializedReport\n'), ((93910, 94080), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(0)', 'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=0, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (93928, 94080), False, 'from adanet.subnetwork import MaterializedReport\n'), ((94380, 94521), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""previous_ensemble"""', 'hparams': '{}', 'attributes': '{}', 'metrics': '{}', 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='previous_ensemble', hparams={},\n attributes={}, metrics={}, included_in_final_ensemble=False)\n", (94398, 94521), False, 'from adanet.subnetwork import MaterializedReport\n'), ((94690, 94861), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn_1"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='dnn_1', hparams={'layer_size':\n 1}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (94708, 94861), False, 'from adanet.subnetwork import MaterializedReport\n'), ((95128, 95299), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 'name': '"""dnn_2"""', 'hparams': "{'layer_size': 2}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=1, name='dnn_2', hparams={'layer_size':\n 2}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (95146, 95299), False, 'from adanet.subnetwork import MaterializedReport\n'), ((95566, 95736), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(1)', 
'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=1, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (95584, 95736), False, 'from adanet.subnetwork import MaterializedReport\n'), ((96036, 96177), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(2)', 'name': '"""previous_ensemble"""', 'hparams': '{}', 'attributes': '{}', 'metrics': '{}', 'included_in_final_ensemble': '(False)'}), "(iteration_number=2, name='previous_ensemble', hparams={},\n attributes={}, metrics={}, included_in_final_ensemble=False)\n", (96054, 96177), False, 'from adanet.subnetwork import MaterializedReport\n'), ((96346, 96517), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(2)', 'name': '"""dnn_1"""', 'hparams': "{'layer_size': 1}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=2, name='dnn_1', hparams={'layer_size':\n 1}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (96364, 96517), False, 'from adanet.subnetwork import MaterializedReport\n'), ((96784, 96955), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(2)', 'name': '"""dnn_2"""', 'hparams': "{'layer_size': 2}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(False)'}), "(iteration_number=2, name='dnn_2', hparams={'layer_size':\n 2}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=False)\n", (96802, 96955), False, 'from adanet.subnetwork import MaterializedReport\n'), ((97222, 97392), 'adanet.subnetwork.MaterializedReport', 'MaterializedReport', ([], {'iteration_number': '(2)', 'name': '"""dnn_3"""', 'hparams': "{'layer_size': 3}", 'attributes': "{'complexity': 3}", 'metrics': "{'moo': 3}", 'included_in_final_ensemble': '(True)'}), "(iteration_number=2, name='dnn_3', hparams={'layer_size':\n 3}, attributes={'complexity': 3}, metrics={'moo': 3},\n included_in_final_ensemble=True)\n", (97240, 97392), False, 'from adanet.subnetwork import MaterializedReport\n'), ((105881, 105921), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0]]', '[[0.0]]'], {}), '([[1.0, 1.0]], [[0.0]])\n', (105898, 105921), True, 'from adanet.core import testing_utils as tu\n'), ((107454, 107470), 'tensorflow.zeros', 'tf.zeros', (['[1, 1]'], {}), '([1, 1])\n', (107462, 107470), True, 'import tensorflow as tf\n'), ((107779, 107799), 'tensorflow.math.log', 'tf.math.log', (['[[0.0]]'], {}), '([[0.0]])\n', (107790, 107799), True, 'import tensorflow as tf\n'), ((111716, 111725), 'adanet.core.testing_utils.head', 'tu.head', ([], {}), '()\n', (111723, 111725), True, 'from adanet.core import testing_utils as tu\n'), ((112643, 112686), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['XOR_FEATURES', 'XOR_LABELS'], {}), '(XOR_FEATURES, XOR_LABELS)\n', (112660, 112686), True, 'from adanet.core import testing_utils as tu\n'), ((112798, 112889), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[0.0, 0.0], [0.0, 0], [0.0, 0.0], [0.0, 0.0]]', '[[0], [0], [0], [0]]'], {}), '([[0.0, 0.0], [0.0, 0], [0.0, 0.0], [0.0, 0.0]], [[0], [0],\n [0], [0]])\n', (112815, 112889), True, 'from adanet.core import testing_utils as tu\n'), 
((4157, 4207), 'adanet.tf_compat.v1.glorot_uniform_initializer', 'tf_compat.v1.glorot_uniform_initializer', ([], {'seed': 'seed'}), '(seed=seed)\n', (4196, 4207), False, 'from adanet import tf_compat\n'), ((7087, 7117), 'tensorflow.constant', 'tf.constant', (['(3)'], {'dtype': 'tf.int32'}), '(3, dtype=tf.int32)\n', (7098, 7117), True, 'import tensorflow as tf\n'), ((7151, 7181), 'tensorflow.constant', 'tf.constant', (['(3)'], {'dtype': 'tf.int32'}), '(3, dtype=tf.int32)\n', (7162, 7181), True, 'import tensorflow as tf\n'), ((35706, 35749), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.0]]'], {'name': '"""serving_x"""'}), "([[0.0, 0.0]], name='serving_x')\n", (35717, 35749), True, 'import tensorflow as tf\n'), ((36310, 36346), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', (['export_dir_base'], {}), '(export_dir_base)\n', (36329, 36346), True, 'import tensorflow as tf\n'), ((31531, 31594), 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (["({'x': XOR_FEATURES}, XOR_LABELS)"], {}), "(({'x': XOR_FEATURES}, XOR_LABELS))\n", (31559, 31594), True, 'import tensorflow as tf\n'), ((46246, 46296), 'adanet.core.testing_utils.dummy_input_fn', 'tu.dummy_input_fn', (['[[1.0, 1.0, 0.1, 0.1]]', '[[0.0]]'], {}), '([[1.0, 1.0, 0.1, 0.1]], [[0.0]])\n', (46263, 46296), True, 'from adanet.core import testing_utils as tu\n'), ((47649, 47702), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.0, 0.0, 0.0]]'], {'name': '"""serving_x"""'}), "([[0.0, 0.0, 0.0, 0.0]], name='serving_x')\n", (47660, 47702), True, 'import tensorflow as tf\n'), ((53126, 53179), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.0, 0.0, 0.0]]'], {'name': '"""serving_x"""'}), "([[0.0, 0.0, 0.0, 0.0]], name='serving_x')\n", (53137, 53179), True, 'import tensorflow as tf\n'), ((53738, 53774), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', (['export_dir_base'], {}), '(export_dir_base)\n', (53757, 53774), True, 'import tensorflow as tf\n'), ((79006, 79031), 'tensorflow.constant', 'tf.constant', (['XOR_FEATURES'], {}), '(XOR_FEATURES)\n', (79017, 79031), True, 'import tensorflow as tf\n'), ((107413, 107438), 'tensorflow.math.log', 'tf.math.log', (['[[1.0, 0.0]]'], {}), '([[1.0, 0.0]])\n', (107424, 107438), True, 'import tensorflow as tf\n'), ((107746, 107761), 'tensorflow.ones', 'tf.ones', (['[1, 2]'], {}), '([1, 2])\n', (107753, 107761), True, 'import tensorflow as tf\n'), ((107954, 108045), 'tensorflow_estimator.python.estimator.head.regression_head.RegressionHead', 'regression_head.RegressionHead', ([], {'name': '"""y"""', 'loss_reduction': 'tf_compat.SUM_OVER_BATCH_SIZE'}), "(name='y', loss_reduction=tf_compat.\n SUM_OVER_BATCH_SIZE)\n", (107984, 108045), False, 'from tensorflow_estimator.python.estimator.head import regression_head\n'), ((108153, 108168), 'tensorflow.ones', 'tf.ones', (['[1, 2]'], {}), '([1, 2])\n', (108160, 108168), True, 'import tensorflow as tf\n'), ((108211, 108231), 'tensorflow.math.log', 'tf.math.log', (['[[0.0]]'], {}), '([[0.0]])\n', (108222, 108231), True, 'import tensorflow as tf\n'), ((51698, 51793), 'tensorflow_estimator.python.estimator.head.regression_head.RegressionHead', 'regression_head.RegressionHead', ([], {'name': '"""head1"""', 'loss_reduction': 'tf_compat.SUM_OVER_BATCH_SIZE'}), "(name='head1', loss_reduction=tf_compat.\n SUM_OVER_BATCH_SIZE)\n", (51728, 51793), False, 'from tensorflow_estimator.python.estimator.head import regression_head\n'), ((51819, 51914), 'tensorflow_estimator.python.estimator.head.regression_head.RegressionHead', 
'regression_head.RegressionHead', ([], {'name': '"""head2"""', 'loss_reduction': 'tf_compat.SUM_OVER_BATCH_SIZE'}), "(name='head2', loss_reduction=tf_compat.\n SUM_OVER_BATCH_SIZE)\n", (51849, 51914), False, 'from tensorflow_estimator.python.estimator.head import regression_head\n'), ((17081, 17124), 'adanet.core.testing_utils.ModifierSessionRunHook', 'tu.ModifierSessionRunHook', (['"""chief_hook_var"""'], {}), "('chief_hook_var')\n", (17106, 17124), True, 'from adanet.core import testing_utils as tu\n'), ((17190, 17227), 'adanet.core.testing_utils.ModifierSessionRunHook', 'tu.ModifierSessionRunHook', (['"""hook_var"""'], {}), "('hook_var')\n", (17215, 17227), True, 'from adanet.core import testing_utils as tu\n'), ((17784, 17827), 'adanet.core.testing_utils.ModifierSessionRunHook', 'tu.ModifierSessionRunHook', (['"""chief_hook_var"""'], {}), "('chief_hook_var')\n", (17809, 17827), True, 'from adanet.core import testing_utils as tu\n'), ((17924, 17961), 'adanet.core.testing_utils.ModifierSessionRunHook', 'tu.ModifierSessionRunHook', (['"""hook_var"""'], {}), "('hook_var')\n", (17949, 17961), True, 'from adanet.core import testing_utils as tu\n'), ((68615, 68678), 'adanet.tf_compat.v1.Summary.Value', 'tf_compat.v1.Summary.Value', ([], {'tag': '"""summary_tag"""', 'simple_value': '(1.0)'}), "(tag='summary_tag', simple_value=1.0)\n", (68641, 68678), False, 'from adanet import tf_compat\n')] |
import subprocess, os
ue4_win = r"C:\Program Files\Epic Games\UE_4.16"
ue4_linux = "/home/qiuwch/workspace/UE416"
ue4_mac = '/Users/Shared/Epic Games/UE_4.16'
win_uprojects = [
r'C:\qiuwch\workspace\uprojects\UE4RealisticRendering\RealisticRendering.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene1\ArchinteriorsVol2Scene1.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene2\ArchinteriorsVol2Scene2.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene3\ArchinteriorsVol2Scene3.uproject',
r'C:\qiuwch\workspace\uprojects\UE4UrbanCity\UrbanCity.uproject',
r'D:\workspace\uprojects\Matinee\Matinee.uproject',
r'D:\workspace\uprojects\PhotorealisticCharacter\PhotorealisticCharacter2.uproject',
]
linux_uprojects = [
os.path.expanduser('~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser("~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"),
]
mac_uprojects = [
os.path.expanduser('~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'),
os.path.expanduser('~/uprojects/RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser('~/uprojects/UE4UrbanCity/UrbanCity.uproject'),
]
uprojects = []
for uproject_path in win_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_win,
log_file = 'log/win_%s.log' % uproject_name
),
)
for uproject_path in linux_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_linux,
log_file = 'log/linux_%s.log' % uproject_name
),
)
for uproject_path in mac_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_mac,
log_file = 'log/mac_%s.log' % uproject_name
),
)
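# For illustration, each entry appended above has the shape:
#   {'uproject_path': <path to a .uproject file>,
#    'ue4_path': <engine root for that platform>,
#    'log_file': 'log/<platform>_<project name>.log'}
# so the build loop under __main__ only needs these dicts.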
if __name__ == '__main__':
for uproject in uprojects:
uproject_path = uproject['uproject_path']
if not os.path.isfile(uproject_path):
print("Can not find uproject file %s, skip this project" % uproject_path)
continue
cmd = [
'python', 'build.py',
'--UE4', uproject['ue4_path'],
# '--output', uproject['output_folder'],
uproject['uproject_path']
]
print(cmd)
        with open(uproject['log_file'], 'w') as log_file:
            subprocess.call(cmd, stdout=log_file)
with open(uproject['log_file']) as f:
lines = f.readlines()
print(''.join(lines[-10:])) # Print the last few lines
| [
"os.path.isfile",
"os.path.basename",
"os.path.expanduser"
]
| [((803, 901), 'os.path.expanduser', 'os.path.expanduser', (['"""~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject"""'], {}), "(\n '~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject')\n", (821, 901), False, 'import subprocess, os\n'), ((902, 1015), 'os.path.expanduser', 'os.path.expanduser', (['"""~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject"""'], {}), "(\n '~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'\n )\n", (920, 1015), False, 'import subprocess, os\n'), ((1011, 1124), 'os.path.expanduser', 'os.path.expanduser', (['"""~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject"""'], {}), "(\n '~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'\n )\n", (1029, 1124), False, 'import subprocess, os\n'), ((1120, 1233), 'os.path.expanduser', 'os.path.expanduser', (['"""~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject"""'], {}), "(\n '~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'\n )\n", (1138, 1233), False, 'import subprocess, os\n'), ((1229, 1304), 'os.path.expanduser', 'os.path.expanduser', (['"""~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"""'], {}), "('~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject')\n", (1247, 1304), False, 'import subprocess, os\n'), ((1331, 1436), 'os.path.expanduser', 'os.path.expanduser', (['"""~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject"""'], {}), "(\n '~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'\n )\n", (1349, 1436), False, 'import subprocess, os\n'), ((1432, 1517), 'os.path.expanduser', 'os.path.expanduser', (['"""~/uprojects/RealisticRendering/RealisticRendering.uproject"""'], {}), "('~/uprojects/RealisticRendering/RealisticRendering.uproject'\n )\n", (1450, 1517), False, 'import subprocess, os\n'), ((1518, 1616), 'os.path.expanduser', 'os.path.expanduser', (['"""~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject"""'], {}), "(\n '~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject')\n", (1536, 1616), False, 'import subprocess, os\n'), ((1617, 1715), 'os.path.expanduser', 'os.path.expanduser', (['"""~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject"""'], {}), "(\n '~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject')\n", (1635, 1715), False, 'import subprocess, os\n'), ((1716, 1814), 'os.path.expanduser', 'os.path.expanduser', (['"""~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject"""'], {}), "(\n '~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject')\n", (1734, 1814), False, 'import subprocess, os\n'), ((1815, 1880), 'os.path.expanduser', 'os.path.expanduser', (['"""~/uprojects/UE4UrbanCity/UrbanCity.uproject"""'], {}), "('~/uprojects/UE4UrbanCity/UrbanCity.uproject')\n", (1833, 1880), False, 'import subprocess, os\n'), ((2892, 2921), 'os.path.isfile', 'os.path.isfile', (['uproject_path'], {}), '(uproject_path)\n', (2906, 2921), False, 'import subprocess, os\n'), ((1957, 1988), 'os.path.basename', 'os.path.basename', (['uproject_path'], {}), '(uproject_path)\n', (1973, 1988), False, 'import subprocess, os\n'), ((2246, 2277), 'os.path.basename', 'os.path.basename', (['uproject_path'], {}), '(uproject_path)\n', (2262, 2277), False, 'import subprocess, os\n'), ((2537, 2568), 'os.path.basename', 'os.path.basename', 
(['uproject_path'], {}), '(uproject_path)\n', (2553, 2568), False, 'import subprocess, os\n')] |
"""
Django settings for openstack_lease_it project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import ast
import logging
from openstack_lease_it.config import GLOBAL_CONFIG, load_config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load configuration
load_config()
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = GLOBAL_CONFIG['DJANGO_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = ast.literal_eval(GLOBAL_CONFIG['DJANGO_DEBUG'])
# ALLOWED_HOSTS secure django app access
ALLOWED_HOSTS = []
# An email address must match this regular expression to be considered valid.
EMAIL_REGEXP = r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\.-]+\.[A-Za-z]*$"
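# For illustration only (not used by the settings themselves): a caller could
# validate an address against this pattern with the standard `re` module,
# e.g. for a hypothetical `candidate_address` string:
#
#     import re
#     if re.match(EMAIL_REGEXP, candidate_address):
#         ...  # address is acceptable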
# Application definition
INSTALLED_APPS = (
'openstack_auth',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'openstack_lease_it',
'lease_it',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'openstack_lease_it.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'openstack_lease_it.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_CHARSET = 'utf-8'
# We use memcached as cache backend
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{MEMCACHED_HOST}:{MEMCACHED_PORT}'.format(**GLOBAL_CONFIG),
}
}
SESSION_COOKIE_SECURE = False
SESSION_TIMEOUT = 1800
# A token can be near the end of its validity when a page starts loading and
# become invalid during rendering, which can cause errors while the page loads.
# TOKEN_TIMEOUT_MARGIN defines a time in seconds subtracted from the token
# validity to avoid this issue. You can adjust this time depending on the
# performance of the infrastructure.
TOKEN_TIMEOUT_MARGIN = 100
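# For illustration only (not part of these settings): code consuming the margin
# might treat a token as expired once it enters the margin, e.g. with a
# hypothetical `token_expiry` datetime:
#
#     remaining = (token_expiry - datetime.datetime.utcnow()).total_seconds()
#     if remaining < TOKEN_TIMEOUT_MARGIN:
#         ...  # re-authenticate instead of risking a mid-render expiry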
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = '/'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
if GLOBAL_CONFIG['BACKEND_PLUGIN'] == 'Openstack':
# UserId on django-openstack_auth need specific User model
AUTH_USER_MODEL = 'openstack_auth.User'
# Define keystone URL for authentification
OPENSTACK_KEYSTONE_URL = GLOBAL_CONFIG['OS_AUTH_URL']
# We use keystone v3 API
OPENSTACK_API_VERSIONS = {
"identity": GLOBAL_CONFIG['OS_IDENTITY_API_VERSION'],
}
# We use multidomain
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
# We load Openstack_auth backend
AUTHENTICATION_BACKENDS = (
'openstack_auth.backend.KeystoneBackend',
'django.contrib.auth.backends.ModelBackend',
)
else:
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
# Configure logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(asctime)s: %(message)s'
},
},
'handlers': {
'django': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'django.log'),
'formatter': 'simple'
},
'main': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'main.log'),
'formatter': 'simple'
},
'notification': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'notification.log'),
'formatter': 'simple'
},
'instances': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'instances.log'),
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['django'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
'main': {
'handlers': ['main'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
'notification': {
'handlers': ['notification'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
'instances': {
'handlers': ['instances'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
},
}
LOGGER = logging.getLogger('main')
LOGGER_NOTIFICATION = logging.getLogger('notification')
LOGGER_INSTANCES = logging.getLogger('instances')
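# For illustration: with the LOGGING configuration above, LOGGER writes to
# main.log, LOGGER_NOTIFICATION to notification.log and LOGGER_INSTANCES to
# instances.log (Django's own messages go to django.log), all using the
# 'simple' formatter.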
| [
"logging.getLogger",
"os.path.join",
"ast.literal_eval",
"openstack_lease_it.config.load_config",
"os.path.abspath"
]
| [((587, 600), 'openstack_lease_it.config.load_config', 'load_config', ([], {}), '()\n', (598, 600), False, 'from openstack_lease_it.config import GLOBAL_CONFIG, load_config\n'), ((792, 839), 'ast.literal_eval', 'ast.literal_eval', (["GLOBAL_CONFIG['DJANGO_DEBUG']"], {}), "(GLOBAL_CONFIG['DJANGO_DEBUG'])\n", (808, 839), False, 'import ast\n'), ((6507, 6532), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (6524, 6532), False, 'import logging\n'), ((6555, 6588), 'logging.getLogger', 'logging.getLogger', (['"""notification"""'], {}), "('notification')\n", (6572, 6588), False, 'import logging\n'), ((6608, 6638), 'logging.getLogger', 'logging.getLogger', (['"""instances"""'], {}), "('instances')\n", (6625, 6638), False, 'import logging\n'), ((537, 562), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (552, 562), False, 'import os\n'), ((2649, 2685), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""db.sqlite3"""'], {}), "(BASE_DIR, 'db.sqlite3')\n", (2661, 2685), False, 'import os\n'), ((4975, 5033), 'os.path.join', 'os.path.join', (["GLOBAL_CONFIG['DJANGO_LOGDIR']", '"""django.log"""'], {}), "(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'django.log')\n", (4987, 5033), False, 'import os\n'), ((5221, 5277), 'os.path.join', 'os.path.join', (["GLOBAL_CONFIG['DJANGO_LOGDIR']", '"""main.log"""'], {}), "(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'main.log')\n", (5233, 5277), False, 'import os\n'), ((5473, 5537), 'os.path.join', 'os.path.join', (["GLOBAL_CONFIG['DJANGO_LOGDIR']", '"""notification.log"""'], {}), "(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'notification.log')\n", (5485, 5537), False, 'import os\n'), ((5730, 5791), 'os.path.join', 'os.path.join', (["GLOBAL_CONFIG['DJANGO_LOGDIR']", '"""instances.log"""'], {}), "(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'instances.log')\n", (5742, 5791), False, 'import os\n')] |
from unittest import TestCase
from pandora.client import APIClient
from pandora.errors import InvalidAuthToken, ParameterMissing
from pandora.models.pandora import Station, AdItem, PlaylistItem
from pandora.py2compat import Mock, patch
from pydora.utils import iterate_forever
class TestIterateForever(TestCase):
def setUp(self):
self.transport = Mock(side_effect=[InvalidAuthToken(), None])
self.client = APIClient(self.transport, None, None, None, None)
self.client._authenticate = Mock()
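        # The side_effect list in setUp makes the first transport call raise
        # InvalidAuthToken and the second return None, so code under test has
        # to re-authenticate once and retry.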
def test_handle_missing_params_exception_due_to_missing_ad_tokens(self):
with patch.object(APIClient, 'get_playlist') as get_playlist_mock:
with patch.object(APIClient, 'register_ad', side_effect=ParameterMissing("ParameterMissing")):
station = Station.from_json(self.client, {'stationToken': 'token_mock'})
ad_mock = AdItem.from_json(self.client, {'station_id': 'id_mock'})
                get_playlist_mock.return_value = iter([ad_mock])
station_iter = iterate_forever(station.get_playlist)
next_track = next(station_iter)
self.assertEqual(ad_mock, next_track)
def test_reraise_missing_params_exception(self):
with patch.object(APIClient, 'get_playlist', side_effect=ParameterMissing("ParameterMissing")) as get_playlist_mock:
with self.assertRaises(ParameterMissing):
station = Station.from_json(self.client, {'stationToken': 'token_mock'})
track_mock = PlaylistItem.from_json(self.client, {'token': 'token_mock'})
                get_playlist_mock.return_value = iter([track_mock])
station_iter = iterate_forever(station.get_playlist)
next(station_iter)
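# Usage sketch (illustration only, not part of these tests): application code
# drives the generator the same way the tests do, e.g.
#
#     for track in iterate_forever(station.get_playlist):
#         play(track)  # `play` is a hypothetical consumer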
| [
"pandora.errors.InvalidAuthToken",
"pydora.utils.iterate_forever",
"pandora.py2compat.patch.object",
"pandora.models.pandora.AdItem.from_json",
"pandora.py2compat.Mock",
"pandora.models.pandora.Station.from_json",
"pandora.models.pandora.PlaylistItem.from_json",
"pandora.errors.ParameterMissing",
"pandora.client.APIClient"
]
| [((430, 479), 'pandora.client.APIClient', 'APIClient', (['self.transport', 'None', 'None', 'None', 'None'], {}), '(self.transport, None, None, None, None)\n', (439, 479), False, 'from pandora.client import APIClient\n'), ((516, 522), 'pandora.py2compat.Mock', 'Mock', ([], {}), '()\n', (520, 522), False, 'from pandora.py2compat import Mock, patch\n'), ((614, 653), 'pandora.py2compat.patch.object', 'patch.object', (['APIClient', '"""get_playlist"""'], {}), "(APIClient, 'get_playlist')\n", (626, 653), False, 'from pandora.py2compat import Mock, patch\n'), ((810, 872), 'pandora.models.pandora.Station.from_json', 'Station.from_json', (['self.client', "{'stationToken': 'token_mock'}"], {}), "(self.client, {'stationToken': 'token_mock'})\n", (827, 872), False, 'from pandora.models.pandora import Station, AdItem, PlaylistItem\n'), ((899, 955), 'pandora.models.pandora.AdItem.from_json', 'AdItem.from_json', (['self.client', "{'station_id': 'id_mock'}"], {}), "(self.client, {'station_id': 'id_mock'})\n", (915, 955), False, 'from pandora.models.pandora import Station, AdItem, PlaylistItem\n'), ((1051, 1088), 'pydora.utils.iterate_forever', 'iterate_forever', (['station.get_playlist'], {}), '(station.get_playlist)\n', (1066, 1088), False, 'from pydora.utils import iterate_forever\n'), ((1460, 1522), 'pandora.models.pandora.Station.from_json', 'Station.from_json', (['self.client', "{'stationToken': 'token_mock'}"], {}), "(self.client, {'stationToken': 'token_mock'})\n", (1477, 1522), False, 'from pandora.models.pandora import Station, AdItem, PlaylistItem\n'), ((1556, 1616), 'pandora.models.pandora.PlaylistItem.from_json', 'PlaylistItem.from_json', (['self.client', "{'token': 'token_mock'}"], {}), "(self.client, {'token': 'token_mock'})\n", (1578, 1616), False, 'from pandora.models.pandora import Station, AdItem, PlaylistItem\n'), ((1723, 1760), 'pydora.utils.iterate_forever', 'iterate_forever', (['station.get_playlist'], {}), '(station.get_playlist)\n', (1738, 1760), False, 'from pydora.utils import iterate_forever\n'), ((381, 399), 'pandora.errors.InvalidAuthToken', 'InvalidAuthToken', ([], {}), '()\n', (397, 399), False, 'from pandora.errors import InvalidAuthToken, ParameterMissing\n'), ((1311, 1347), 'pandora.errors.ParameterMissing', 'ParameterMissing', (['"""ParameterMissing"""'], {}), "('ParameterMissing')\n", (1327, 1347), False, 'from pandora.errors import InvalidAuthToken, ParameterMissing\n'), ((744, 780), 'pandora.errors.ParameterMissing', 'ParameterMissing', (['"""ParameterMissing"""'], {}), "('ParameterMissing')\n", (760, 780), False, 'from pandora.errors import InvalidAuthToken, ParameterMissing\n')] |
""" Bifurcation point classes. Each class locates and processes bifurcation points.
* _BranchPointFold is a version based on BranchPoint location algorithms
* BranchPoint: Branch process is broken (can't find alternate branch -- see MATCONT notes)
<NAME>, March 2006
"""
from __future__ import absolute_import, print_function
from .misc import *
from PyDSTool.common import args
from .TestFunc import DiscreteMap, FixedPointMap
from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, \
subtract, divide, transpose, eye, real, imag, \
conjugate, average
from scipy import optimize, linalg
from numpy import dot as matrixmultiply
from numpy import array, float, complex, int, float64, complex64, int32, \
zeros, divide, subtract, reshape, argsort, nonzero
#####
_classes = ['BifPoint', 'BPoint', 'BranchPoint', 'FoldPoint', 'HopfPoint',
'BTPoint', 'ZHPoint', 'CPPoint',
'BranchPointFold', '_BranchPointFold', 'DHPoint',
'GHPoint', 'LPCPoint', 'PDPoint', 'NSPoint', 'SPoint']
__all__ = _classes
#####
class BifPoint(object):
def __init__(self, testfuncs, flagfuncs, label='Bifurcation', stop=False):
self.testfuncs = []
self.flagfuncs = []
self.found = []
self.label = label
self.stop = stop
self.data = args()
if not isinstance(testfuncs, list):
testfuncs = [testfuncs]
if not isinstance(flagfuncs, list):
flagfuncs = [flagfuncs]
self.testfuncs.extend(testfuncs)
self.flagfuncs.extend(flagfuncs)
self.tflen = len(self.testfuncs)
def locate(self, P1, P2, C):
pointlist = []
for i, testfunc in enumerate(self.testfuncs):
if self.flagfuncs[i] == iszero:
for ind in range(testfunc.m):
X, V = testfunc.findzero(P1, P2, ind)
pointlist.append((X,V))
X = average([point[0] for point in pointlist], axis=0)
V = average([point[1] for point in pointlist], axis=0)
C.Corrector(X,V)
return X, V
def process(self, X, V, C):
data = args()
data.X = todict(C, X)
data.V = todict(C, V)
self.found.append(data)
def info(self, C, ind=None, strlist=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
if C.verbosity >= 1:
print(self.label + ' Point found ')
if C.verbosity >= 2:
print('========================== ')
for n, i in enumerate(ind):
print(n, ': ')
Xd = self.found[i].X
for k, j in Xd.items():
print(k, ' = ', j)
print('')
if hasattr(self.found[i], 'eigs'):
print('Eigenvalues = \n')
for x in self.found[i].eigs:
print(' (%f,%f)' % (x.real, x.imag))
print('\n')
if strlist is not None:
for string in strlist:
print(string)
print('')
class SPoint(BifPoint):
"""Special point that represents user-selected free parameter values."""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'S', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
self.info(C, -1)
return True
class BPoint(BifPoint):
"""Special point that represents boundary of computational domain."""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'B', stop=stop)
def locate(self, P1, P2, C):
# Find location that triggered testfunc and initialize testfunc to that index
val1 = (P1[0]-self.testfuncs[0].lower)*(self.testfuncs[0].upper-P1[0])
val2 = (P2[0]-self.testfuncs[0].lower)*(self.testfuncs[0].upper-P2[0])
ind = nonzero(val1*val2 < 0)
self.testfuncs[0].ind = ind
self.testfuncs[0].func = self.testfuncs[0].one
X, V = BifPoint.locate(self, P1, P2, C)
# Set testfunc back to monitoring all
self.testfuncs[0].ind = None
self.testfuncs[0].func = self.testfuncs[0].all
return X, V
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class BranchPoint(BifPoint):
"""May only work for EquilibriumCurve ... (needs fixing)"""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BP', stop=stop)
def __locate_newton(self, X, C):
"""x[0:self.dim] = (x,alpha)
x[self.dim] = beta
x[self.dim+1:2*self.dim] = p
"""
J_coords = C.CorrFunc.jac(X[0:C.dim], C.coords)
J_params = C.CorrFunc.jac(X[0:C.dim], C.params)
return r_[C.CorrFunc(X[0:C.dim]) + X[C.dim]*X[C.dim+1:], \
matrixmultiply(transpose(J_coords),X[C.dim+1:]), \
matrixmultiply(transpose(X[C.dim+1:]),J_params), \
matrixmultiply(transpose(X[C.dim+1:]),X[C.dim+1:]) - 1]
def locate(self, P1, P2, C):
        # Initialize p vector to the eigenvector with the smallest eigenvalue
X, V = P1
X2, V2 = P2
J_coords = C.CorrFunc.jac(X, C.coords)
W, VL = linalg.eig(J_coords, left=1, right=0)
ind = argsort([abs(eig) for eig in W])[0]
p = real(VL[:,ind])
initpoint = zeros(2*C.dim, float)
initpoint[0:C.dim] = X
initpoint[C.dim+1:] = p
X = optimize.fsolve(self.__locate_newton, initpoint, C)
self.data.psi = X[C.dim+1:]
X = X[0:C.dim]
V = 0.5*(V+V2)
return X, V
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
# Finds the new branch
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
singular = True
perpvec = r_[1,zeros(C.dim-1)]
d = 1
while singular and d <= C.dim:
try:
v0 = linalg.solve(r_[c_[J_coords, J_params],
[perpvec]], \
r_[zeros(C.dim-1),1])
except:
perpvec = r_[0., perpvec[0:(C.dim-1)]]
d += 1
else:
singular = False
if singular:
raise PyDSTool_ExistError("Problem in _compute: Failed to compute tangent vector.")
v0 /= linalg.norm(v0)
V = sign([x for x in v0 if abs(x) > 1e-8][0])*v0
A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = [ind for ind, eig in enumerate(W) if abs(eig) < 5e-5]
V1 = real(VR[:,W0[0]])
H = C.CorrFunc.hess(X, C.coords+C.params, C.coords+C.params)
c11 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
c12 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
c22 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
beta = 1
alpha = -1*c22/(2*c12)
V1 = alpha*V + beta*V1
V1 /= linalg.norm(V1)
self.found[-1].eigs = W
self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
tocoords(C, self.found[i].branch))))
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class FoldPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'LP', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
# Compute normal form coefficient
        # NOTE: These come for free when using the bordering technique!
# NOTE: Does not agree with MATCONT output! (if |p| = |q| = 1, then it does)
J_coords = C.CorrFunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
minW = min(abs(W))
ind = [(abs(eig) < minW+1e-8) and (abs(eig) > minW-1e-8) for eig in W].index(True)
p, q = real(VL[:,ind]), real(VR[:,ind])
p /= matrixmultiply(p,q)
B = C.CorrFunc.hess(X, C.coords, C.coords)
self.found[-1].a = abs(0.5*matrixmultiply(p,[bilinearform(B[i,:,:], q, q) for i in range(B.shape[0])]))
self.found[-1].eigs = W
numzero = len([eig for eig in W if abs(eig) < 1e-4])
if numzero > 1:
if C.verbosity >= 2:
print('Fold-Fold!\n')
del self.found[-1]
return False
elif numzero == 0:
if C.verbosity >= 2:
print('False positive!\n')
del self.found[-1]
return False
if C.verbosity >= 2:
print('\nChecking...')
print(' |q| = %f' % linalg.norm(q))
print(' <p,q> = %f' % matrixmultiply(p,q))
print(' |Aq| = %f' % linalg.norm(matrixmultiply(J_coords,q)))
print(' |transpose(A)p| = %f\n' % linalg.norm(matrixmultiply(transpose(J_coords),p)))
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('a = ' + repr(self.found[i].a))
BifPoint.info(self, C, ind, strlist)
class HopfPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'H', stop=stop)
def process(self, X, V, C):
"""Tolerance for eigenvalues a possible problem when checking for neutral saddles."""
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.jac(X, C.coords)
eigs, LV, RV = linalg.eig(J_coords,left=1,right=1)
# Check for neutral saddles
found = False
for i in range(len(eigs)):
if abs(imag(eigs[i])) < 1e-5:
for j in range(i+1,len(eigs)):
if C.verbosity >= 2:
if abs(eigs[i]) < 1e-5 and abs(eigs[j]) < 1e-5:
print('Fold-Fold point found in Hopf!\n')
elif abs(imag(eigs[j])) < 1e-5 and abs(real(eigs[i]) + real(eigs[j])) < 1e-5:
print('Neutral saddle found!\n')
elif abs(real(eigs[i])) < 1e-5:
for j in range(i+1, len(eigs)):
if abs(real(eigs[j])) < 1e-5 and abs(real(eigs[i]) - real(eigs[j])) < 1e-5:
found = True
w = abs(imag(eigs[i]))
if imag(eigs[i]) > 0:
p = conjugate(LV[:,j])/linalg.norm(LV[:,j])
q = RV[:,i]/linalg.norm(RV[:,i])
else:
p = conjugate(LV[:,i])/linalg.norm(LV[:,i])
q = RV[:,j]/linalg.norm(RV[:,j])
if not found:
del self.found[-1]
return False
direc = conjugate(1/matrixmultiply(conjugate(p),q))
p = direc*p
# Alternate way to compute 1st lyapunov coefficient (from Kuznetsov [4])
#print (1./(w*w))*real(1j*matrixmultiply(conjugate(p),b1)*matrixmultiply(conjugate(p),b3) + \
# w*matrixmultiply(conjugate(p),trilinearform(D,q,q,conjugate(q))))
self.found[-1].w = w
self.found[-1].l1 = firstlyapunov(X, C.CorrFunc, w, J_coords=J_coords, p=p, q=q, check=(C.verbosity==2))
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('w = ' + repr(self.found[i].w))
strlist.append('l1 = ' + repr(self.found[i].l1))
BifPoint.info(self, C, ind, strlist)
# Codimension-2 bifurcations
class BTPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BT', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
self.found[-1].eigs = W
if C.verbosity >= 2:
if C.CorrFunc.testfunc.data.B.shape[1] == 2:
b = matrixmultiply(transpose(J_coords), C.CorrFunc.testfunc.data.w[:,0])
c = matrixmultiply(J_coords, C.CorrFunc.testfunc.data.v[:,0])
else:
b = C.CorrFunc.testfunc.data.w[:,0]
c = C.CorrFunc.testfunc.data.v[:,0]
print('\nChecking...')
print(' <b,c> = %f' % matrixmultiply(transpose(b), c))
print('\n')
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class ZHPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'ZH', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
self.found[-1].eigs = W
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class CPPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'CP', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
B = C.CorrFunc.sysfunc.hess(X, C.coords, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
q = C.CorrFunc.testfunc.data.C/linalg.norm(C.CorrFunc.testfunc.data.C)
p = C.CorrFunc.testfunc.data.B/matrixmultiply(transpose(C.CorrFunc.testfunc.data.B),q)
self.found[-1].eigs = W
a = 0.5*matrixmultiply(transpose(p), reshape([bilinearform(B[i,:,:], q, q) \
for i in range(B.shape[0])],(B.shape[0],1)))[0][0]
if C.verbosity >= 2:
print('\nChecking...')
print(' |a| = %f' % a)
print('\n')
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class BranchPointFold(BifPoint):
"""Check Equilibrium.m in MATCONT"""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BP', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
pind = self.testfuncs[0].pind
# Finds the new branch
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
A = r_[c_[J_coords, J_params[:,pind]]]
#A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = [ind for ind, eig in enumerate(W) if abs(eig) < 5e-5]
tmp = real(VR[:,W0[0]])
V1 = r_[tmp[:-1], 0, 0]
V1[len(tmp)-1+pind] = tmp[-1]
"""NEED TO FIX THIS!"""
H = C.CorrFunc.hess(X, C.coords+C.params, C.coords+C.params)
# c11 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
# c12 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
# c22 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
# beta = 1
# alpha = -1*c22/(2*c12)
# V1 = alpha*V + beta*V1
# V1 /= linalg.norm(V1)
self.found[-1].eigs = W
self.found[-1].branch = None
self.found[-1].par = C.freepars[self.testfuncs[0].pind]
# self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
#for n, i in enumerate(ind):
# strlist.append('branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
# tocoords(C, self.found[i].branch))))
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class _BranchPointFold(BifPoint):
"""Check Equilibrium.m in MATCONT"""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BP', stop=stop)
def __locate_newton(self, X, C):
"""Note: This is redundant!! B is a column of A!!! Works for now, though..."""
pind = self.testfuncs[0].pind
J_coords = C.CorrFunc.jac(X[0:C.dim], C.coords)
J_params = C.CorrFunc.jac(X[0:C.dim], C.params)
A = c_[J_coords, J_params[:,pind]]
B = J_params[:,pind]
return r_[C.CorrFunc(X[0:C.dim]) + X[C.dim]*X[C.dim+1:], \
matrixmultiply(transpose(A),X[C.dim+1:]), \
matrixmultiply(transpose(X[C.dim+1:]),B), \
matrixmultiply(transpose(X[C.dim+1:]),X[C.dim+1:]) - 1]
def locate(self, P1, P2, C):
        # Initialize p vector to the eigenvector with the smallest eigenvalue
X, V = P1
pind = self.testfuncs[0].pind
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
A = r_[c_[J_coords, J_params[:,pind]]]
W, VL = linalg.eig(A, left=1, right=0)
ind = argsort([abs(eig) for eig in W])[0]
p = real(VL[:,ind])
initpoint = zeros(2*C.dim, float)
initpoint[0:C.dim] = X
initpoint[C.dim+1:] = p
X = optimize.fsolve(self.__locate_newton, initpoint, C)
self.data.psi = X[C.dim+1:]
X = X[0:C.dim]
return X, V
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
pind = self.testfuncs[0].pind
# Finds the new branch
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
A = r_[c_[J_coords, J_params[:,pind]]]
#A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = [ind for ind, eig in enumerate(W) if abs(eig) < 5e-5]
tmp = real(VR[:,W0[0]])
V1 = r_[tmp[:-1], 0, 0]
V1[len(tmp)-1+pind] = tmp[-1]
"""NEED TO FIX THIS!"""
H = C.CorrFunc.hess(X, C.coords+C.params, C.coords+C.params)
c11 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
c12 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
c22 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
beta = 1
alpha = -1*c22/(2*c12)
V1 = alpha*V + beta*V1
V1 /= linalg.norm(V1)
self.found[-1].eigs = W
self.found[-1].branch = None
self.found[-1].par = C.freepars[self.testfuncs[0].pind]
self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
#for n, i in enumerate(ind):
# strlist.append('branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
# tocoords(C, self.found[i].branch))))
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class DHPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'DH', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
eigs, LV, RV = linalg.eig(J_coords,left=1,right=1)
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class GHPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'GH', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
eigs, LV, RV = linalg.eig(J_coords,left=1,right=1)
# Check for neutral saddles
found = False
for i in range(len(eigs)):
if abs(imag(eigs[i])) < 1e-5:
for j in range(i+1,len(eigs)):
if C.verbosity >= 2:
if abs(eigs[i]) < 1e-5 and abs(eigs[j]) < 1e-5:
print('Fold-Fold point found in Hopf!\n')
elif abs(imag(eigs[j])) < 1e-5 and abs(real(eigs[i]) + real(eigs[j])) < 1e-5:
print('Neutral saddle found!\n')
elif abs(real(eigs[i])) < 1e-5:
for j in range(i+1, len(eigs)):
if abs(real(eigs[j])) < 1e-5 and abs(real(eigs[i]) - real(eigs[j])) < 1e-5:
found = True
w = abs(imag(eigs[i]))
if imag(eigs[i]) > 0:
p = conjugate(LV[:,j]/linalg.norm(LV[:,j]))
q = RV[:,i]/linalg.norm(RV[:,i])
else:
p = conjugate(LV[:,i]/linalg.norm(LV[:,i]))
q = RV[:,j]/linalg.norm(RV[:,j])
if not found:
del self.found[-1]
return False
direc = conjugate(1/matrixmultiply(conjugate(p),q))
p = direc*p
# Alternate way to compute 1st lyapunov coefficient (from Kuznetsov [4])
#print (1./(w*w))*real(1j*matrixmultiply(conjugate(p),b1)*matrixmultiply(conjugate(p),b3) + \
# w*matrixmultiply(conjugate(p),trilinearform(D,q,q,conjugate(q))))
self.found[-1].w = w
self.found[-1].l1 = firstlyapunov(X, C.CorrFunc.sysfunc, w, J_coords=J_coords, p=p, q=q, check=(C.verbosity==2))
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('w = ' + repr(self.found[i].w))
strlist.append('l1 = ' + repr(self.found[i].l1))
BifPoint.info(self, C, ind, strlist)
# Discrete maps
class LPCPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'LPC', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.sysfunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
self.found[-1].eigs = W
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
strlist.append('Test function #2: ' + repr(self.testfuncs[1](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class PDPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'PD', stop=stop)
def process(self, X, V, C):
"""Do I need to compute the branch, or will it always be in the direction of freepar = constant?"""
BifPoint.process(self, X, V, C)
F = DiscreteMap(C.sysfunc, period=2*C.sysfunc.period)
FP = FixedPointMap(F)
J_coords = FP.jac(X, C.coords)
J_params = FP.jac(X, C.params)
# Locate branch of double period map
W, VL = linalg.eig(J_coords, left=1, right=0)
ind = argsort([abs(eig) for eig in W])[0]
psi = real(VL[:,ind])
A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = argsort([abs(eig) for eig in W])[0]
V1 = real(VR[:,W0])
H = FP.hess(X, C.coords+C.params, C.coords+C.params)
c11 = matrixmultiply(psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
c12 = matrixmultiply(psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
c22 = matrixmultiply(psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
beta = 1
alpha = -1*c22/(2*c12)
V1 = alpha*V + beta*V1
V1 /= linalg.norm(V1)
J_coords = C.sysfunc.jac(X, C.coords)
W = linalg.eig(J_coords, right=0)
self.found[-1].eigs = W
self.found[-1].branch_period = 2*C.sysfunc.period
self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('Period doubling branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
tocoords(C, self.found[i].branch))))
BifPoint.info(self, C, ind, strlist)
class NSPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'NS', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.sysfunc.jac(X, C.coords)
eigs, VL, VR = linalg.eig(J_coords, left=1, right=1)
# Check for nonreal multipliers
found = False
for i in range(len(eigs)):
for j in range(i+1,len(eigs)):
if abs(imag(eigs[i])) > 1e-10 and \
abs(imag(eigs[j])) > 1e-10 and \
abs(eigs[i]*eigs[j] - 1) < 1e-5:
found = True
if not found:
del self.found[-1]
return False
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
| [
"scipy.optimize.fsolve",
"numpy.average",
"numpy.conjugate",
"PyDSTool.common.args",
"numpy.real",
"scipy.linalg.eig",
"numpy.zeros",
"numpy.dot",
"numpy.nonzero",
"scipy.linalg.norm",
"numpy.transpose",
"numpy.imag"
]
| [((1323, 1329), 'PyDSTool.common.args', 'args', ([], {}), '()\n', (1327, 1329), False, 'from PyDSTool.common import args\n'), ((1932, 1982), 'numpy.average', 'average', (['[point[0] for point in pointlist]'], {'axis': '(0)'}), '([point[0] for point in pointlist], axis=0)\n', (1939, 1982), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((1995, 2045), 'numpy.average', 'average', (['[point[1] for point in pointlist]'], {'axis': '(0)'}), '([point[1] for point in pointlist], axis=0)\n', (2002, 2045), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((2140, 2146), 'PyDSTool.common.args', 'args', ([], {}), '()\n', (2144, 2146), False, 'from PyDSTool.common import args\n'), ((4050, 4074), 'numpy.nonzero', 'nonzero', (['(val1 * val2 < 0)'], {}), '(val1 * val2 < 0)\n', (4057, 4074), False, 'from numpy import array, float, complex, int, float64, complex64, int32, zeros, divide, subtract, reshape, argsort, nonzero\n'), ((5675, 5712), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(0)'}), '(J_coords, left=1, right=0)\n', (5685, 5712), False, 'from scipy import optimize, linalg\n'), ((5775, 5791), 'numpy.real', 'real', (['VL[:, ind]'], {}), '(VL[:, ind])\n', (5779, 5791), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((5812, 5835), 'numpy.zeros', 'zeros', (['(2 * C.dim)', 'float'], {}), '(2 * C.dim, float)\n', (5817, 5835), False, 'from numpy import array, float, complex, int, float64, complex64, int32, zeros, divide, subtract, reshape, argsort, nonzero\n'), ((5910, 5961), 'scipy.optimize.fsolve', 'optimize.fsolve', (['self.__locate_newton', 'initpoint', 'C'], {}), '(self.__locate_newton, initpoint, C)\n', (5925, 5961), False, 'from scipy import optimize, linalg\n'), ((6846, 6861), 'scipy.linalg.norm', 'linalg.norm', (['v0'], {}), '(v0)\n', (6857, 6861), False, 'from scipy import optimize, linalg\n'), ((6980, 6993), 'scipy.linalg.eig', 'linalg.eig', (['A'], {}), '(A)\n', (6990, 6993), False, 'from scipy import optimize, linalg\n'), ((7074, 7092), 'numpy.real', 'real', (['VR[:, W0[0]]'], {}), '(VR[:, W0[0]])\n', (7078, 7092), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((7565, 7580), 'scipy.linalg.norm', 'linalg.norm', (['V1'], {}), '(V1)\n', (7576, 7580), False, 'from scipy import optimize, linalg\n'), ((8815, 8852), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (8825, 8852), False, 'from scipy import optimize, linalg\n'), ((9032, 9052), 'numpy.dot', 'matrixmultiply', (['p', 'q'], {}), '(p, q)\n', (9046, 9052), True, 'from numpy import dot as matrixmultiply\n'), ((10734, 10771), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (10744, 10771), False, 'from scipy import optimize, linalg\n'), ((13285, 13322), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (13295, 13322), False, 'from scipy import optimize, linalg\n'), ((14414, 14451), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (14424, 14451), False, 'from scipy import optimize, linalg\n'), ((15099, 15136), 
'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (15109, 15136), False, 'from scipy import optimize, linalg\n'), ((16423, 16436), 'scipy.linalg.eig', 'linalg.eig', (['A'], {}), '(A)\n', (16433, 16436), False, 'from scipy import optimize, linalg\n'), ((16518, 16536), 'numpy.real', 'real', (['VR[:, W0[0]]'], {}), '(VR[:, W0[0]])\n', (16522, 16536), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((19129, 19159), 'scipy.linalg.eig', 'linalg.eig', (['A'], {'left': '(1)', 'right': '(0)'}), '(A, left=1, right=0)\n', (19139, 19159), False, 'from scipy import optimize, linalg\n'), ((19222, 19238), 'numpy.real', 'real', (['VL[:, ind]'], {}), '(VL[:, ind])\n', (19226, 19238), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((19259, 19282), 'numpy.zeros', 'zeros', (['(2 * C.dim)', 'float'], {}), '(2 * C.dim, float)\n', (19264, 19282), False, 'from numpy import array, float, complex, int, float64, complex64, int32, zeros, divide, subtract, reshape, argsort, nonzero\n'), ((19357, 19408), 'scipy.optimize.fsolve', 'optimize.fsolve', (['self.__locate_newton', 'initpoint', 'C'], {}), '(self.__locate_newton, initpoint, C)\n', (19372, 19408), False, 'from scipy import optimize, linalg\n'), ((19837, 19850), 'scipy.linalg.eig', 'linalg.eig', (['A'], {}), '(A)\n', (19847, 19850), False, 'from scipy import optimize, linalg\n'), ((19932, 19950), 'numpy.real', 'real', (['VR[:, W0[0]]'], {}), '(VR[:, W0[0]])\n', (19936, 19950), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((20525, 20540), 'scipy.linalg.norm', 'linalg.norm', (['V1'], {}), '(V1)\n', (20536, 20540), False, 'from scipy import optimize, linalg\n'), ((21692, 21729), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (21702, 21729), False, 'from scipy import optimize, linalg\n'), ((22321, 22358), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (22331, 22358), False, 'from scipy import optimize, linalg\n'), ((24869, 24906), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (24879, 24906), False, 'from scipy import optimize, linalg\n'), ((26060, 26097), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(0)'}), '(J_coords, left=1, right=0)\n', (26070, 26097), False, 'from scipy import optimize, linalg\n'), ((26162, 26178), 'numpy.real', 'real', (['VL[:, ind]'], {}), '(VL[:, ind])\n', (26166, 26178), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((26239, 26252), 'scipy.linalg.eig', 'linalg.eig', (['A'], {}), '(A)\n', (26249, 26252), False, 'from scipy import optimize, linalg\n'), ((26315, 26330), 'numpy.real', 'real', (['VR[:, W0]'], {}), '(VR[:, W0])\n', (26319, 26330), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((26765, 26780), 'scipy.linalg.norm', 'linalg.norm', (['V1'], {}), '(V1)\n', (26776, 26780), False, 'from scipy import optimize, linalg\n'), ((26840, 26869), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'right': '(0)'}), 
'(J_coords, right=0)\n', (26850, 26869), False, 'from scipy import optimize, linalg\n'), ((27791, 27828), 'scipy.linalg.eig', 'linalg.eig', (['J_coords'], {'left': '(1)', 'right': '(1)'}), '(J_coords, left=1, right=1)\n', (27801, 27828), False, 'from scipy import optimize, linalg\n'), ((8986, 9002), 'numpy.real', 'real', (['VL[:, ind]'], {}), '(VL[:, ind])\n', (8990, 9002), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((9003, 9019), 'numpy.real', 'real', (['VR[:, ind]'], {}), '(VR[:, ind])\n', (9007, 9019), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((15177, 15216), 'scipy.linalg.norm', 'linalg.norm', (['C.CorrFunc.testfunc.data.C'], {}), '(C.CorrFunc.testfunc.data.C)\n', (15188, 15216), False, 'from scipy import optimize, linalg\n'), ((6314, 6330), 'numpy.zeros', 'zeros', (['(C.dim - 1)'], {}), '(C.dim - 1)\n', (6319, 6330), False, 'from numpy import array, float, complex, int, float64, complex64, int32, zeros, divide, subtract, reshape, argsort, nonzero\n'), ((13552, 13610), 'numpy.dot', 'matrixmultiply', (['J_coords', 'C.CorrFunc.testfunc.data.v[:, 0]'], {}), '(J_coords, C.CorrFunc.testfunc.data.v[:, 0])\n', (13566, 13610), True, 'from numpy import dot as matrixmultiply\n'), ((15271, 15308), 'numpy.transpose', 'transpose', (['C.CorrFunc.testfunc.data.B'], {}), '(C.CorrFunc.testfunc.data.B)\n', (15280, 15308), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((5289, 5308), 'numpy.transpose', 'transpose', (['J_coords'], {}), '(J_coords)\n', (5298, 5308), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((5358, 5382), 'numpy.transpose', 'transpose', (['X[C.dim + 1:]'], {}), '(X[C.dim + 1:])\n', (5367, 5382), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((9718, 9732), 'scipy.linalg.norm', 'linalg.norm', (['q'], {}), '(q)\n', (9729, 9732), False, 'from scipy import optimize, linalg\n'), ((9769, 9789), 'numpy.dot', 'matrixmultiply', (['p', 'q'], {}), '(p, q)\n', (9783, 9789), True, 'from numpy import dot as matrixmultiply\n'), ((10883, 10896), 'numpy.imag', 'imag', (['eigs[i]'], {}), '(eigs[i])\n', (10887, 10896), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((12036, 12048), 'numpy.conjugate', 'conjugate', (['p'], {}), '(p)\n', (12045, 12048), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((13478, 13497), 'numpy.transpose', 'transpose', (['J_coords'], {}), '(J_coords)\n', (13487, 13497), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((18643, 18655), 'numpy.transpose', 'transpose', (['A'], {}), '(A)\n', (18652, 18655), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((18705, 18729), 'numpy.transpose', 'transpose', (['X[C.dim + 1:]'], {}), '(X[C.dim + 1:])\n', (18714, 18729), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), 
((22470, 22483), 'numpy.imag', 'imag', (['eigs[i]'], {}), '(eigs[i])\n', (22474, 22483), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((23631, 23643), 'numpy.conjugate', 'conjugate', (['p'], {}), '(p)\n', (23640, 23643), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((5427, 5451), 'numpy.transpose', 'transpose', (['X[C.dim + 1:]'], {}), '(X[C.dim + 1:])\n', (5436, 5451), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((9836, 9863), 'numpy.dot', 'matrixmultiply', (['J_coords', 'q'], {}), '(J_coords, q)\n', (9850, 9863), True, 'from numpy import dot as matrixmultiply\n'), ((11320, 11333), 'numpy.real', 'real', (['eigs[i]'], {}), '(eigs[i])\n', (11324, 11333), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((13817, 13829), 'numpy.transpose', 'transpose', (['b'], {}), '(b)\n', (13826, 13829), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((15377, 15389), 'numpy.transpose', 'transpose', (['p'], {}), '(p)\n', (15386, 15389), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((18767, 18791), 'numpy.transpose', 'transpose', (['X[C.dim + 1:]'], {}), '(X[C.dim + 1:])\n', (18776, 18791), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((22915, 22928), 'numpy.real', 'real', (['eigs[i]'], {}), '(eigs[i])\n', (22919, 22928), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((6546, 6562), 'numpy.zeros', 'zeros', (['(C.dim - 1)'], {}), '(C.dim - 1)\n', (6551, 6562), False, 'from numpy import array, float, complex, int, float64, complex64, int32, zeros, divide, subtract, reshape, argsort, nonzero\n'), ((9939, 9958), 'numpy.transpose', 'transpose', (['J_coords'], {}), '(J_coords)\n', (9948, 9958), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((27993, 28006), 'numpy.imag', 'imag', (['eigs[i]'], {}), '(eigs[i])\n', (27997, 28006), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((28045, 28058), 'numpy.imag', 'imag', (['eigs[j]'], {}), '(eigs[j])\n', (28049, 28058), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11556, 11569), 'numpy.imag', 'imag', (['eigs[i]'], {}), '(eigs[i])\n', (11560, 11569), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11598, 11611), 'numpy.imag', 'imag', (['eigs[i]'], {}), '(eigs[i])\n', (11602, 11611), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((23151, 23164), 'numpy.imag', 'imag', (['eigs[i]'], {}), '(eigs[i])\n', (23155, 23164), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, 
average\n'), ((23193, 23206), 'numpy.imag', 'imag', (['eigs[i]'], {}), '(eigs[i])\n', (23197, 23206), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11418, 11431), 'numpy.real', 'real', (['eigs[j]'], {}), '(eigs[j])\n', (11422, 11431), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11649, 11668), 'numpy.conjugate', 'conjugate', (['LV[:, j]'], {}), '(LV[:, j])\n', (11658, 11668), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11668, 11689), 'scipy.linalg.norm', 'linalg.norm', (['LV[:, j]'], {}), '(LV[:, j])\n', (11679, 11689), False, 'from scipy import optimize, linalg\n'), ((11729, 11750), 'scipy.linalg.norm', 'linalg.norm', (['RV[:, i]'], {}), '(RV[:, i])\n', (11740, 11750), False, 'from scipy import optimize, linalg\n'), ((11812, 11831), 'numpy.conjugate', 'conjugate', (['LV[:, i]'], {}), '(LV[:, i])\n', (11821, 11831), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11831, 11852), 'scipy.linalg.norm', 'linalg.norm', (['LV[:, i]'], {}), '(LV[:, i])\n', (11842, 11852), False, 'from scipy import optimize, linalg\n'), ((11892, 11913), 'scipy.linalg.norm', 'linalg.norm', (['RV[:, j]'], {}), '(RV[:, j])\n', (11903, 11913), False, 'from scipy import optimize, linalg\n'), ((23013, 23026), 'numpy.real', 'real', (['eigs[j]'], {}), '(eigs[j])\n', (23017, 23026), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((23324, 23345), 'scipy.linalg.norm', 'linalg.norm', (['RV[:, i]'], {}), '(RV[:, i])\n', (23335, 23345), False, 'from scipy import optimize, linalg\n'), ((23487, 23508), 'scipy.linalg.norm', 'linalg.norm', (['RV[:, j]'], {}), '(RV[:, j])\n', (23498, 23508), False, 'from scipy import optimize, linalg\n'), ((11169, 11182), 'numpy.imag', 'imag', (['eigs[j]'], {}), '(eigs[j])\n', (11173, 11182), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11448, 11461), 'numpy.real', 'real', (['eigs[i]'], {}), '(eigs[i])\n', (11452, 11461), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11464, 11477), 'numpy.real', 'real', (['eigs[j]'], {}), '(eigs[j])\n', (11468, 11477), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((22760, 22773), 'numpy.imag', 'imag', (['eigs[j]'], {}), '(eigs[j])\n', (22764, 22773), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((23043, 23056), 'numpy.real', 'real', (['eigs[i]'], {}), '(eigs[i])\n', (23047, 23056), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((23059, 23072), 'numpy.real', 'real', (['eigs[j]'], {}), '(eigs[j])\n', (23063, 23072), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((23262, 23283), 'scipy.linalg.norm', 'linalg.norm', (['LV[:, j]'], {}), '(LV[:, j])\n', (23273, 23283), False, 'from scipy import 
optimize, linalg\n'), ((23425, 23446), 'scipy.linalg.norm', 'linalg.norm', (['LV[:, i]'], {}), '(LV[:, i])\n', (23436, 23446), False, 'from scipy import optimize, linalg\n'), ((11199, 11212), 'numpy.real', 'real', (['eigs[i]'], {}), '(eigs[i])\n', (11203, 11212), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((11215, 11228), 'numpy.real', 'real', (['eigs[j]'], {}), '(eigs[j])\n', (11219, 11228), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((22790, 22803), 'numpy.real', 'real', (['eigs[i]'], {}), '(eigs[i])\n', (22794, 22803), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n'), ((22806, 22819), 'numpy.real', 'real', (['eigs[j]'], {}), '(eigs[j])\n', (22810, 22819), False, 'from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, subtract, divide, transpose, eye, real, imag, conjugate, average\n')] |
"""
PyXLL-Jupyter
This package integrates Jupyter notebooks into Microsoft Excel.
To install it, first install PyXLL (see https://www.pyxll.com).
Briefly, to install PyXLL do the following::
pip install pyxll
pyxll install
Once PyXLL is installed then installing this package will add a
button to the PyXLL ribbon toolbar that will start a Jupyter
notebook browser as a custom task pane in Excel.
To install this package use::
pip install pyxll_jupyter
"""
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="pyxll_jupyter",
description="Adds Jupyter notebooks to Microsoft Excel using PyXLL.",
long_description=long_description,
long_description_content_type='text/markdown',
version="0.1.11",
packages=find_packages(),
include_package_data=True,
package_data={
"pyxll_jupyter": [
"pyxll_jupyter/resources/ribbon.xml",
"pyxll_jupyter/resources/jupyter.png",
]
},
project_urls={
"Source": "https://github.com/pyxll/pyxll-jupyter",
"Tracker": "https://github.com/pyxll/pyxll-jupyter/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows"
],
entry_points={
"pyxll": [
"modules = pyxll_jupyter.pyxll:modules",
"ribbon = pyxll_jupyter.pyxll:ribbon"
]
},
install_requires=[
"pyxll >= 5.0.0",
"jupyter >= 1.0.0",
"PySide2"
]
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
]
| [((572, 594), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (584, 594), False, 'from os import path\n'), ((606, 644), 'os.path.join', 'path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (615, 644), False, 'from os import path\n'), ((936, 951), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (949, 951), False, 'from setuptools import setup, find_packages\n')] |
from typing import List
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic, View
from board.forms import SignUpForm
from .const import BOARD_VIEW_COLUMN_COUNT
from .models import Board, Priority, Membership, Contribution
from .models import Task
@login_required
def index(request):
board_col, row_count = Board.objects.get_user_split_boards(request.user, BOARD_VIEW_COLUMN_COUNT)
context = {
'board_col': board_col,
'row_count': row_count
}
return render(request, 'index.html', context)
@login_required
def board(request, board_id):
_board = Board.objects.get(id=board_id)
todo_tasks: List[Task] = Task.objects.filter(board=_board, status='TODO')
doing_tasks = Task.objects.filter(board=_board, status='DOING')
done_tasks = Task.objects.filter(board=_board, status='DONE')
context = {
'board': _board,
'todo_tasks': todo_tasks,
'doing_tasks': doing_tasks,
'done_tasks': done_tasks,
'user': request.user,
}
return render(request, 'board.html', context)
@login_required
def update_task_state(request):
if request.method == "POST":
task_id = request.POST['task_id']
new_state = request.POST['new_state']
this_task = Task.objects.get(id=task_id)
this_task.status = new_state
this_task.save()
return JsonResponse({"success": True})
class SignUp(generic.CreateView):
form_class = SignUpForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
class CreateBoard(View):
def post(self, request):
name = request.POST['name']
description = request.POST['description']
if name:
new_board = Board.objects.create(
name=name,
description=description,
)
Membership.objects.create(
board=new_board,
user=request.user,
role=Membership.Role.SUPER_USER
)
return JsonResponse({"success": True})
return JsonResponse({"success": False})
class CreateTask(View):
def post(self, request):
title = request.POST['title']
description = request.POST['description']
status = request.POST['status']
priority = int(request.POST['priority'])
board_id = int(request.POST['board_id'])
if title and request.user in Board.objects.get(id=board_id).members.all():
Task.objects.create(
title=title,
description=description,
status=status,
priority=Priority.choices[-int(priority) - 1][0],
created_by=request.user,
board_id=board_id
)
return JsonResponse({"success": True})
return JsonResponse({"success": False})
class CreateBoardMembership(View):
def post(self, request):
username = request.POST['username']
board_id = int(request.POST['board_id'])
if username and board_id:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return JsonResponse(
status=404,
data={'message': 'User doesn\'t exist'}
)
try:
membership = Membership.objects.get(board=board_id, user=user.id)
except Membership.DoesNotExist:
membership = None
if membership is not None:
return JsonResponse(
status=400,
data={'message': 'user already added'}
)
Membership.objects.create(
user=user,
board_id=board_id
)
return JsonResponse({'message': 'success'})
return JsonResponse(
status=400,
data={'message': 'username or board_id can\'t be empty'}
)
def parse_priority(value: str):
choices = Priority.choices
for i in range(0, len(choices)):
if value == choices[i][1].lower():
return choices[i][0]
@login_required
def update_task(request):
this_task = Task.objects.get(id=request.POST['id'])
this_task.title = request.POST['title']
this_task.description = request.POST['description']
this_task.status = request.POST['status']
this_task.priority = parse_priority(request.POST['priority'].lower())
this_task.save()
assigned_user_id = request.POST['user']
if assigned_user_id:
Contribution.objects.create(
task=this_task,
user_id=assigned_user_id,
)
return JsonResponse({"success": True})
@login_required
def get_available_users(request):
users = User.objects.filter(
membership__board_id=request.GET['board']
).exclude(
contribution__task_id=request.GET['task']
)
response_users = list(map(
lambda user: {
'id': user.id,
'username': user.username
},
users
))
return JsonResponse({'users': response_users})
@login_required
def delete_task(request):
    if request.method == "POST" and request.POST.get('task'):
        task = Task.objects.get(id=request.POST['task'])
if request.user in task.board.members.all():
task.delete()
return JsonResponse({"success": True})
return JsonResponse({"success": False})
| [
"django.shortcuts.render",
"django.http.JsonResponse",
"django.contrib.auth.models.User.objects.filter",
"django.urls.reverse_lazy",
"django.contrib.auth.models.User.objects.get"
]
| [((679, 717), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (685, 717), False, 'from django.shortcuts import render\n'), ((1216, 1254), 'django.shortcuts.render', 'render', (['request', '"""board.html"""', 'context'], {}), "(request, 'board.html', context)\n", (1222, 1254), False, 'from django.shortcuts import render\n'), ((1549, 1580), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True}"], {}), "({'success': True})\n", (1561, 1580), False, 'from django.http import JsonResponse\n'), ((1663, 1684), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""login"""'], {}), "('login')\n", (1675, 1684), False, 'from django.urls import reverse_lazy\n'), ((4872, 4903), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True}"], {}), "({'success': True})\n", (4884, 4903), False, 'from django.http import JsonResponse\n'), ((5273, 5312), 'django.http.JsonResponse', 'JsonResponse', (["{'users': response_users}"], {}), "({'users': response_users})\n", (5285, 5312), False, 'from django.http import JsonResponse\n'), ((5597, 5629), 'django.http.JsonResponse', 'JsonResponse', (["{'success': False}"], {}), "({'success': False})\n", (5609, 5629), False, 'from django.http import JsonResponse\n'), ((2245, 2277), 'django.http.JsonResponse', 'JsonResponse', (["{'success': False}"], {}), "({'success': False})\n", (2257, 2277), False, 'from django.http import JsonResponse\n'), ((3001, 3033), 'django.http.JsonResponse', 'JsonResponse', (["{'success': False}"], {}), "({'success': False})\n", (3013, 3033), False, 'from django.http import JsonResponse\n'), ((4041, 4126), 'django.http.JsonResponse', 'JsonResponse', ([], {'status': '(400)', 'data': '{\'message\': "username or board_id can\'t be empty"}'}), '(status=400, data={\'message\':\n "username or board_id can\'t be empty"})\n', (4053, 4126), False, 'from django.http import JsonResponse\n'), ((2197, 2228), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True}"], {}), "({'success': True})\n", (2209, 2228), False, 'from django.http import JsonResponse\n'), ((2953, 2984), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True}"], {}), "({'success': True})\n", (2965, 2984), False, 'from django.http import JsonResponse\n'), ((3988, 4024), 'django.http.JsonResponse', 'JsonResponse', (["{'message': 'success'}"], {}), "({'message': 'success'})\n", (4000, 4024), False, 'from django.http import JsonResponse\n'), ((4968, 5030), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'membership__board_id': "request.GET['board']"}), "(membership__board_id=request.GET['board'])\n", (4987, 5030), False, 'from django.contrib.auth.models import User\n'), ((5554, 5585), 'django.http.JsonResponse', 'JsonResponse', (["{'success': True}"], {}), "({'success': True})\n", (5566, 5585), False, 'from django.http import JsonResponse\n'), ((3269, 3304), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'username'}), '(username=username)\n', (3285, 3304), False, 'from django.contrib.auth.models import User\n'), ((3731, 3795), 'django.http.JsonResponse', 'JsonResponse', ([], {'status': '(400)', 'data': "{'message': 'user already added'}"}), "(status=400, data={'message': 'user already added'})\n", (3743, 3795), False, 'from django.http import JsonResponse\n'), ((3366, 3430), 'django.http.JsonResponse', 'JsonResponse', ([], {'status': '(404)', 'data': '{\'message\': "User doesn\'t exist"}'}), '(status=404, 
data={\'message\': "User doesn\'t exist"})\n', (3378, 3430), False, 'from django.http import JsonResponse\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-15 22:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailsearchpromotions', '0002_capitalizeverbose'),
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailforms', '0003_capitalizeverbose'),
('torchbox', '0110_rename_blogpagetaglist_to_tag'),
]
database_operations = [
migrations.AlterModelTable('SignUpFormPageResponse', 'sign_up_form_signupformpageresponse'),
migrations.AlterModelTable('SignUpFormPage', 'sign_up_form_signupformpage'),
migrations.AlterModelTable('SignUpFormPageBullet', 'sign_up_form_signupformpagebullet'),
migrations.AlterModelTable('SignUpFormPageLogo', 'sign_up_form_signupformpagelogo'),
migrations.AlterModelTable('SignUpFormPageQuote', 'sign_up_form_signupformpagequote'),
]
state_operations = [
migrations.RemoveField(
model_name='signupformpage',
name='call_to_action_image',
),
migrations.RemoveField(
model_name='signupformpage',
name='email_attachment',
),
migrations.RemoveField(
model_name='signupformpage',
name='page_ptr',
),
migrations.RemoveField(
model_name='signupformpagebullet',
name='page',
),
migrations.RemoveField(
model_name='signupformpagelogo',
name='logo',
),
migrations.RemoveField(
model_name='signupformpagelogo',
name='page',
),
migrations.RemoveField(
model_name='signupformpagequote',
name='page',
),
migrations.DeleteModel(
name='SignUpFormPageResponse',
),
migrations.DeleteModel(
name='SignUpFormPage',
),
migrations.DeleteModel(
name='SignUpFormPageBullet',
),
migrations.DeleteModel(
name='SignUpFormPageLogo',
),
migrations.DeleteModel(
name='SignUpFormPageQuote',
),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=database_operations,
state_operations=state_operations,
)
]
| [
"django.db.migrations.AlterModelTable",
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField",
"django.db.migrations.SeparateDatabaseAndState"
]
| [((546, 641), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', (['"""SignUpFormPageResponse"""', '"""sign_up_form_signupformpageresponse"""'], {}), "('SignUpFormPageResponse',\n 'sign_up_form_signupformpageresponse')\n", (572, 641), False, 'from django.db import migrations\n'), ((647, 722), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', (['"""SignUpFormPage"""', '"""sign_up_form_signupformpage"""'], {}), "('SignUpFormPage', 'sign_up_form_signupformpage')\n", (673, 722), False, 'from django.db import migrations\n'), ((732, 823), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', (['"""SignUpFormPageBullet"""', '"""sign_up_form_signupformpagebullet"""'], {}), "('SignUpFormPageBullet',\n 'sign_up_form_signupformpagebullet')\n", (758, 823), False, 'from django.db import migrations\n'), ((829, 916), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', (['"""SignUpFormPageLogo"""', '"""sign_up_form_signupformpagelogo"""'], {}), "('SignUpFormPageLogo',\n 'sign_up_form_signupformpagelogo')\n", (855, 916), False, 'from django.db import migrations\n'), ((922, 1011), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', (['"""SignUpFormPageQuote"""', '"""sign_up_form_signupformpagequote"""'], {}), "('SignUpFormPageQuote',\n 'sign_up_form_signupformpagequote')\n", (948, 1011), False, 'from django.db import migrations\n'), ((1049, 1134), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""signupformpage"""', 'name': '"""call_to_action_image"""'}), "(model_name='signupformpage', name='call_to_action_image'\n )\n", (1071, 1134), False, 'from django.db import migrations\n'), ((1174, 1250), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""signupformpage"""', 'name': '"""email_attachment"""'}), "(model_name='signupformpage', name='email_attachment')\n", (1196, 1250), False, 'from django.db import migrations\n'), ((1295, 1363), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""signupformpage"""', 'name': '"""page_ptr"""'}), "(model_name='signupformpage', name='page_ptr')\n", (1317, 1363), False, 'from django.db import migrations\n'), ((1408, 1478), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""signupformpagebullet"""', 'name': '"""page"""'}), "(model_name='signupformpagebullet', name='page')\n", (1430, 1478), False, 'from django.db import migrations\n'), ((1523, 1591), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""signupformpagelogo"""', 'name': '"""logo"""'}), "(model_name='signupformpagelogo', name='logo')\n", (1545, 1591), False, 'from django.db import migrations\n'), ((1636, 1704), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""signupformpagelogo"""', 'name': '"""page"""'}), "(model_name='signupformpagelogo', name='page')\n", (1658, 1704), False, 'from django.db import migrations\n'), ((1749, 1818), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""signupformpagequote"""', 'name': '"""page"""'}), "(model_name='signupformpagequote', name='page')\n", (1771, 1818), False, 'from django.db import migrations\n'), ((1863, 1916), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SignUpFormPageResponse"""'}), "(name='SignUpFormPageResponse')\n", (1885, 1916), False, 'from django.db import migrations\n'), ((1949, 1994), 
'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SignUpFormPage"""'}), "(name='SignUpFormPage')\n", (1971, 1994), False, 'from django.db import migrations\n'), ((2027, 2078), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SignUpFormPageBullet"""'}), "(name='SignUpFormPageBullet')\n", (2049, 2078), False, 'from django.db import migrations\n'), ((2111, 2160), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SignUpFormPageLogo"""'}), "(name='SignUpFormPageLogo')\n", (2133, 2160), False, 'from django.db import migrations\n'), ((2193, 2243), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SignUpFormPageQuote"""'}), "(name='SignUpFormPageQuote')\n", (2215, 2243), False, 'from django.db import migrations\n'), ((2302, 2417), 'django.db.migrations.SeparateDatabaseAndState', 'migrations.SeparateDatabaseAndState', ([], {'database_operations': 'database_operations', 'state_operations': 'state_operations'}), '(database_operations=database_operations,\n state_operations=state_operations)\n', (2337, 2417), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python3
#
# Copyright (c) 2019 <NAME> and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test_webframe module covers the webframe module."""
from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import configparser
import datetime
import os
import unittest
import unittest.mock
import time
# pylint: disable=unused-import
import yattag
import webframe
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse # noqa: F401
class TestHandleStatic(unittest.TestCase):
"""Tests handle_static()."""
def test_happy(self) -> None:
"""Tests the happy path: css case."""
content, content_type = webframe.handle_static("/osm/static/osm.css")
self.assertTrue(len(content))
self.assertEqual(content_type, "text/css")
def test_javascript(self) -> None:
"""Tests the javascript case."""
content, content_type = webframe.handle_static("/osm/static/sorttable.js")
self.assertTrue(len(content))
self.assertEqual(content_type, "application/x-javascript")
def test_else(self) -> None:
"""Tests the case when the content type is not recognized."""
content, content_type = webframe.handle_static("/osm/static/test.xyz")
self.assertFalse(len(content))
self.assertFalse(len(content_type))
class TestHandleException(unittest.TestCase):
"""Tests handle_exception()."""
def test_happy(self) -> None:
"""Tests the happy path."""
environ = {
"PATH_INFO": "/"
}
def start_response(status: str, response_headers: List[Tuple[str, str]]) -> None:
self.assertTrue(status.startswith("500"))
header_dict = dict(response_headers)
self.assertEqual(header_dict["Content-type"], "text/html; charset=utf-8")
try:
int("a")
# pylint: disable=broad-except
except Exception:
callback = cast('StartResponse', start_response)
output_iterable = webframe.handle_exception(environ, callback)
output_list = cast(List[bytes], output_iterable)
self.assertTrue(output_list)
output = output_list[0].decode('utf-8')
self.assertIn("ValueError", output)
return
self.fail()
class TestLocalToUiTz(unittest.TestCase):
"""Tests local_to_ui_tz()."""
def test_happy(self) -> None:
"""Tests the happy path."""
def get_abspath(path: str) -> str:
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(__file__), path)
def get_config() -> configparser.ConfigParser:
config = configparser.ConfigParser()
config.read_dict({"wsgi": {"timezone": "Europe/Budapest"}})
return config
with unittest.mock.patch('util.get_abspath', get_abspath):
with unittest.mock.patch('webframe.get_config', get_config):
local_dt = datetime.datetime.fromtimestamp(0)
ui_dt = webframe.local_to_ui_tz(local_dt)
if time.strftime('%Z%z') == "CET+0100":
self.assertEqual(ui_dt.timestamp(), 0)
class TestFillMissingHeaderItems(unittest.TestCase):
"""Tests fill_missing_header_items()."""
def test_happy(self) -> None:
"""Tests the happy path."""
streets = "no"
relation_name = "gazdagret"
items: List[yattag.doc.Doc] = []
webframe.fill_missing_header_items(streets, relation_name, items)
html = items[0].getvalue()
self.assertIn("Missing house numbers", html)
self.assertNotIn("Missing streets", html)
if __name__ == '__main__':
unittest.main()
| [
"webframe.local_to_ui_tz",
"datetime.datetime.fromtimestamp",
"webframe.handle_exception",
"configparser.ConfigParser",
"os.path.isabs",
"time.strftime",
"typing.cast",
"os.path.dirname",
"webframe.fill_missing_header_items",
"unittest.main",
"unittest.mock.patch",
"webframe.handle_static"
]
| [((3887, 3902), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3900, 3902), False, 'import unittest\n'), ((833, 878), 'webframe.handle_static', 'webframe.handle_static', (['"""/osm/static/osm.css"""'], {}), "('/osm/static/osm.css')\n", (855, 878), False, 'import webframe\n'), ((1081, 1131), 'webframe.handle_static', 'webframe.handle_static', (['"""/osm/static/sorttable.js"""'], {}), "('/osm/static/sorttable.js')\n", (1103, 1131), False, 'import webframe\n'), ((1373, 1419), 'webframe.handle_static', 'webframe.handle_static', (['"""/osm/static/test.xyz"""'], {}), "('/osm/static/test.xyz')\n", (1395, 1419), False, 'import webframe\n'), ((3650, 3715), 'webframe.fill_missing_header_items', 'webframe.fill_missing_header_items', (['streets', 'relation_name', 'items'], {}), '(streets, relation_name, items)\n', (3684, 3715), False, 'import webframe\n'), ((2679, 2698), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (2692, 2698), False, 'import os\n'), ((2870, 2897), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2895, 2897), False, 'import configparser\n'), ((3010, 3062), 'unittest.mock.patch', 'unittest.mock.patch', (['"""util.get_abspath"""', 'get_abspath'], {}), "('util.get_abspath', get_abspath)\n", (3029, 3062), False, 'import unittest\n'), ((2119, 2156), 'typing.cast', 'cast', (['"""StartResponse"""', 'start_response'], {}), "('StartResponse', start_response)\n", (2123, 2156), False, 'from typing import cast\n'), ((2187, 2231), 'webframe.handle_exception', 'webframe.handle_exception', (['environ', 'callback'], {}), '(environ, callback)\n', (2212, 2231), False, 'import webframe\n'), ((2258, 2292), 'typing.cast', 'cast', (['List[bytes]', 'output_iterable'], {}), '(List[bytes], output_iterable)\n', (2262, 2292), False, 'from typing import cast\n'), ((2760, 2785), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2775, 2785), False, 'import os\n'), ((3081, 3135), 'unittest.mock.patch', 'unittest.mock.patch', (['"""webframe.get_config"""', 'get_config'], {}), "('webframe.get_config', get_config)\n", (3100, 3135), False, 'import unittest\n'), ((3164, 3198), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(0)'], {}), '(0)\n', (3195, 3198), False, 'import datetime\n'), ((3223, 3256), 'webframe.local_to_ui_tz', 'webframe.local_to_ui_tz', (['local_dt'], {}), '(local_dt)\n', (3246, 3256), False, 'import webframe\n'), ((3276, 3297), 'time.strftime', 'time.strftime', (['"""%Z%z"""'], {}), "('%Z%z')\n", (3289, 3297), False, 'import time\n')] |
from __future__ import unicode_literals
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import requests
from youtube_search import YoutubeSearch
import youtube_dl
import eyed3.id3
import eyed3
import lyricsgenius
import telepot
spotifyy = spotipy.Spotify(
client_credentials_manager=SpotifyClientCredentials(client_id='a145db3dcd564b9592dacf10649e4ed5',
client_secret='<KEY>'))
genius = lyricsgenius.Genius('<KEY>')
token = '<PASSWORD>'
bot = telepot.Bot(token)
def DOWNLOADMP3(link,chat_id):
#Get MetaData
results = spotifyy.track(link)
song = results['name']
print('[Spotify]MetaData Found!')
artist = results['artists'][0]['name']
YTSEARCH = str(song + " " + artist)
artistfinder = results['artists']
tracknum = results['track_number']
album = results['album']['name']
realese_date = int(results['album']['release_date'][:4])
if len(artistfinder) > 1:
fetures = "( Ft."
for lomi in range(0, len(artistfinder)):
try:
if lomi < len(artistfinder) - 2:
artistft = artistfinder[lomi + 1]['name'] + ", "
fetures += artistft
else:
artistft = artistfinder[lomi + 1]['name'] + ")"
fetures += artistft
except:
pass
else:
fetures = ""
time_duration = ""
time_duration1 = ""
time_duration2 = ""
time_duration3 = ""
millis = results['duration_ms']
millis = int(millis)
seconds = (millis / 1000) % 60
minutes = (millis / (1000 * 60)) % 60
seconds = int(seconds)
minutes = int(minutes)
if seconds >= 10:
if seconds < 59:
time_duration = "{0}:{1}".format(minutes, seconds)
time_duration1 = "{0}:{1}".format(minutes, seconds + 1)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
if seconds == 10:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds < 58:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
elif seconds == 58:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration1 = "{0}:0{1}".format(minutes + 1, seconds - 59)
if seconds == 59:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
else:
time_duration = "{0}:0{1}".format(minutes, seconds)
time_duration1 = "{0}:0{1}".format(minutes, seconds + 1)
if seconds < 8:
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
elif seconds == 9 or seconds == 8:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds == 0:
time_duration2 = "{0}:{1}".format(minutes - 1, seconds + 59)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
else:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
trackname = song + fetures
#Download Cover
response = requests.get(results['album']['images'][0]['url'])
DIRCOVER = "songpicts//" + trackname + ".png"
file = open(DIRCOVER, "wb")
file.write(response.content)
file.close()
#search for music on youtube
results = list(YoutubeSearch(str(YTSEARCH)).to_dict())
LINKASLI = ''
for URLSSS in results:
timeyt = URLSSS["duration"]
print(URLSSS['title'])
if timeyt == time_duration or timeyt == time_duration1:
LINKASLI = URLSSS['url_suffix']
break
elif timeyt == time_duration2 or timeyt == time_duration3:
LINKASLI = URLSSS['url_suffix']
break
YTLINK = str("https://www.youtube.com/" + LINKASLI)
print('[Youtube]song found!')
print(f'[Youtube]Link song on youtube : {YTLINK}')
    #Download Music from youtube
options = {
# PERMANENT options
'format': 'bestaudio/best',
'keepvideo': False,
'outtmpl': f'song//{trackname}.*',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '320'
}]
}
with youtube_dl.YoutubeDL(options) as mp3:
mp3.download([YTLINK])
aud = eyed3.load(f"song//{trackname}.mp3")
print('[Youtube]Song Downloaded!')
aud.tag.artist = artist
aud.tag.album = album
aud.tag.album_artist = artist
aud.tag.title = trackname
aud.tag.track_num = tracknum
aud.tag.year = realese_date
try:
songok = genius.search_song(song, artist)
aud.tag.lyrics.set(songok.lyrics)
print('[Genius]Song lyric Found!')
except:
print('[Genius]Song lyric NOT Found!')
aud.tag.images.set(3, open("songpicts//" + trackname + ".png", 'rb').read(), 'image/png')
aud.tag.save()
bot.sendAudio(chat_id, open(f'song//{trackname}.mp3', 'rb'), title=trackname)
print('[Telegram]Song sent!')
def album(link):
results = spotifyy.album_tracks(link)
albums = results['items']
while results['next']:
results = spotifyy.next(results)
albums.extend(results['items'])
print('[Spotify]Album Found!')
return albums
def artist(link):
results = spotifyy.artist_top_tracks(link)
albums = results['tracks']
print('[Spotify]Artist Found!')
return albums
def searchalbum(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['album']['external_urls']['spotify']
def playlist(link):
results = spotifyy.playlist_tracks(link)
print('[Spotify]Playlist Found!')
return results['items']
def searchsingle(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['href']
def searchartist(searchstr):
results = spotifyy.search(searchstr)
return results['tracks']['items'][0]['artists'][0]["external_urls"]['spotify']
| [
"telepot.Bot",
"lyricsgenius.Genius",
"spotipy.oauth2.SpotifyClientCredentials",
"requests.get",
"youtube_dl.YoutubeDL",
"eyed3.load"
]
| [((467, 495), 'lyricsgenius.Genius', 'lyricsgenius.Genius', (['"""<KEY>"""'], {}), "('<KEY>')\n", (486, 495), False, 'import lyricsgenius\n'), ((525, 543), 'telepot.Bot', 'telepot.Bot', (['token'], {}), '(token)\n', (536, 543), False, 'import telepot\n'), ((3585, 3635), 'requests.get', 'requests.get', (["results['album']['images'][0]['url']"], {}), "(results['album']['images'][0]['url'])\n", (3597, 3635), False, 'import requests\n'), ((4809, 4845), 'eyed3.load', 'eyed3.load', (['f"""song//{trackname}.mp3"""'], {}), "(f'song//{trackname}.mp3')\n", (4819, 4845), False, 'import eyed3\n'), ((307, 404), 'spotipy.oauth2.SpotifyClientCredentials', 'SpotifyClientCredentials', ([], {'client_id': '"""a145db3dcd564b9592dacf10649e4ed5"""', 'client_secret': '"""<KEY>"""'}), "(client_id='a145db3dcd564b9592dacf10649e4ed5',\n client_secret='<KEY>')\n", (331, 404), False, 'from spotipy.oauth2 import SpotifyClientCredentials\n'), ((4729, 4758), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', (['options'], {}), '(options)\n', (4749, 4758), False, 'import youtube_dl\n')] |
#------------------------------------------------------------------------------
# Copyright (c) 2018-2019, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
"""Test the typed dictionary.
"""
import sys
import pytest
from atom.api import Atom, Dict, Int, atomdict
@pytest.fixture
def atom_dict():
"""Atom with different Dict members.
"""
class DictAtom(Atom):
untyped = Dict()
keytyped = Dict(Int())
valuetyped = Dict(value=Int())
fullytyped = Dict(Int(), Int())
untyped_default = Dict(default={1: 1})
keytyped_default = Dict(Int(), default={1: 1})
valuetyped_default = Dict(value=Int(), default={1: 1})
fullytyped_default = Dict(Int(), Int(), default={1: 1})
return DictAtom()
MEMBERS = ['untyped', 'keytyped', 'valuetyped', 'fullytyped',
'untyped_default', 'keytyped_default', 'valuetyped_default',
'fullytyped_default']
@pytest.mark.parametrize('member', MEMBERS)
def test_instance(atom_dict, member):
"""Test the repr.
"""
assert isinstance(getattr(atom_dict, member), atomdict)
@pytest.mark.parametrize('member', MEMBERS)
def test_repr(atom_dict, member):
"""Test the repr.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert repr(getattr(atom_dict, member)) == repr(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_len(atom_dict, member):
"""Test the len.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert len(getattr(atom_dict, member)) == len(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_contains(atom_dict, member):
"""Test __contains__.
"""
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert 5 in getattr(atom_dict, member)
del getattr(atom_dict, member)[5]
assert 5 not in getattr(atom_dict, member)
@pytest.mark.parametrize('member', MEMBERS)
def test_keys(atom_dict, member):
"""Test the keys.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).keys() == d.keys()
@pytest.mark.parametrize('member', MEMBERS)
def test_copy(atom_dict, member):
"""Test copy.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).copy() == d
def test_setitem(atom_dict):
"""Test setting items.
"""
atom_dict.untyped[''] = 1
assert atom_dict.untyped[''] == 1
atom_dict.keytyped[1] = ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped[''] = 1
atom_dict.valuetyped[1] = 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped[''] = ''
atom_dict.fullytyped[1] = 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped[''] = 1
with pytest.raises(TypeError):
atom_dict.fullytyped[1] = ''
def test_setdefault(atom_dict):
"""Test using setdefault.
"""
assert atom_dict.untyped.setdefault('', 1) == 1
assert atom_dict.untyped.setdefault('', 2) == 1
assert atom_dict.untyped[''] == 1
assert atom_dict.keytyped.setdefault(1, '') == ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped.setdefault('', 1)
assert atom_dict.valuetyped.setdefault(1, 1) == 1
assert atom_dict.valuetyped.setdefault(1, '') == 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.setdefault(2, '')
assert atom_dict.fullytyped.setdefault(1, 1) == 1
assert atom_dict.fullytyped.setdefault(1, '') == 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault('', 1)
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault(2, '')
def test_update(atom_dict):
"""Test update a dict.
"""
atom_dict.untyped.update({'': 1})
assert atom_dict.untyped[''] == 1
atom_dict.untyped.update([('1', 1)])
assert atom_dict.untyped['1'] == 1
atom_dict.keytyped.update({1: 1})
assert atom_dict.keytyped[1] == 1
atom_dict.keytyped.update([(2, 1)])
assert atom_dict.keytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.keytyped.update({'': 1})
atom_dict.valuetyped.update({1: 1})
assert atom_dict.valuetyped[1] == 1
atom_dict.valuetyped.update([(2, 1)])
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.update({'': ''})
atom_dict.fullytyped.update({1: 1})
assert atom_dict.fullytyped[1] == 1
atom_dict.fullytyped.update([(2, 1)])
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': 1})
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': ''})
| [
"atom.api.Int",
"pytest.mark.parametrize",
"atom.api.Dict",
"pytest.raises"
]
| [((1130, 1172), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""member"""', 'MEMBERS'], {}), "('member', MEMBERS)\n", (1153, 1172), False, 'import pytest\n'), ((1305, 1347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""member"""', 'MEMBERS'], {}), "('member', MEMBERS)\n", (1328, 1347), False, 'import pytest\n'), ((1631, 1673), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""member"""', 'MEMBERS'], {}), "('member', MEMBERS)\n", (1654, 1673), False, 'import pytest\n'), ((1953, 1995), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""member"""', 'MEMBERS'], {}), "('member', MEMBERS)\n", (1976, 1995), False, 'import pytest\n'), ((2270, 2312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""member"""', 'MEMBERS'], {}), "('member', MEMBERS)\n", (2293, 2312), False, 'import pytest\n'), ((2598, 2640), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""member"""', 'MEMBERS'], {}), "('member', MEMBERS)\n", (2621, 2640), False, 'import pytest\n'), ((589, 595), 'atom.api.Dict', 'Dict', ([], {}), '()\n', (593, 595), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((732, 754), 'atom.api.Dict', 'Dict', ([], {'default': '{(1): 1}'}), '(default={(1): 1})\n', (736, 754), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((3127, 3151), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3140, 3151), False, 'import pytest\n'), ((3270, 3294), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3283, 3294), False, 'import pytest\n'), ((3416, 3440), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3429, 3440), False, 'import pytest\n'), ((3488, 3512), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3501, 3512), False, 'import pytest\n'), ((3872, 3896), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3885, 3896), False, 'import pytest\n'), ((4102, 4126), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4115, 4126), False, 'import pytest\n'), ((4334, 4358), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4347, 4358), False, 'import pytest\n'), ((4416, 4440), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4429, 4440), False, 'import pytest\n'), ((4875, 4899), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4888, 4899), False, 'import pytest\n'), ((5116, 5140), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5129, 5140), False, 'import pytest\n'), ((5360, 5384), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5373, 5384), False, 'import pytest\n'), ((5440, 5464), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5453, 5464), False, 'import pytest\n'), ((620, 625), 'atom.api.Int', 'Int', ([], {}), '()\n', (623, 625), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((692, 697), 'atom.api.Int', 'Int', ([], {}), '()\n', (695, 697), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((699, 704), 'atom.api.Int', 'Int', ([], {}), '()\n', (702, 704), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((785, 790), 'atom.api.Int', 'Int', ([], {}), '()\n', (788, 790), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((905, 910), 'atom.api.Int', 'Int', ([], {}), '()\n', (908, 910), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((912, 917), 'atom.api.Int', 'Int', ([], {}), '()\n', (915, 917), False, 
'from atom.api import Atom, Dict, Int, atomdict\n'), ((659, 664), 'atom.api.Int', 'Int', ([], {}), '()\n', (662, 664), False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((848, 853), 'atom.api.Int', 'Int', ([], {}), '()\n', (851, 853), False, 'from atom.api import Atom, Dict, Int, atomdict\n')] |
from django.contrib import admin
from bible.models import Bible, VerseOfTheDay
@admin.register(Bible)
class BibleAdmin(admin.ModelAdmin):
list_display = ['__str__', 'text']
readonly_fields = ['book', 'chapter', 'verse', 'text', 'category']
search_fields = ['text', 'book', 'chapter']
list_filter = ['category', 'book']
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
@admin.register(VerseOfTheDay)
class VerseOfTheDayAdmin(admin.ModelAdmin):
autocomplete_fields = ['verse']
raw_id_fields = ['verse']
| [
"django.contrib.admin.register"
]
| [((82, 103), 'django.contrib.admin.register', 'admin.register', (['Bible'], {}), '(Bible)\n', (96, 103), False, 'from django.contrib import admin\n'), ((561, 590), 'django.contrib.admin.register', 'admin.register', (['VerseOfTheDay'], {}), '(VerseOfTheDay)\n', (575, 590), False, 'from django.contrib import admin\n')] |
# Copyright 2020 Soil, Inc.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import inspect
import sys
import threading
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
import soil.conf
from soil.i18n import _
CONF = soil.conf.CONF
LOG = logging.getLogger(__name__)
_LOCK = threading.Lock()
_FACADE = None
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database)
)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def dispose_engine():
get_engine().dispose()
| [
"threading.Lock",
"oslo_log.log.getLogger"
]
| [((1137, 1164), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1154, 1164), True, 'from oslo_log import log as logging\n'), ((1175, 1191), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1189, 1191), False, 'import threading\n')] |
from __future__ import absolute_import, print_function
import unittest
import os
import tensorflow as tf
from tensorflow.keras import regularizers
from niftynet.network.simple_gan import SimpleGAN
from tests.niftynet_testcase import NiftyNetTestCase
class SimpleGANTest(NiftyNetTestCase):
def test_3d_reg_shape(self):
input_shape = (2, 32, 32, 32, 1)
noise_shape = (2, 512)
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
def test_2d_reg_shape(self):
input_shape = (2, 64, 64, 1)
noise_shape = (2, 512)
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.ones",
"niftynet.network.simple_gan.SimpleGAN",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.test.main"
]
| [((1488, 1502), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (1500, 1502), True, 'import tensorflow as tf\n'), ((410, 430), 'tensorflow.ones', 'tf.ones', (['input_shape'], {}), '(input_shape)\n', (417, 430), True, 'import tensorflow as tf\n'), ((443, 463), 'tensorflow.ones', 'tf.ones', (['noise_shape'], {}), '(noise_shape)\n', (450, 463), True, 'import tensorflow as tf\n'), ((495, 506), 'niftynet.network.simple_gan.SimpleGAN', 'SimpleGAN', ([], {}), '()\n', (504, 506), False, 'from niftynet.network.simple_gan import SimpleGAN\n'), ((989, 1009), 'tensorflow.ones', 'tf.ones', (['input_shape'], {}), '(input_shape)\n', (996, 1009), True, 'import tensorflow as tf\n'), ((1022, 1042), 'tensorflow.ones', 'tf.ones', (['noise_shape'], {}), '(noise_shape)\n', (1029, 1042), True, 'import tensorflow as tf\n'), ((1074, 1085), 'niftynet.network.simple_gan.SimpleGAN', 'SimpleGAN', ([], {}), '()\n', (1083, 1085), False, 'from niftynet.network.simple_gan import SimpleGAN\n'), ((631, 674), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (672, 674), True, 'import tensorflow as tf\n'), ((1210, 1253), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (1251, 1253), True, 'import tensorflow as tf\n')] |
import time
import thingspeak_wrapper as tsw
# Instantiate the ThingWrapper class with (CHANNEL_ID, WRITE_API_KEY, READ_API_KEY)
# if it is a public channel just pass the CHANNEL_ID argument; the api_key arguments default to None
my_channel = tsw.wrapper.ThingWrapper(501309, '6TQDNWJQ44FA0GAQ', '10EVD2N6YIHI5O7Z')
# all set of functions are:
# my_channel.sender()
# my_channel.multiple_sender()
# my_channel.get_json_feeds()
# my_channel.get_json_feeds_from()
# my_channel.get_xml_feeds()
# my_channel.get_xml_feeds_from()
# my_channel.get_csv_feeds()
# my_channel.get_csv_feeds_from()
# ---------------------------
# Now you can use all the possible functions
# Send a value to a single field
my_channel.sender(1, 4)
# this delay is due to a limitation of the ThingSpeak free account, which allows you to post data at most once every 15 seconds
time.sleep(15)
# ---------------------------
# Send data to multiple fields
# It takes 2 inputs as lists ([..], [..])
# Create lists of fields and values
fields = [1, 2, 3]
values = [22.0, 1029, 700]
# pass them to the function
my_channel.multiple_sender(fields, values)
# ---------------------------
# The get-data functions return data as json, xml or csv
# optionally, csv can be returned as a pandas DataFrame
# pass arguments to the function (field, data_quantity)
# default values are (fields='feeds', results_quantity=None)
# with the defaults you will get all fields and all values (max 8000)
json_field1 = my_channel.get_json_feeds(1, 300)
print(json_field1)
# get xml data pass same values as previous function
xml_field1 = my_channel.get_xml_feeds(1, 300)
print(xml_field1)
# get csv data
# this function requires you to specify (field, pandas_format=True, results_quantity=None)
# defaults are (fields='feeds', pandas_format=True, results_quantity=None)
csv_field1 = my_channel.get_csv_feeds(1, pandas_format=True,
results_quantity=300)
print(csv_field1)
# data without pandas_format
csv_no_pandas = my_channel.get_csv_feeds(1, pandas_format=False,
results_quantity=300)
print(csv_no_pandas)
# it is also possible to request data between specific start and stop dates
# set date and time as strings YYYY-MM-DD HH:MM:SS
start_date, start_time = '2018-05-21', '12:00:00'
stop_date, stop_time = '2018-05-21', '23:59:59'
# pass values to the function
# defaults are (start_date, start_time, stop_date=None, stop_time=None, fields='feeds')
values_from_date = my_channel.get_json_feeds_from(start_date, start_time, stop_date, stop_time, 1)
print(values_from_date)
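# The xml and csv date-range variants listed above (get_xml_feeds_from,
# get_csv_feeds_from) follow the same pattern; the call below is illustrative
# only and assumes the same argument order as the json variant:
# xml_from_date = my_channel.get_xml_feeds_from(start_date, start_time, stop_date, stop_time, 1)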
| [
"thingspeak_wrapper.wrapper.ThingWrapper",
"time.sleep"
]
| [((228, 300), 'thingspeak_wrapper.wrapper.ThingWrapper', 'tsw.wrapper.ThingWrapper', (['(501309)', '"""6TQDNWJQ44FA0GAQ"""', '"""10EVD2N6YIHI5O7Z"""'], {}), "(501309, '6TQDNWJQ44FA0GAQ', '10EVD2N6YIHI5O7Z')\n", (252, 300), True, 'import thingspeak_wrapper as tsw\n'), ((820, 834), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (830, 834), False, 'import time\n')] |
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import matplotlib.pyplot as plt
import neptune
import numpy as np
import skopt.plots as sk_plots
from skopt.utils import dump
from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run
class NeptuneCallback:
"""Logs hyperparameter optimization process to Neptune.
Specifically using NeptuneCallback will log: run metrics and run parameters, best run metrics so far, and
the current results checkpoint.
Examples:
Initialize NeptuneCallback::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(api_token='<PASSWORD>',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
neptune_callback = sk_utils.NeptuneCallback()
Run skopt training passing neptune_callback as a callback::
...
results = skopt.forest_minimize(objective, space, callback=[neptune_callback],
base_estimator='ET', n_calls=100, n_random_starts=10)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
def __init__(self, experiment=None, log_checkpoint=True):
self._exp = experiment if experiment else neptune
expect_not_a_run(self._exp)
self.log_checkpoint = log_checkpoint
self._iteration = 0
def __call__(self, res):
self._exp.log_metric('run_score', x=self._iteration, y=res.func_vals[-1])
self._exp.log_metric('best_so_far_run_score', x=self._iteration, y=np.min(res.func_vals))
self._exp.log_text('run_parameters', x=self._iteration, y=NeptuneCallback._get_last_params(res))
if self.log_checkpoint:
self._exp.log_artifact(_export_results_object(res), 'results.pkl')
self._iteration += 1
@staticmethod
def _get_last_params(res):
param_vals = res.x_iters[-1]
named_params = _format_to_named_params(param_vals, res)
return str(named_params)
def log_results(results, experiment=None, log_plots=True, log_pickle=True):
"""Logs runs results and parameters to neptune.
Logs all hyperparameter optimization results to Neptune. Those include best score ('best_score' metric),
best parameters ('best_parameters' property), convergence plot ('diagnostics' log),
evaluations plot ('diagnostics' log), and objective plot ('diagnostics' log).
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an output
| of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
log_plots: ('bool'): If True skopt plots will be logged to Neptune.
log_pickle: ('bool'): if True pickled skopt results object will be logged to Neptune.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Initialize Neptune::
import neptune
neptune.init(api_token='<PASSWORD>',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
Send best parameters to Neptune::
import neptunecontrib.monitoring.skopt as sk_utils
sk_utils.log_results(results)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
_exp = experiment if experiment else neptune
expect_not_a_run(_exp)
_log_best_score(results, _exp)
_log_best_parameters(results, _exp)
if log_plots:
_log_plot_convergence(results, _exp)
_log_plot_evaluations(results, _exp)
_log_plot_regret(results, _exp)
_log_plot_objective(results, _exp)
if log_pickle:
_log_results_object(results, _exp)
def NeptuneMonitor(*args, **kwargs):
message = """NeptuneMonitor was renamed to NeptuneCallback and will be removed in future releases.
"""
warnings.warn(message)
return NeptuneCallback(*args, **kwargs)
def _log_best_parameters(results, experiment):
expect_not_a_run(experiment)
named_params = ([(dimension.name, param) for dimension, param in zip(results.space, results.x)])
experiment.set_property('best_parameters', str(named_params))
def _log_best_score(results, experiment):
experiment.log_metric('best_score', results.fun)
def _log_plot_convergence(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_convergence(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_regret(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_regret(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_evaluations(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_evaluations(results, bins=10), fig=fig)
experiment.log_image(name, fig)
def _log_plot_objective(results, experiment, name='diagnostics'):
try:
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_objective(results), fig=fig)
experiment.log_image(name, fig)
except Exception as e:
print('Could not create the objective chart due to error: {}'.format(e))
def _log_results_object(results, experiment=None):
expect_not_a_run(experiment)
experiment.log_artifact(_export_results_object(results), 'results.pkl')
def _export_results_object(results):
from io import BytesIO
results.specs['args'].pop('callback', None)
buffer = BytesIO()
dump(results, buffer, store_objective=False)
buffer.seek(0)
return buffer
def _format_to_named_params(params, result):
return [(dimension.name, param) for dimension, param in zip(result.space, params)]
| [
"skopt.plots.plot_convergence",
"io.BytesIO",
"skopt.plots.plot_regret",
"matplotlib.pyplot.figure",
"skopt.utils.dump",
"skopt.plots.plot_evaluations",
"numpy.min",
"warnings.warn",
"neptunecontrib.monitoring.utils.expect_not_a_run",
"matplotlib.pyplot.subplots",
"skopt.plots.plot_objective"
]
| [((4317, 4339), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['_exp'], {}), '(_exp)\n', (4333, 4339), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((4825, 4847), 'warnings.warn', 'warnings.warn', (['message'], {}), '(message)\n', (4838, 4847), False, 'import warnings\n'), ((4945, 4973), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['experiment'], {}), '(experiment)\n', (4961, 4973), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((5312, 5340), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['experiment'], {}), '(experiment)\n', (5328, 5340), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((5355, 5369), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5367, 5369), True, 'import matplotlib.pyplot as plt\n'), ((5374, 5415), 'skopt.plots.plot_convergence', 'sk_plots.plot_convergence', (['results'], {'ax': 'ax'}), '(results, ax=ax)\n', (5399, 5415), True, 'import skopt.plots as sk_plots\n'), ((5521, 5549), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['experiment'], {}), '(experiment)\n', (5537, 5549), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((5564, 5578), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5576, 5578), True, 'import matplotlib.pyplot as plt\n'), ((5583, 5619), 'skopt.plots.plot_regret', 'sk_plots.plot_regret', (['results'], {'ax': 'ax'}), '(results, ax=ax)\n', (5603, 5619), True, 'import skopt.plots as sk_plots\n'), ((5730, 5758), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['experiment'], {}), '(experiment)\n', (5746, 5758), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((5769, 5797), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (5779, 5797), True, 'import matplotlib.pyplot as plt\n'), ((6335, 6363), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['experiment'], {}), '(experiment)\n', (6351, 6363), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((6569, 6578), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6576, 6578), False, 'from io import BytesIO\n'), ((6583, 6627), 'skopt.utils.dump', 'dump', (['results', 'buffer'], {'store_objective': '(False)'}), '(results, buffer, store_objective=False)\n', (6587, 6627), False, 'from skopt.utils import dump\n'), ((1951, 1978), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['self._exp'], {}), '(self._exp)\n', (1967, 1978), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((5817, 5860), 'skopt.plots.plot_evaluations', 'sk_plots.plot_evaluations', (['results'], {'bins': '(10)'}), '(results, bins=10)\n', (5842, 5860), True, 'import skopt.plots as sk_plots\n'), ((5992, 6020), 'neptunecontrib.monitoring.utils.expect_not_a_run', 'expect_not_a_run', (['experiment'], {}), '(experiment)\n', (6008, 6020), False, 'from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run\n'), ((6035, 6063), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (6045, 6063), True, 'import matplotlib.pyplot as plt\n'), ((6087, 6119), 'skopt.plots.plot_objective', 'sk_plots.plot_objective', (['results'], {}), '(results)\n', (6110, 6119), True, 'import skopt.plots as sk_plots\n'), 
((2240, 2261), 'numpy.min', 'np.min', (['res.func_vals'], {}), '(res.func_vals)\n', (2246, 2261), True, 'import numpy as np\n')] |
# Generated by Django 2.1.5 on 2019-02-12 21:18
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("api", "0049_add_all_other_translations")]
operations = [
migrations.CreateModel(
name="ClickThroughAgreement",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
],
),
migrations.AddField(
model_name="job",
name="click_through_agreement",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="api.ClickThroughAgreement",
),
),
]
| [
"django.db.models.AutoField",
"django.db.models.TextField",
"django.db.models.ForeignKey"
]
| [((830, 939), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""api.ClickThroughAgreement"""'}), "(null=True, on_delete=django.db.models.deletion.PROTECT,\n to='api.ClickThroughAgreement')\n", (847, 939), False, 'from django.db import migrations, models\n'), ((408, 501), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (424, 501), False, 'from django.db import migrations, models\n'), ((662, 680), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (678, 680), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
'''Generally useful bits and bobs.'''
import queue # For PrintThread and exe_run
from time import sleep, time, gmtime, strftime # For lock timeout, exe_run timeout and logging
from multiprocessing import RLock
from copy import copy
import threading # For PrintThread
import sys
import os # For ChangeDir, has_admin
import stat # To help deltree out
from collections import deque # For storing a window of debug
from telnetlib import Telnet # For talking to JLink server
import socket
import shutil # To delete a directory tree
import signal # For CTRL_C_EVENT
import subprocess
import platform # Figure out current OS
import re # Regular Expression
import serial # Pyserial (make sure to do pip install pyserial)
import psutil # For killing things (make sure to do pip install psutil)
import requests # For HTTP comms with a KMTronic box (do pip install requests)
import u_settings
# Since this function is used by the global variables below it needs
# to be placed here.
def is_linux():
'''Returns True when system is Linux'''
return platform.system() == 'Linux'
# Since this function is used by the global variables below it needs
# to be placed here.
def pick_by_os(linux=None, other=None):
'''
This is a convenience function for selecting a value based on platform.
As an example the line below will print out "Linux" when running on a
Linux platform and "Not Linux" when running on some other platform:
print( u_utils.pick_by_os(linux="Linux", other="Not Linux") )
'''
if is_linux():
return linux
return other
# The port that this agent service runs on
# Deliberately NOT a setting, we need to be sure
# everyone uses the same value
AGENT_SERVICE_PORT = 17003
# The maximum number of characters that an agent will
# use from controller_name when constructing a directory
# name for a ubxlib branch to be checked out into
AGENT_WORKING_SUBDIR_CONTROLLER_NAME_MAX_LENGTH = 4
# How long to wait for an install lock in seconds
INSTALL_LOCK_WAIT_SECONDS = u_settings.INSTALL_LOCK_WAIT_SECONDS #(60 * 60)
# The URL for Unity, the unit test framework
UNITY_URL = u_settings.UNITY_URL #"https://github.com/ThrowTheSwitch/Unity"
# The sub-directory that Unity is usually put in
# (off the working directory)
UNITY_SUBDIR = u_settings.UNITY_SUBDIR #"Unity"
# The path to DevCon, a Windows tool that allows
# USB devices to be reset, amongst other things
DEVCON_PATH = u_settings.DEVCON_PATH #"devcon.exe"
# The path to jlink.exe (or just the name 'cos it's on the path)
JLINK_PATH = u_settings.JLINK_PATH #"jlink.exe"
# The port number for SWO trace capture out of JLink
JLINK_SWO_PORT = u_settings.JLINK_SWO_PORT #19021
# The port number for GDB control of ST-LINK GDB server
STLINK_GDB_PORT = u_settings.STLINK_GDB_PORT #61200
# The port number for SWO trace capture out of ST-LINK GDB server
STLINK_SWO_PORT = u_settings.STLINK_SWO_PORT #61300
# The format string passed to strftime()
# for logging prints
TIME_FORMAT = u_settings.TIME_FORMAT #"%Y-%m-%d_%H:%M:%S"
# The default guard time waiting for a platform lock in seconds
PLATFORM_LOCK_GUARD_TIME_SECONDS = u_settings.PLATFORM_LOCK_GUARD_TIME_SECONDS #60 * 60
# The default guard time for downloading to a target in seconds
DOWNLOAD_GUARD_TIME_SECONDS = u_settings.DOWNLOAD_GUARD_TIME_SECONDS #60
# The default guard time for running tests in seconds
RUN_GUARD_TIME_SECONDS = u_settings.RUN_GUARD_TIME_SECONDS #60 * 60
# The default inactivity timer for running tests in seconds
RUN_INACTIVITY_TIME_SECONDS = u_settings.RUN_INACTIVITY_TIME_SECONDS #60 * 5
# The name of the #define that forms the filter string
# for which tests to run
FILTER_MACRO_NAME = u_settings.FILTER_MACRO_NAME #"U_CFG_APP_FILTER"
# The name of the environment variable that indicates we're running under automation
ENV_UBXLIB_AUTO = "U_UBXLIB_AUTO"
# The time for which to wait for something from the
# queue in exe_run(). If this is too short, in a
# multiprocessing world or on a slow machine, it is
# possible to miss things as the task putting things
# on the queue may be blocked from doing so until
# we've decided the queue has been completely emptied
# and moved on
EXE_RUN_QUEUE_WAIT_SECONDS = u_settings.EXE_RUN_QUEUE_WAIT_SECONDS #1
# The number of seconds a USB cutter and the bit positions of
# a KMTronic box are switched off for
HW_RESET_DURATION_SECONDS = u_settings.HW_RESET_DURATION_SECONDS # e.g. 5
# Executable file extension. This will be "" for Linux
# and ".exe" for Windows
EXE_EXT = pick_by_os(linux="", other=".exe")
def keep_going(flag, printer=None, prompt=None):
'''Check a keep_going flag'''
do_not_stop = True
if flag is not None and not flag.is_set():
do_not_stop = False
if printer and prompt:
printer.string("{}aborting as requested.".format(prompt))
return do_not_stop
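# A minimal usage sketch for keep_going(); it assumes the flag passed in is a
# threading.Event (consistent with the is_set() call above), which another
# thread clears in order to request an abort:
#
#   flag = threading.Event()
#   flag.set()
#   while keep_going(flag, printer, prompt):
#       do_a_chunk_of_work()   # hypothetical work function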
# subprocess arguments behaves a little differently on Linux and Windows
# depending if a shell is used or not, which can be read here:
# https://stackoverflow.com/a/15109975
# This function will compensate for these deviations
def subprocess_osify(cmd, shell=True):
''' expects an array of strings being [command, param, ...] '''
if is_linux() and shell:
line = ''
for item in cmd:
# Put everything in a single string and quote args containing spaces
if ' ' in item:
line += '\"{}\" '.format(item)
else:
line += '{} '.format(item)
cmd = line
return cmd
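# Illustrative sketch of the two behaviours of subprocess_osify(); the exact
# output strings are inferred from the code above rather than being a
# documented contract:
#
#   subprocess_osify(["git", "clone", "my repo"], shell=True)
#   # on Linux      -> 'git clone "my repo" '      (one string, spaced args quoted)
#   # on other OSes -> ["git", "clone", "my repo"] (list returned unchanged)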
def split_command_line_args(cmd_line):
''' Will split a command line string into a list of arguments.
Quoted arguments will be preserved as one argument '''
return [p for p in re.split("( |\\\".*?\\\"|'.*?')", cmd_line) if p.strip()]
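# Example of the quoting behaviour (the expected result is inferred from the
# regular expression above):
#
#   split_command_line_args('gdb --args "my prog.exe" -v')
#   # -> ['gdb', '--args', '"my prog.exe"', '-v']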
def get_actual_path(path):
    '''Given a path, return the real path if its drive is a subst'''
actual_path = path
if is_linux():
return actual_path
if os.name == 'nt':
# Get a list of substs
text = subprocess.check_output("subst",
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# Lines should look like this:
# Z:\: => C:\projects\ubxlib_priv
# So, in this example, if we were given z:\blah
# then the actual path should be C:\projects\ubxlib_priv\blah
text = line.decode()
bits = text.rsplit(": => ")
if (len(bits) > 1) and (len(path) > 1) and \
(bits[0].lower()[0:2] == path[0:2].lower()):
actual_path = bits[1] + path[2:]
break
return actual_path
def get_instance_text(instance):
'''Return the instance as a text string'''
instance_text = ""
for idx, item in enumerate(instance):
if idx == 0:
instance_text += str(item)
else:
instance_text += "." + str(item)
return instance_text
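# For example, get_instance_text([13, 0, 1]) returns "13.0.1" (items joined
# with ".").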
# Get a list of instances as a text string separated
# by spaces.
def get_instances_text(instances):
'''Return the instances as a text string'''
instances_text = ""
for instance in instances:
if instance:
instances_text += " {}".format(get_instance_text(instance))
return instances_text
def remove_readonly(func, path, exec_info):
'''Help deltree out'''
del exec_info
os.chmod(path, stat.S_IWRITE)
func(path)
def deltree(directory, printer, prompt):
'''Remove an entire directory tree'''
tries = 3
success = False
if os.path.isdir(directory):
# Retry this as sometimes Windows complains
# that the directory is not empty when it
# it really should be, some sort of internal
# Windows race condition
while not success and (tries > 0):
try:
# Need the onerror bit on Winders, see
# this Stack Overflow post:
# https://stackoverflow.com/questions/1889597/deleting-directory-in-python
shutil.rmtree(directory, onerror=remove_readonly)
success = True
except OSError as ex:
if printer and prompt:
printer.string("{}ERROR unable to delete \"{}\" {}: \"{}\"".
format(prompt, directory,
ex.errno, ex.strerror))
sleep(1)
tries -= 1
else:
success = True
return success
# Some list types aren't quite list types: the lists returned
# by RPyC, for instance, look like lists but are not of type
# list, and so operations such as "in" will fail.
# This converts an instance list (i.e. a list-like object
# containing items that are each another list-like object)
# into a plain-old two-level list.
def copy_two_level_list(instances_in):
'''Convert instances_in into a true list'''
instances_out = []
if instances_in:
for item1 in instances_in:
instances_out1 = []
for item2 in item1:
instances_out1.append(item2)
instances_out.append(copy(instances_out1))
return instances_out
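# For example, an RPyC list-like such as [[0, 4], [13, 0, 1]] is copied into a
# plain Python list of lists with the same contents, so that operations like
# "in" behave as expected.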
# Check if admin privileges are available, from:
# https://stackoverflow.com/questions/2946746/python-checking-if-a-user-has-administrator-privileges
def has_admin():
'''Check for administrator privileges'''
admin = False
if os.name == 'nt':
try:
# only Windows users with admin privileges can read the C:\windows\temp
if os.listdir(os.sep.join([os.environ.get("SystemRoot", "C:\\windows"), "temp"])):
admin = True
except PermissionError:
pass
else:
# Pylint will complain about the following line but
# that's OK, it is only executed if we're NOT on Windows
# and there the geteuid() method will exist
if "SUDO_USER" in os.environ and os.geteuid() == 0:
admin = True
return admin
# Reset a USB port with the given Device Description
def usb_reset(device_description, printer, prompt):
''' Reset a device'''
instance_id = None
found = False
success = False
try:
# Run devcon and parse the output to find the given device
printer.string("{}running {} to look for \"{}\"...". \
format(prompt, DEVCON_PATH, device_description))
cmd = [DEVCON_PATH, "hwids", "=ports"]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# The format of a devcon entry is this:
#
# USB\VID_1366&PID_1015&MI_00\6&38E81674&0&0000
# Name: JLink CDC UART Port (COM45)
# Hardware IDs:
# USB\VID_1366&PID_1015&REV_0100&MI_00
# USB\VID_1366&PID_1015&MI_00
# Compatible IDs:
# USB\Class_02&SubClass_02&Prot_00
# USB\Class_02&SubClass_02
# USB\Class_02
#
# Grab what we hope is the instance ID
line = line.decode()
if line.startswith("USB"):
instance_id = line
else:
# If the next line is the Name we want then we're done
if instance_id and ("Name: " + device_description in line):
found = True
printer.string("{}\"{}\" found with instance ID \"{}\"". \
format(prompt, device_description,
instance_id))
break
instance_id = None
if found:
# Now run devcon to reset the device
printer.string("{}running {} to reset device \"{}\"...". \
format(prompt, DEVCON_PATH, instance_id))
cmd = [DEVCON_PATH, "restart", "@" + instance_id]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=False) # Has to be False or devcon won't work
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
else:
printer.string("{}device with description \"{}\" not found.". \
format(prompt, device_description))
except subprocess.CalledProcessError:
printer.string("{} unable to find and reset device.".format(prompt))
return success
# Open the required serial port.
def open_serial(serial_name, speed, printer, prompt):
'''Open serial port'''
serial_handle = None
text = "{}: trying to open \"{}\" as a serial port...". \
format(prompt, serial_name)
try:
return_value = serial.Serial(serial_name, speed, timeout=0.05)
serial_handle = return_value
printer.string("{} opened.".format(text))
except (ValueError, serial.SerialException) as ex:
printer.string("{}{} while accessing port {}: {}.".
format(prompt, type(ex).__name__,
                              serial_name, str(ex)))
return serial_handle
def open_telnet(port_number, printer, prompt):
'''Open telnet port on localhost'''
telnet_handle = None
text = "{}trying to open \"{}\" as a telnet port on localhost...". \
format(prompt, port_number)
try:
telnet_handle = Telnet("localhost", int(port_number), timeout=5)
if telnet_handle is not None:
printer.string("{} opened.".format(text))
else:
printer.string("{} failed.".format(text))
except (socket.error, socket.timeout, ValueError) as ex:
printer.string("{}{} failed to open telnet {}: {}.".
format(prompt, type(ex).__name__,
port_number, str(ex)))
return telnet_handle
def install_lock_acquire(install_lock, printer, prompt, keep_going_flag=None):
'''Attempt to acquire install lock'''
timeout_seconds = INSTALL_LOCK_WAIT_SECONDS
success = False
if install_lock:
printer.string("{}waiting for install lock...".format(prompt))
while not install_lock.acquire(False) and (timeout_seconds > 0) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
if timeout_seconds > 0:
printer.string("{}got install lock.".format(prompt))
success = True
else:
printer.string("{}failed to aquire install lock.".format(prompt))
else:
printer.string("{}warning, there is no install lock.".format(prompt))
return success
def install_lock_release(install_lock, printer, prompt):
'''Release install lock'''
if install_lock:
install_lock.release()
printer.string("{}install lock released.".format(prompt))
def fetch_repo(url, directory, branch, printer, prompt, submodule_init=True, force=False):
'''Fetch a repo: directory can be relative or absolute, branch can be a hash'''
got_code = False
success = False
dir_text = directory
if dir_text == ".":
dir_text = "this directory"
if printer and prompt:
printer.string("{}in directory {}, fetching"
" {} to {}.".format(prompt, os.getcwd(),
url, dir_text))
if not branch:
branch = "master"
if os.path.isdir(directory):
# Update existing code
with ChangeDir(directory):
if printer and prompt:
printer.string("{}updating code in {}...".
format(prompt, dir_text))
target = branch
if branch.startswith("#"):
# Actually been given a branch, lose the
# preceding #
target = branch[1:len(branch)]
# Try this once and, if it fails and force is set,
# do a git reset --hard and try again
tries = 1
if force:
tries += 1
while tries > 0:
try:
call_list = []
call_list.append("git")
call_list.append("fetch")
call_list.append("origin")
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
# Try to pull the code
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code:
tries = 0
else:
if force:
# git reset --hard
printer.string("{}in directory {} calling git reset --hard...". \
format(prompt, os.getcwd()))
try:
text = subprocess.check_output(subprocess_osify(["git", "reset",
"--hard"]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
force = False
tries -= 1
if not got_code:
# If we still haven't got the code, delete the
# directory for a true clean start
deltree(directory, printer, prompt)
if not os.path.isdir(directory):
# Clone the repo
if printer and prompt:
printer.string("{}cloning from {} into {}...".
format(prompt, url, dir_text))
try:
text = subprocess.check_output(subprocess_osify(["git", "clone", "-q",
url, directory]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code and os.path.isdir(directory):
# Check out the correct branch and recurse submodules
with ChangeDir(directory):
target = "origin/" + branch
if branch.startswith("#"):
                # Actually been given a commit hash, so lose the
                # "origin/" and the preceding #
target = branch[1:len(branch)]
if printer and prompt:
printer.string("{}checking out {}...".
format(prompt, target))
try:
call_list = ["git", "-c", "advice.detachedHead=false",
"checkout", "--no-progress"]
if submodule_init:
call_list.append("--recurse-submodules")
printer.string("{}also recursing sub-modules (can take some time" \
" and gives no feedback).".format(prompt))
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
return success
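# Hedged usage sketch for fetch_repo() (URL and values are illustrative only);
# a "#" prefix on the branch argument marks a commit hash rather than a branch:
#   fetch_repo("https://github.com/example/repo", "repo_dir", "#0123abc",
#              printer, prompt, submodule_init=True, force=True)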
def exe_where(exe_name, help_text, printer, prompt):
'''Find an executable using where.exe or which on linux'''
success = False
try:
printer.string("{}looking for \"{}\"...". \
format(prompt, exe_name))
# See here:
# https://stackoverflow.com/questions/14928860/passing-double-quote-shell-commands-in-python-to-subprocess-popen
# ...for why the construction "".join() is necessary when
# passing things which might have spaces in them.
# It is the only thing that works.
if is_linux():
cmd = ["which {}".format(exe_name.replace(":", "/"))]
printer.string("{}detected linux, calling \"{}\"...".format(prompt, cmd))
else:
cmd = ["where", "".join(exe_name)]
printer.string("{}detected nonlinux, calling \"{}\"...".format(prompt, cmd))
text = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{} found in {}".format(prompt, exe_name,
line.decode()))
success = True
except subprocess.CalledProcessError:
if help_text:
printer.string("{}ERROR {} not found: {}". \
format(prompt, exe_name, help_text))
else:
printer.string("{}ERROR {} not found". \
format(prompt, exe_name))
return success
def exe_version(exe_name, version_switch, printer, prompt):
'''Print the version of a given executable'''
success = False
if not version_switch:
version_switch = "--version"
try:
text = subprocess.check_output(subprocess_osify(["".join(exe_name), version_switch]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError:
printer.string("{}ERROR {} either not found or didn't like {}". \
format(prompt, exe_name, version_switch))
return success
def exe_terminate(process_pid):
'''Jonathan's killer'''
process = psutil.Process(process_pid)
for proc in process.children(recursive=True):
proc.terminate()
process.terminate()
def read_from_process_and_queue(process, read_queue):
'''Read from a process, non-blocking'''
while process.poll() is None:
string = process.stdout.readline().decode()
if string and string != "":
read_queue.put(string)
else:
sleep(0.1)
def queue_get_no_exception(the_queue, block=True, timeout=None):
'''A version of queue.get() that doesn't throw an Empty exception'''
thing = None
try:
thing = the_queue.get(block=block, timeout=timeout)
except queue.Empty:
pass
return thing
def capture_env_var(line, env, printer, prompt):
'''A bit of exe_run that needs to be called from two places'''
# Find a KEY=VALUE bit in the line,
# parse it out and put it in the dictionary
# we were given
pair = line.split('=', 1)
if len(pair) == 2:
env[pair[0]] = pair[1].rstrip()
else:
printer.string("{}WARNING: not an environment variable: \"{}\"".
format(prompt, line))
# Note: if returned_env is given then "set"
# will be executed after the exe and the environment
# variables will be returned in it. The down-side
# of this is that the return value of the exe is,
# of course, lost.
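# Hedged usage sketch of the returned_env mechanism described above
# (the command is illustrative only):
#   returned_env = {}
#   exe_run(["make", "all"], 3600, printer, prompt, returned_env=returned_env)
#   # returned_env then holds the KEY=VALUE pairs parsed from the trailing
#   # "set" (Windows) or "env" (Linux) output.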
def exe_run(call_list, guard_time_seconds=None, printer=None, prompt=None,
shell_cmd=False, set_env=None, returned_env=None,
bash_cmd=False, keep_going_flag=None):
'''Call an executable, printing out what it does'''
success = False
start_time = time()
flibbling = False
kill_time = None
read_time = start_time
if returned_env is not None:
# The caller wants the environment after the
# command has run, so, from this post:
# https://stackoverflow.com/questions/1214496/how-to-get-environment-from-a-subprocess
# append a tag that we can detect
# to the command and then call set,
# from which we can parse the environment
call_list.append("&&")
call_list.append("echo")
call_list.append("flibble")
call_list.append("&&")
if is_linux():
call_list.append("env")
bash_cmd = True
else:
call_list.append("set")
# I've seen output from set get lost,
# possibly because the process ending
# is asynchronous with stdout,
# so add a delay here as well
call_list.append("&&")
call_list.append("sleep")
call_list.append("2")
try:
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': shell_cmd,
'env': set_env,
'executable': "bin/bash" if bash_cmd else None
}
# Call the thang
# Note: used to have bufsize=1 here but it turns out
# that is ignored 'cos the output is considered
        # binary. Seems to work anyway; I guess Winders,
        # at least, is line-buffered.
process = subprocess.Popen(subprocess_osify(call_list, shell=shell_cmd),
**popen_keywords)
if printer:
printer.string("{}{}, pid {} started with guard time {} second(s)". \
format(prompt, call_list[0], process.pid,
guard_time_seconds))
# This is over complex but, unfortunately, necessary.
# At least one thing that we try to run, nrfjprog, can
# crash silently: just hangs and sends no output. However
# it also doesn't flush and close stdout and so read(1)
# will hang, meaning we can't read its output as a means
# to check that it has hung.
# So, here we poll for the return value, which is normally
# how things will end, and we start another thread which
# reads from the process's stdout. If the thread sees
# nothing for guard_time_seconds then we terminate the
# process.
read_queue = queue.Queue()
read_thread = threading.Thread(target=read_from_process_and_queue,
args=(process, read_queue))
read_thread.start()
while process.poll() is None:
if keep_going_flag is None or keep_going(keep_going_flag, printer, prompt):
if guard_time_seconds and (kill_time is None) and \
((time() - start_time > guard_time_seconds) or
(time() - read_time > guard_time_seconds)):
kill_time = time()
if printer:
printer.string("{}guard time of {} second(s)." \
" expired, stopping {}...".
format(prompt, guard_time_seconds,
call_list[0]))
exe_terminate(process.pid)
else:
exe_terminate(process.pid)
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
sleep(0.1)
# Can't join() read_thread here as it might have
# blocked on a read() (if nrfjprog has anything to
# do with it). It will be tidied up when this process
# exits.
# There may still be stuff on the queue, read it out here
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
# There may still be stuff in the buffer after
# the application has finished running so flush that
# out here
line = process.stdout.readline().decode()
while line:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = process.stdout.readline().decode()
if (process.poll() == 0) and kill_time is None:
success = True
if printer:
printer.string("{}{}, pid {} ended with return value {}.". \
format(prompt, call_list[0],
process.pid, process.poll()))
except ValueError as ex:
if printer:
printer.string("{}failed: {} while trying to execute {}.". \
format(prompt, type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
process.kill()
raise KeyboardInterrupt from ex
return success
def set_process_prio_high():
'''Set the priority of the current process to high'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(-10)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.HIGH_PRIORITY_CLASS)
def set_process_prio_normal():
'''Set the priority of the current process to normal'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(0)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.NORMAL_PRIORITY_CLASS)
class ExeRun():
'''Run an executable as a "with:"'''
def __init__(self, call_list, printer=None, prompt=None, shell_cmd=False, with_stdin=False):
self._call_list = call_list
self._printer = printer
self._prompt = prompt
self._shell_cmd = shell_cmd
self._with_stdin=with_stdin
self._process = None
def __enter__(self):
if self._printer:
text = ""
for idx, item in enumerate(self._call_list):
if idx == 0:
text = item
else:
text += " {}".format(item)
self._printer.string("{}starting {}...".format(self._prompt,
text))
try:
# Start exe
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': self._shell_cmd
}
if not is_linux():
popen_keywords['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
if self._with_stdin:
popen_keywords['stdin'] = subprocess.PIPE
self._process = subprocess.Popen(self._call_list, **popen_keywords)
if self._printer:
self._printer.string("{}{} pid {} started".format(self._prompt,
self._call_list[0],
self._process.pid))
except (OSError, subprocess.CalledProcessError, ValueError) as ex:
if self._printer:
self._printer.string("{}failed: {} to start {}.". \
format(self._prompt,
type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
self._process.kill()
raise KeyboardInterrupt from ex
return self._process
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
# Stop exe
if self._printer:
self._printer.string("{}stopping {}...". \
format(self._prompt,
self._call_list[0]))
return_value = self._process.poll()
if not return_value:
retry = 5
while (self._process.poll() is None) and (retry > 0):
# Try to stop with CTRL-C
if is_linux():
sig = signal.SIGINT
else:
sig = signal.CTRL_BREAK_EVENT
self._process.send_signal(sig)
sleep(1)
retry -= 1
return_value = self._process.poll()
if not return_value:
# Terminate with a vengeance
self._process.terminate()
while self._process.poll() is None:
sleep(0.1)
if self._printer:
self._printer.string("{}{} pid {} terminated".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} CTRL-C'd".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} already ended".format(self._prompt,
self._call_list[0],
self._process.pid))
return return_value
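# Hedged usage sketch for ExeRun (the command is illustrative only): run a
# background process for the lifetime of a "with" block; it is stopped on exit:
#   with ExeRun(["openocd", "-f", "board.cfg"], printer, prompt) as process:
#       pass  # interact with the running process here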
# Simple SWO decoder: only handles single bytes of application
# data at a time, i.e. what ITM_SendChar() sends.
class SwoDecoder():
'''Take the contents of a byte_array and decode it as SWO'''
def __init__(self, address, replaceLfWithCrLf=False):
self._address = address
self._replace_lf_with_crlf = replaceLfWithCrLf
self._expecting_swit = True
def decode(self, swo_byte_array):
'''Do the decode'''
decoded_byte_array = bytearray()
if swo_byte_array:
for data_byte in swo_byte_array:
# We're looking only for "address" and we also know
# that CMSIS only offers ITM_SendChar(), so packet length
# is always 1, and we only send ASCII characters,
# so the top bit of the data byte must be 0.
#
# For the SWO protocol, see:
#
# https://developer.arm.com/documentation/ddi0314/h/
# instrumentation-trace-macrocell/
# about-the-instrumentation-trace-macrocell/trace-packet-format
#
# When we see SWIT (SoftWare Instrumentation Trace
# I think, anyway, the bit that carries our prints
# off the target) which is 0bBBBBB0SS, where BBBBB is
# address and SS is the size of payload to follow,
# in our case 0x01, we know that the next
# byte is probably data and if it is ASCII then
# it is data. Anything else is ignored.
# The reason for doing it this way is that the
# ARM ITM only sends out sync packets under
# special circumstances so it is not a recovery
# mechanism for simply losing a byte in the
# transfer, which does happen occasionally.
if self._expecting_swit:
if ((data_byte & 0x03) == 0x01) and ((data_byte & 0xf8) >> 3 == self._address):
# Trace packet type is SWIT, i.e. our
# application logging
self._expecting_swit = False
else:
if data_byte & 0x80 == 0:
if (data_byte == 10) and self._replace_lf_with_crlf:
decoded_byte_array.append(13)
decoded_byte_array.append(data_byte)
self._expecting_swit = True
return decoded_byte_array
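# Hedged worked example for SwoDecoder: with address 1 the SWIT header byte for
# a 1-byte payload is 0b00001001 (0x09), so a stream of b"\x09A\x09B" should
# decode to bytearray(b"AB"):
#   assert SwoDecoder(1).decode(b"\x09A\x09B") == bytearray(b"AB")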
class PrintThread(threading.Thread):
'''Print thread to organise prints nicely'''
def __init__(self, print_queue, file_handle=None,
window_file_handle=None, window_size=10000,
window_update_period_seconds=1):
self._queue = print_queue
self._lock = RLock()
self._queue_forwards = []
self._running = False
self._file_handle = file_handle
self._window = None
self._window_file_handle = window_file_handle
if self._window_file_handle:
self._window = deque(self._window_file_handle, maxlen=window_size)
self._window_update_pending = False
self._window_update_period_seconds = window_update_period_seconds
self._window_next_update_time = time()
threading.Thread.__init__(self)
def _send_forward(self, flush=False):
# Send from any forwarding buffers
# self._lock should be acquired before this is called
queue_idxes_to_remove = []
for idx, queue_forward in enumerate(self._queue_forwards):
if flush or time() > queue_forward["last_send"] + queue_forward["buffer_time"]:
string_forward = ""
len_queue_forward = len(queue_forward["buffer"])
count = 0
for item in queue_forward["buffer"]:
count += 1
if count < len_queue_forward:
item += "\n"
if queue_forward["prefix_string"]:
item = queue_forward["prefix_string"] + item
string_forward += item
queue_forward["buffer"] = []
if string_forward:
try:
queue_forward["queue"].put(string_forward)
except TimeoutError:
pass
except (OSError, EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
queue_forward["last_send"] = time()
for idx in queue_idxes_to_remove:
self._queue_forwards.pop(idx)
def add_forward_queue(self, queue_forward, prefix_string=None, buffer_time=0):
'''Forward things received on the print queue to another queue'''
self._lock.acquire()
already_done = False
for item in self._queue_forwards:
if item["queue"] == queue_forward:
already_done = True
break
if not already_done:
item = {}
item["queue"] = queue_forward
item["prefix_string"] = prefix_string
item["buffer"] = []
item["buffer_time"] = buffer_time
item["last_send"] = time()
self._queue_forwards.append(item)
self._lock.release()
def remove_forward_queue(self, queue_forward):
'''Stop forwarding things received on the print queue to another queue'''
self._lock.acquire()
queues = []
self._send_forward(flush=True)
for item in self._queue_forwards:
if item["queue"] != queue_forward:
queues.append(item)
self._queue_forwards = queues
self._lock.release()
def stop_thread(self):
'''Helper function to stop the thread'''
self._lock.acquire()
self._running = False
# Write anything remaining to the window file
if self._window_update_pending:
self._window_file_handle.seek(0)
for item in self._window:
self._window_file_handle.write(item)
self._window_file_handle.flush()
self._window_update_pending = False
self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
def run(self):
'''Worker thread'''
self._running = True
while self._running:
# Print locally and store in any forwarding buffers
try:
my_string = self._queue.get(block=False, timeout=0.5)
print(my_string)
if self._file_handle:
self._file_handle.write(my_string + "\n")
self._lock.acquire()
if self._window is not None:
# Note that my_string can contain multiple lines,
# hence the need to split it here to maintain the
# window
for line in my_string.splitlines():
self._window.append(line + "\n")
self._window_update_pending = True
for queue_forward in self._queue_forwards:
queue_forward["buffer"].append(my_string)
self._lock.release()
except queue.Empty:
sleep(0.1)
except (OSError, EOFError, BrokenPipeError):
# Try to restore stdout
sleep(0.1)
sys.stdout = sys.__stdout__
self._lock.acquire()
# Send from any forwarding buffers
self._send_forward()
# Write the window to file if required
if self._window_update_pending and time() > self._window_next_update_time:
# If you don't do this you can end up with garbage
# at the end of the file
self._window_file_handle.truncate()
self._window_file_handle.seek(0)
for item in self._window:
self._window_file_handle.write(item)
self._window_update_pending = False
self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
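# Hedged usage sketch for PrintThread (queue type is illustrative):
#   print_queue = queue.Queue()
#   print_thread = PrintThread(print_queue)
#   print_thread.start()
#   # ...other code puts strings on print_queue, e.g. via PrintToQueue below...
#   print_thread.stop_thread()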
class PrintToQueue():
'''Print to a queue, if there is one'''
def __init__(self, print_queue, file_handle, include_timestamp=False):
self._queues = []
self._lock = RLock()
if print_queue:
self._queues.append(print_queue)
self._file_handle = file_handle
self._include_timestamp = include_timestamp
def add_queue(self, print_queue):
'''Add a queue to the list of places to print to'''
self._lock.acquire()
already_done = False
for item in self._queues:
if item == print_queue:
already_done = True
break
if not already_done:
self._queues.append(print_queue)
self._lock.release()
def remove_queue(self, print_queue):
'''Remove a queue from the list of places to print to'''
self._lock.acquire()
queues = []
for item in self._queues:
if item != print_queue:
queues.append(item)
self._queues = queues
self._lock.release()
def string(self, string, file_only=False):
'''Print a string to the queue(s)'''
if self._include_timestamp:
string = strftime(TIME_FORMAT, gmtime()) + " " + string
if not file_only:
self._lock.acquire()
queue_idxes_to_remove = []
if self._queues:
for idx, print_queue in enumerate(self._queues):
try:
print_queue.put(string)
except (EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
for idx in queue_idxes_to_remove:
self._queues.pop(idx)
else:
print(string)
self._lock.release()
if self._file_handle:
self._file_handle.write(string + "\n")
self._file_handle.flush()
# This stolen from here:
# https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python
class ChangeDir():
'''Context manager for changing the current working directory'''
def __init__(self, new_path):
self._new_path = os.path.expanduser(new_path)
self._saved_path = None
def __enter__(self):
'''CD to new_path'''
self._saved_path = os.getcwd()
os.chdir(self._new_path)
def __exit__(self, etype, value, traceback):
'''CD back to saved_path'''
os.chdir(self._saved_path)
class Lock():
'''Hold a lock as a "with:"'''
def __init__(self, lock, guard_time_seconds,
lock_type, printer, prompt, keep_going_flag=None):
self._lock = lock
self._guard_time_seconds = guard_time_seconds
self._lock_type = lock_type
self._printer = printer
self._prompt = prompt
self._keep_going_flag = keep_going_flag
self._locked = False
def __enter__(self):
if not self._lock:
return True
# Wait on the lock
if not self._locked:
timeout_seconds = self._guard_time_seconds
self._printer.string("{}waiting up to {} second(s)" \
" for a {} lock...". \
format(self._prompt,
self._guard_time_seconds,
self._lock_type))
count = 0
while not self._lock.acquire(False) and \
((self._guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(self._keep_going_flag, self._printer, self._prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
self._printer.string("{}still waiting {} second(s)" \
" for a {} lock (locker is" \
" currently {}).". \
format(self._prompt, timeout_seconds,
self._lock_type, self._lock))
count = 0
if (self._guard_time_seconds == 0) or (timeout_seconds > 0):
self._locked = True
self._printer.string("{}{} lock acquired ({}).". \
format(self._prompt, self._lock_type,
self._lock))
return self._locked
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
if self._lock and self._locked:
try:
self._lock.release()
self._locked = False
self._printer.string("{}released a {} lock.".format(self._prompt,
self._lock_type))
except RuntimeError:
self._locked = False
self._printer.string("{}{} lock was already released.". \
format(self._prompt, self._lock_type))
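# Hedged usage sketch for Lock (names are illustrative): hold a shared lock for
# the duration of a block, waiting up to 60 seconds to get it:
#   with Lock(system_lock, 60, "system", printer, prompt) as locked:
#       if locked:
#           pass  # do the guarded work here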
def wait_for_completion(_list, purpose, guard_time_seconds,
printer, prompt, keep_going_flag):
'''Wait for a completion list to empty'''
completed = False
if len(_list) > 0:
timeout_seconds = guard_time_seconds
printer.string("{}waiting up to {} second(s)" \
" for {} completion...". \
format(prompt, guard_time_seconds, purpose))
count = 0
while (len(_list) > 0) and \
((guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
list_text = ""
for item in _list:
if list_text:
list_text += ", "
list_text += str(item)
printer.string("{}still waiting {} second(s)" \
" for {} to complete (waiting" \
" for {}).". \
format(prompt, timeout_seconds,
purpose, list_text))
count = 0
if len(_list) == 0:
completed = True
printer.string("{}{} completed.".format(prompt, purpose))
return completed
def reset_nrf_target(connection, printer, prompt):
'''Reset a Nordic NRFxxx target'''
call_list = []
printer.string("{}resetting target...".format(prompt))
# Assemble the call list
call_list.append("nrfjprog")
call_list.append("--reset")
if connection and "debugger" in connection and connection["debugger"]:
call_list.append("-s")
call_list.append(connection["debugger"])
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Call it
return exe_run(call_list, 60, printer, prompt)
def usb_cutter_reset(usb_cutter_id_strs, printer, prompt):
'''Cut and then un-cut USB cables using Cleware USB cutters'''
# First switch the USB cutters off
action = "1"
count = 0
call_list_root = ["usbswitchcmd"]
call_list_root.append("-s")
call_list_root.append("-n")
while count < 2:
for usb_cutter_id_str in usb_cutter_id_strs:
call_list = call_list_root.copy()
call_list.append(usb_cutter_id_str)
call_list.append(action)
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
if printer:
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Set shell to keep Jenkins happy
exe_run(call_list, 0, printer, prompt, shell_cmd=True)
# Wait 5ish seconds
if printer:
printer.string("{}waiting {} second(s)...". \
format(prompt, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# "0" to switch the USB cutters on again
action = "0"
count += 1
def kmtronic_reset(ip_address, hex_bitmap, printer, prompt):
'''Cut and then un-cut power using a KMTronic box'''
# KMTronic is a web relay box which will be controlling
    # power to, for instance, EVKs. The last byte of the URL
# is a hex bitmap of the outputs where 0 sets off and 1
# sets on
# Take only the last two digits of the hex bitmap
hex_bitmap_len = len(hex_bitmap)
hex_bitmap = hex_bitmap[hex_bitmap_len - 2:hex_bitmap_len]
kmtronic_off = "http://" + ip_address + "FFE0" + hex_bitmap
kmtronic_on = "http://" + ip_address + "FFE0" + "{0:x}".format(int(hex_bitmap, 16) ^ 0xFF)
try:
# First switch the given bit positions off
if printer:
printer.string("{}sending {}". \
format(prompt, kmtronic_off))
response = requests.get(kmtronic_off)
# Wait 5ish seconds
if printer:
printer.string("{}...received response {}, waiting {} second(s)...". \
format(prompt, response.status_code, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# Switch the given bit positions on
if printer:
printer.string("{}sending {}".format(prompt, kmtronic_on))
response = requests.get(kmtronic_on)
if printer:
printer.string("{}...received response {}.". \
format(prompt, response.status_code))
except requests.ConnectionError:
if printer:
printer.string("{}unable to connect to KMTronic box at {}.". \
format(prompt, ip_address))
# Look for a single line anywhere in message
# beginning with "test: ". This must be followed by
# "x.y.z a.b.c m.n.o" (i.e. instance IDs space separated)
# and then an optional "blah" filter string, or just "*"
# and an optional "blah" filter string or "None".
# Valid examples are:
#
# test: 1
# test: 1 3 7
# test: 1.0.3 3 7.0
# test: 1 2 example
# test: 1.1 8 portInit
# test: *
# test: * port
# test: none
#
# Filter strings must NOT begin with a digit.
# There cannot be more than one * or a * with any other instance.
# There can only be one filter string.
# Only whitespace is expected after this on the line.
# Anything else is ignored.
# Populates instances with the "0 4.5 13.5.1" bit as instance
# entries [[0], [4, 5], [13, 5, 1]] and returns the filter
# string, if any.
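# Hedged worked example of the rules above:
#   instances = []
#   found, fltr = commit_message_parse("fix\\ntest: 1.0.3 3 7.0 portInit", instances)
# would be expected to return found as True and fltr as "portInit", leaving
# instances as [[1, 0, 3], [3], [7, 0]].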
def commit_message_parse(message, instances, printer=None, prompt=None):
'''Find stuff in a commit message'''
instances_all = False
instances_local = []
filter_string_local = None
found = False
if message:
# Search through message for a line beginning
# with "test:"
if printer:
printer.string("{}### parsing message to see if it contains a test directive...". \
format(prompt))
lines = message.split("\\n")
for idx1, line in enumerate(lines):
if printer:
printer.string("{}text line {}: \"{}\"".format(prompt, idx1 + 1, line))
if line.lower().startswith("test:"):
found = True
instances_all = False
# Pick through what follows
parts = line[5:].split()
for part in parts:
if instances_all and (part[0].isdigit() or part == "*" or part.lower() == "none"):
# If we've had a "*" and this is another one
# or it begins with a digit then this is
# obviously not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if filter_string_local:
# If we've had a filter string then nothing
# must follow so this is not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...extraneous characters after test directive," \
" ignoring.".format(prompt))
found = False
break
if part[0].isdigit():
# If this part begins with a digit it could
# be an instance containing numbers
instance = []
bad = False
for item in part.split("."):
try:
instance.append(int(item))
except ValueError:
# Some rubbish, not a test line so
# leave the loop and try the next
# line
bad = True
break
if bad:
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if instance:
instances_local.append(instance[:])
elif part == "*":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
# If we haven't had any instances and
# this is a * then it means "all"
instances_local.append(part)
instances_all = True
elif part.lower() == "none":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
instances_local = []
filter_string_local = None
break
elif instances_local and not part == "*":
# If we've had an instance and this
# is not a "*" then this must be a
# filter string
filter_string_local = part
else:
# Found some rubbish, not a "test:"
# line after all, leave the loop
# and try the next line
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if found:
text = "found test directive with"
if instances_local:
text += " instance(s)" + get_instances_text(instances_local)
if filter_string_local:
text += " and filter \"" + filter_string_local + "\""
else:
text += " instances \"None\""
if printer:
printer.string("{}{}.".format(prompt, text))
break
if printer:
printer.string("{}no test directive found".format(prompt))
if found and instances_local:
instances.extend(instances_local[:])
return found, filter_string_local
| [
"psutil.Process",
"time.sleep",
"copy.copy",
"threading.Thread.__init__",
"re.split",
"collections.deque",
"subprocess.Popen",
"os.chmod",
"platform.system",
"os.path.isdir",
"os.path.expanduser",
"subprocess.check_output",
"requests.get",
"time.time",
"multiprocessing.RLock",
"time.gmtime",
"os.environ.get",
"os.geteuid",
"os.getcwd",
"os.chdir",
"serial.Serial",
"shutil.rmtree",
"threading.Thread",
"queue.Queue"
]
| [((7666, 7695), 'os.chmod', 'os.chmod', (['path', 'stat.S_IWRITE'], {}), '(path, stat.S_IWRITE)\n', (7674, 7695), False, 'import os\n'), ((7837, 7861), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (7850, 7861), False, 'import os\n'), ((15992, 16016), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (16005, 16016), False, 'import os\n'), ((25113, 25140), 'psutil.Process', 'psutil.Process', (['process_pid'], {}), '(process_pid)\n', (25127, 25140), False, 'import psutil\n'), ((26760, 26766), 'time.time', 'time', ([], {}), '()\n', (26764, 26766), False, 'from time import sleep, time, gmtime, strftime\n'), ((1277, 1294), 'platform.system', 'platform.system', ([], {}), '()\n', (1292, 1294), False, 'import platform\n'), ((6230, 6300), 'subprocess.check_output', 'subprocess.check_output', (['"""subst"""'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), "('subst', stderr=subprocess.STDOUT, shell=True)\n", (6253, 6300), False, 'import subprocess\n'), ((13314, 13361), 'serial.Serial', 'serial.Serial', (['serial_name', 'speed'], {'timeout': '(0.05)'}), '(serial_name, speed, timeout=0.05)\n', (13327, 13361), False, 'import serial\n'), ((19599, 19623), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (19612, 19623), False, 'import os\n'), ((20603, 20627), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (20616, 20627), False, 'import os\n'), ((23558, 23624), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(cmd, stderr=subprocess.STDOUT, shell=True)\n', (23581, 23624), False, 'import subprocess\n'), ((29253, 29266), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (29264, 29266), False, 'import queue\n'), ((29289, 29374), 'threading.Thread', 'threading.Thread', ([], {'target': 'read_from_process_and_queue', 'args': '(process, read_queue)'}), '(target=read_from_process_and_queue, args=(process, read_queue)\n )\n', (29305, 29374), False, 'import threading\n'), ((40553, 40560), 'multiprocessing.RLock', 'RLock', ([], {}), '()\n', (40558, 40560), False, 'from multiprocessing import RLock\n'), ((41021, 41027), 'time.time', 'time', ([], {}), '()\n', (41025, 41027), False, 'from time import sleep, time, gmtime, strftime\n'), ((41036, 41067), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (41061, 41067), False, 'import threading\n'), ((46178, 46185), 'multiprocessing.RLock', 'RLock', ([], {}), '()\n', (46183, 46185), False, 'from multiprocessing import RLock\n'), ((48188, 48216), 'os.path.expanduser', 'os.path.expanduser', (['new_path'], {}), '(new_path)\n', (48206, 48216), False, 'import os\n'), ((48330, 48341), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (48339, 48341), False, 'import os\n'), ((48350, 48374), 'os.chdir', 'os.chdir', (['self._new_path'], {}), '(self._new_path)\n', (48358, 48374), False, 'import os\n'), ((48468, 48494), 'os.chdir', 'os.chdir', (['self._saved_path'], {}), '(self._saved_path)\n', (48476, 48494), False, 'import os\n'), ((54454, 54486), 'time.sleep', 'sleep', (['HW_RESET_DURATION_SECONDS'], {}), '(HW_RESET_DURATION_SECONDS)\n', (54459, 54486), False, 'from time import sleep, time, gmtime, strftime\n'), ((55416, 55442), 'requests.get', 'requests.get', (['kmtronic_off'], {}), '(kmtronic_off)\n', (55428, 55442), False, 'import requests\n'), ((55674, 55706), 'time.sleep', 'sleep', (['HW_RESET_DURATION_SECONDS'], {}), '(HW_RESET_DURATION_SECONDS)\n', (55679, 55706), False, 'from 
time import sleep, time, gmtime, strftime\n'), ((55861, 55886), 'requests.get', 'requests.get', (['kmtronic_on'], {}), '(kmtronic_on)\n', (55873, 55886), False, 'import requests\n'), ((5939, 5982), 're.split', 're.split', (['"""( |\\\\".*?\\\\"|\'.*?\')"""', 'cmd_line'], {}), '(\'( |\\\\".*?\\\\"|\\\'.*?\\\')\', cmd_line)\n', (5947, 5982), False, 'import re\n'), ((14864, 14872), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (14869, 14872), False, 'from time import sleep, time, gmtime, strftime\n'), ((25522, 25532), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (25527, 25532), False, 'from time import sleep, time, gmtime, strftime\n'), ((30329, 30335), 'time.time', 'time', ([], {}), '()\n', (30333, 30335), False, 'from time import sleep, time, gmtime, strftime\n'), ((30879, 30889), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (30884, 30889), False, 'from time import sleep, time, gmtime, strftime\n'), ((34941, 34992), 'subprocess.Popen', 'subprocess.Popen', (['self._call_list'], {}), '(self._call_list, **popen_keywords)\n', (34957, 34992), False, 'import subprocess\n'), ((40811, 40862), 'collections.deque', 'deque', (['self._window_file_handle'], {'maxlen': 'window_size'}), '(self._window_file_handle, maxlen=window_size)\n', (40816, 40862), False, 'from collections import deque\n'), ((42990, 42996), 'time.time', 'time', ([], {}), '()\n', (42994, 42996), False, 'from time import sleep, time, gmtime, strftime\n'), ((51881, 51889), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (51886, 51889), False, 'from time import sleep, time, gmtime, strftime\n'), ((53265, 53276), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (53274, 53276), False, 'import os\n'), ((8317, 8366), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {'onerror': 'remove_readonly'}), '(directory, onerror=remove_readonly)\n', (8330, 8366), False, 'import shutil\n'), ((9412, 9432), 'copy.copy', 'copy', (['instances_out1'], {}), '(instances_out1)\n', (9416, 9432), False, 'from copy import copy\n'), ((10213, 10225), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (10223, 10225), False, 'import os\n'), ((15869, 15880), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15878, 15880), False, 'import os\n'), ((30860, 30866), 'time.time', 'time', ([], {}), '()\n', (30864, 30866), False, 'from time import sleep, time, gmtime, strftime\n'), ((33269, 33285), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (33283, 33285), False, 'import psutil\n'), ((33688, 33704), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (33702, 33704), False, 'import psutil\n'), ((36457, 36465), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (36462, 36465), False, 'from time import sleep, time, gmtime, strftime\n'), ((42284, 42290), 'time.time', 'time', ([], {}), '()\n', (42288, 42290), False, 'from time import sleep, time, gmtime, strftime\n'), ((43987, 43993), 'time.time', 'time', ([], {}), '()\n', (43991, 43993), False, 'from time import sleep, time, gmtime, strftime\n'), ((49704, 49712), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (49709, 49712), False, 'from time import sleep, time, gmtime, strftime\n'), ((8695, 8703), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (8700, 8703), False, 'from time import sleep, time, gmtime, strftime\n'), ((29795, 29801), 'time.time', 'time', ([], {}), '()\n', (29799, 29801), False, 'from time import sleep, time, gmtime, strftime\n'), ((36733, 36743), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (36738, 36743), False, 'from time import sleep, time, gmtime, strftime\n'), ((41341, 41347), 'time.time', 
'time', ([], {}), '()\n', (41345, 41347), False, 'from time import sleep, time, gmtime, strftime\n'), ((45074, 45084), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (45079, 45084), False, 'from time import sleep, time, gmtime, strftime\n'), ((45198, 45208), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (45203, 45208), False, 'from time import sleep, time, gmtime, strftime\n'), ((45464, 45470), 'time.time', 'time', ([], {}), '()\n', (45468, 45470), False, 'from time import sleep, time, gmtime, strftime\n'), ((45912, 45918), 'time.time', 'time', ([], {}), '()\n', (45916, 45918), False, 'from time import sleep, time, gmtime, strftime\n'), ((9851, 9894), 'os.environ.get', 'os.environ.get', (['"""SystemRoot"""', '"""C:\\\\windows"""'], {}), "('SystemRoot', 'C:\\\\windows')\n", (9865, 9894), False, 'import os\n'), ((47226, 47234), 'time.gmtime', 'gmtime', ([], {}), '()\n', (47232, 47234), False, 'from time import sleep, time, gmtime, strftime\n'), ((54128, 54139), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (54137, 54139), False, 'import os\n'), ((21886, 21897), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (21895, 21897), False, 'import os\n'), ((17251, 17262), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17260, 17262), False, 'import os\n'), ((18391, 18402), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (18400, 18402), False, 'import os\n'), ((29654, 29660), 'time.time', 'time', ([], {}), '()\n', (29658, 29660), False, 'from time import sleep, time, gmtime, strftime\n'), ((29720, 29726), 'time.time', 'time', ([], {}), '()\n', (29724, 29726), False, 'from time import sleep, time, gmtime, strftime\n')] |
import numpy as np
import astropy.modeling.blackbody as bb
import astropy.constants as const
from astropy.io import fits
from scipy.interpolate import interp2d
class FaiglerMazehFit():
def __init__(self, P_orb, inc, R_star, M_star, T_star, A_ellip=False, A_beam=False,
R_p=False, a=False, u=False, g=0.65, logg=None, tele='TESS', M_p=False,
K=False):
self.P_orb = P_orb # orbital period in days
self.inc = inc * np.pi / 180 # inclination converted to radians
self.R_star = R_star # radius of the star in solar units
self.M_star = M_star # mass of the star in solar units
self.T_star = T_star # temperature of the star [K]
self.A_ellip = A_ellip # ellipsoidal amplitude in ppm
self.A_beam = A_beam # beaming amplitude in ppm
self.g = g # gravity-darkening coefficient, expected range is 0.3-1.0
self.logg = logg # log surface gravity of the star [cm s^-2]
self.tele = tele.lower() # observation instrument used, default is TESS. Only other
# other option (for now) is Kepler.
self.R_p = R_p # radius of the planet in jupiter radii
self.a = a
self.u = u # the limb-darkening coefficient, range is 0-1
self.g = g
self.M_p = M_p
self.K = K
# get the mass from the ellipsoidal amplitude, if given.
# u is the limb-darkening coefficient, range is 0-1
if not M_p and not not A_ellip and not not logg:
self.u = self.LDC()
self.M_p = self.m_from_ellip()
# star-planet separation [au] assuming a circular orbit
if not a and not not M_p:
self.a = get_a(self.P_orb * 86400, self.M_star * const.M_sun.value, \
self.M_p * const.M_jup.value) / const.au.value
def alpha_ellip(self):
if not self.u:
self.u = self.LDC()
if not self.g:
self.g = self.GDC()
a = 15 + self.u
b = 1 + self.g
c = 3 - self.u
return 0.15 * a * b / c
def RV_amp(self):
"""
Returns the radial velocity amplitude [m/s] of the star given a companion mass.
"""
return 27 / 40 * const.c.value \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def doppler_shift(self, K):
"""
Returns the shift in wavelength for a given radial velocity amplitude.
"""
return K / const.c.value
def response_convolution(self, lambdas, response):
return response * bb.blackbody_lambda(lambdas, self.T_star).value
def alpha_beam(self, K):
"""
Returns the factor that accounts for the flux lost when a star gets Doppler shifted
in and out of the observer's bandpass.
"""
print(K)
rest_lambdas, response = response_func(self.tele)
flux_rest = np.trapz(self.response_convolution(rest_lambdas, response), \
x=rest_lambdas)
blueshifted_lambdas = rest_lambdas - self.doppler_shift(K=K)
flux_blueshift = np.trapz(self.response_convolution(blueshifted_lambdas, response), \
x=rest_lambdas)
redshifted_lambdas = rest_lambdas + self.doppler_shift(K=K)
flux_redshift = np.trapz(self.response_convolution(redshifted_lambdas, response), \
x=rest_lambdas)
alpha_blue = abs( (flux_rest - flux_blueshift) / flux_rest )
alpha_red = abs( (flux_rest - flux_redshift) / flux_rest )
return 1 - np.mean([alpha_red, alpha_blue])
def m_from_ellip(self):
return self.A_ellip \
* self.R_star ** (-3) \
* self.M_star ** 2 \
* self.P_orb ** 2 \
/ (12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2)
def ellip_from_m(self):
return self.M_p * 12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2 \
* self.R_star ** 3 \
* self.M_star ** (-2) \
* self.P_orb ** (-2)
def m_from_beam(self, K=False, alpha_beam=False):
if not alpha_beam and not K and not not self.M_p:
alpha_beam = self.alpha_beam(K=self.RV_amp())
elif not alpha_beam and not not K:
alpha_beam = self.alpha_beam(K=K)
elif not not K and not not alpha_beam:
raise ValueError("Please only specify either K or alpha_beam, not both.")
elif not K and not alpha_beam:
raise ValueError("Please specify a radial velocity (K) or alpha_beam parameter")
return self.A_beam \
* self.M_star ** (2/3) \
* self.P_orb ** (1/3) \
/ (alpha_beam * np.sin(self.inc) * 2.7)
def beam_from_m(self):
"""
Returns the expected Doppler beaming amplitude [ppm] for a given mass.
"""
if not self.M_p:
raise ValueError("Argument 'M_p' must be specified if you're trying to " +
"derive a beaming amplitude from a mass.")
if not self.K:
K=self.RV_amp()
return 2.7 * self.alpha_beam(K=self.K) \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def Ag_from_thermref(self, A_thermref):
"""
Return the geometric albedo derived from the thermal + ref amplitude.
"""
return A_thermref * (self.R_p / self.a) ** -2 * (const.au / const.R_jup) ** 2
def mass(self, derived_from=None, K=False, alpha_beam=False):
if derived_from == "ellip":
return self.m_from_ellip()
elif derived_from == "beam":
return self.m_from_beam(K=K, alpha_beam=alpha_beam)
else:
raise ValueError("derived_from must equal either 'ellip' or 'beam'")
def nearest_neighbors(self, value, array, max_difference):
"""
Returns a set of nearest neighbor indices of the given array.
"""
return set(list((np.where(abs(array - value) < max_difference))[0]))
def correct_maxdiff(self, value, array, guess):
while len(self.nearest_neighbors(value, array, guess)) > 0:
guess -= 0.01 * guess
return guess
def shared_neighbor(self, value1, array1, max_diff1, value2, array2, max_diff2):
set1 = self.nearest_neighbors(value1, array1, max_diff1)
set2 = self.nearest_neighbors(value2, array2, max_diff2)
nearest = list(set1.intersection(set2))
# if len(nearest) > 1:
# newmax_diff1 = self.correct_maxdiff(value1, array1, max_diff1)
# newmax_diff2 = self.correct_maxdiff(value2, array2, max_diff2)
# print(newmax_diff1, newmax_diff2)
# if newmax_diff2 > newmax_diff1:
# max_diff2 = newmax_diff2
# else:
# max_diff1 = newmax_diff1
# set1 = self.nearest_neighbors(value1, array1, max_diff1)
# set2 = self.nearest_neighbors(value2, array2, max_diff2)
# nearest = list(set1.intersection(set2))
# print(nearest)
# # if len(nearest) > 1:
# # raise ValueError("Multiple shared nearest neighbors, indices = ", nearest)
# # else:
# # return nearest[0]
return nearest[0]
def tess_warning(self):
if self.tele != 'tess':
raise ValueError("This function is only appropriate for observations done with " +
"the TESS satellite")
def claret_LDC(self):
"""
Returns the mu coefficient and the four-parameters used in the Claret four-parameter
limb-darkening law (Claret 2000). These are obtained by finding the nearest neighbor
in the model limb-darkening of TESS from Claret 2018.
"""
# print("claret_LDC is still garbage, sorry. Quitting now...")
# exit()
self.tess_warning()
logg, Teff, a1, a2, a3, a4, mu, mod = np.genfromtxt('../claret_ldc.dat',
usecols=(0,1,4,5,6,7,8,10),
unpack=True)
mod = np.genfromtxt('../claret_ldc.dat', usecols=(10,), dtype='str')
if self.T_star <= 3000:
# the PC model is meant for cool stars, and if we break it up this way we can do an
# easier 2D interpolation.
mask = mod == 'PD'
else:
mask = mod == 'PC'
logg = logg[mask]
Teff = Teff[mask]
a1 = a1[mask]
a2 = a2[mask]
a3 = a3[mask]
a4 = a4[mask]
mu = mu[mask]
nearest = self.shared_neighbor(self.T_star, Teff, 100, self.logg, logg, 0.25)
mu = mu[nearest]
a_coeffs = [a1[nearest], a2[nearest], a3[nearest], a4[nearest]]
return mu, a_coeffs
def GDC(self):
"""
Returns the gravity-darkening coefficient from the Claret 2017 model
"""
self.tess_warning()
logg, log_Teff, g = np.genfromtxt('../claret_gdc.dat', usecols=(2,3,4), unpack=True)
nearest = self.shared_neighbor(np.log10(self.T_star), log_Teff, .01, self.logg,
logg, 0.25)
return g[nearest]
def LDC(self):
"""
Returns the limb-darkening coefficient of the host star.
"""
mu, a_coeffs = self.claret_LDC()
return 1 - sum([a_coeffs[k] * (1 - mu ** ((k+1) / 2)) for k in range(4)])
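# Note (hedged): LDC() above evaluates the Claret (2000) four-parameter
# limb-darkening law, I(mu)/I(1) = 1 - sum_{k=1..4} a_k * (1 - mu**(k/2)),
# at the tabulated mu of the nearest (Teff, logg) model.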
def get_response_specs(tele):
if tele=="tess":
return "../tess-response-function-v1.0.csv", ',', 1e1
elif tele=="kepler":
return "../kepler_hires.dat", '\t', 1e4
def response_func(tele):
file, delimiter, to_AA = get_response_specs(tele)
lambdas, response = np.genfromtxt(file, delimiter=delimiter, usecols=(0,1), unpack=True)
return lambdas * to_AA, response
def get_a(P, M_star, M_p):
"""
Use Kepler's third law to derive the star-planet separation.
"""
return (P ** 2 * const.G.value * (M_star + M_p) / (4 * np.pi ** 2)) ** (1/3)
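# Hedged sanity check for get_a(): with P = 3.156e7 s (one year),
# M_star = 1.989e30 kg and M_p = 5.97e24 kg the result is ~1.496e11 m,
# i.e. roughly 1 au, as expected from Kepler's third law.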
| [
"numpy.mean",
"numpy.log10",
"astropy.modeling.blackbody.blackbody_lambda",
"numpy.sin",
"numpy.genfromtxt"
]
| [((8484, 8553), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': 'delimiter', 'usecols': '(0, 1)', 'unpack': '(True)'}), '(file, delimiter=delimiter, usecols=(0, 1), unpack=True)\n', (8497, 8553), True, 'import numpy as np\n'), ((6992, 7078), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../claret_ldc.dat"""'], {'usecols': '(0, 1, 4, 5, 6, 7, 8, 10)', 'unpack': '(True)'}), "('../claret_ldc.dat', usecols=(0, 1, 4, 5, 6, 7, 8, 10),\n unpack=True)\n", (7005, 7078), True, 'import numpy as np\n'), ((7105, 7167), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../claret_ldc.dat"""'], {'usecols': '(10,)', 'dtype': '"""str"""'}), "('../claret_ldc.dat', usecols=(10,), dtype='str')\n", (7118, 7167), True, 'import numpy as np\n'), ((7820, 7886), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../claret_gdc.dat"""'], {'usecols': '(2, 3, 4)', 'unpack': '(True)'}), "('../claret_gdc.dat', usecols=(2, 3, 4), unpack=True)\n", (7833, 7886), True, 'import numpy as np\n'), ((2097, 2113), 'numpy.sin', 'np.sin', (['self.inc'], {}), '(self.inc)\n', (2103, 2113), True, 'import numpy as np\n'), ((3203, 3235), 'numpy.mean', 'np.mean', (['[alpha_red, alpha_blue]'], {}), '([alpha_red, alpha_blue])\n', (3210, 3235), True, 'import numpy as np\n'), ((4636, 4652), 'numpy.sin', 'np.sin', (['self.inc'], {}), '(self.inc)\n', (4642, 4652), True, 'import numpy as np\n'), ((7921, 7942), 'numpy.log10', 'np.log10', (['self.T_star'], {}), '(self.T_star)\n', (7929, 7942), True, 'import numpy as np\n'), ((2332, 2373), 'astropy.modeling.blackbody.blackbody_lambda', 'bb.blackbody_lambda', (['lambdas', 'self.T_star'], {}), '(lambdas, self.T_star)\n', (2351, 2373), True, 'import astropy.modeling.blackbody as bb\n'), ((3407, 3423), 'numpy.sin', 'np.sin', (['self.inc'], {}), '(self.inc)\n', (3413, 3423), True, 'import numpy as np\n'), ((4182, 4198), 'numpy.sin', 'np.sin', (['self.inc'], {}), '(self.inc)\n', (4188, 4198), True, 'import numpy as np\n'), ((3504, 3520), 'numpy.sin', 'np.sin', (['self.inc'], {}), '(self.inc)\n', (3510, 3520), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ServerState save."""
import functools
import os
import attr
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.examples.mnist import models
from tensorflow_federated.python.research.utils import checkpoint_utils
@attr.s(cmp=False, frozen=False)
class Obj(object):
"""Container for all state that need to be stored in the checkpoint.
Attributes:
model: A ModelWeights structure, containing Tensors or Variables.
optimizer_state: A list of Tensors or Variables, in the order returned by
optimizer.variables().
round_num: Training round_num.
"""
model = attr.ib()
optimizer_state = attr.ib()
round_num = attr.ib()
@classmethod
def from_anon_tuple(cls, anon_tuple, round_num):
# TODO(b/130724878): These conversions should not be needed.
return cls(
model=anon_tuple.model._asdict(recursive=True),
optimizer_state=list(anon_tuple.optimizer_state),
round_num=round_num)
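# Hedged usage note: the tests below round-trip this attrs container through
# checkpoint_utils.save()/load(); load() takes an example object so that it
# knows the structure to restore into.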
class SavedStateTest(tf.test.TestCase):
def test_save_and_load(self):
server_optimizer_fn = functools.partial(
tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)
iterative_process = tff.learning.build_federated_averaging_process(
models.model_fn, server_optimizer_fn=server_optimizer_fn)
server_state = iterative_process.initialize()
# TODO(b/130724878): These conversions should not be needed.
obj = Obj.from_anon_tuple(server_state, 1)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_1')
checkpoint_utils.save(obj, export_dir)
loaded_obj = checkpoint_utils.load(export_dir, obj)
self.assertAllClose(tf.nest.flatten(obj), tf.nest.flatten(loaded_obj))
def test_load_latest_state(self):
server_optimizer_fn = functools.partial(
tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)
iterative_process = tff.learning.build_federated_averaging_process(
models.model_fn, server_optimizer_fn=server_optimizer_fn)
server_state = iterative_process.initialize()
# TODO(b/130724878): These conversions should not be needed.
obj_1 = Obj.from_anon_tuple(server_state, 1)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_1')
checkpoint_utils.save(obj_1, export_dir)
# TODO(b/130724878): These conversions should not be needed.
obj_2 = Obj.from_anon_tuple(server_state, 2)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_2')
checkpoint_utils.save(obj_2, export_dir)
export_dir = checkpoint_utils.latest_checkpoint(self.get_temp_dir())
loaded_obj = checkpoint_utils.load(export_dir, obj_1)
self.assertEqual(os.path.join(self.get_temp_dir(), 'ckpt_2'), export_dir)
self.assertAllClose(tf.nest.flatten(obj_2), tf.nest.flatten(loaded_obj))
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| [
"attr.s",
"tensorflow_federated.learning.build_federated_averaging_process",
"tensorflow_federated.python.research.utils.checkpoint_utils.save",
"tensorflow.test.main",
"tensorflow.compat.v1.enable_v2_behavior",
"functools.partial",
"tensorflow.nest.flatten",
"tensorflow_federated.python.research.utils.checkpoint_utils.load",
"attr.ib"
]
| [((890, 921), 'attr.s', 'attr.s', ([], {'cmp': '(False)', 'frozen': '(False)'}), '(cmp=False, frozen=False)\n', (896, 921), False, 'import attr\n'), ((1255, 1264), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1262, 1264), False, 'import attr\n'), ((1285, 1294), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1292, 1294), False, 'import attr\n'), ((1309, 1318), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1316, 1318), False, 'import attr\n'), ((3433, 3466), 'tensorflow.compat.v1.enable_v2_behavior', 'tf.compat.v1.enable_v2_behavior', ([], {}), '()\n', (3464, 3466), True, 'import tensorflow as tf\n'), ((3469, 3483), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3481, 3483), True, 'import tensorflow as tf\n'), ((1711, 1786), 'functools.partial', 'functools.partial', (['tf.keras.optimizers.SGD'], {'learning_rate': '(0.1)', 'momentum': '(0.9)'}), '(tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)\n', (1728, 1786), False, 'import functools\n'), ((1821, 1929), 'tensorflow_federated.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', (['models.model_fn'], {'server_optimizer_fn': 'server_optimizer_fn'}), '(models.model_fn,\n server_optimizer_fn=server_optimizer_fn)\n', (1867, 1929), True, 'import tensorflow_federated as tff\n'), ((2163, 2201), 'tensorflow_federated.python.research.utils.checkpoint_utils.save', 'checkpoint_utils.save', (['obj', 'export_dir'], {}), '(obj, export_dir)\n', (2184, 2201), False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((2220, 2258), 'tensorflow_federated.python.research.utils.checkpoint_utils.load', 'checkpoint_utils.load', (['export_dir', 'obj'], {}), '(export_dir, obj)\n', (2241, 2258), False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((2398, 2473), 'functools.partial', 'functools.partial', (['tf.keras.optimizers.SGD'], {'learning_rate': '(0.1)', 'momentum': '(0.9)'}), '(tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)\n', (2415, 2473), False, 'import functools\n'), ((2508, 2616), 'tensorflow_federated.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', (['models.model_fn'], {'server_optimizer_fn': 'server_optimizer_fn'}), '(models.model_fn,\n server_optimizer_fn=server_optimizer_fn)\n', (2554, 2616), True, 'import tensorflow_federated as tff\n'), ((2851, 2891), 'tensorflow_federated.python.research.utils.checkpoint_utils.save', 'checkpoint_utils.save', (['obj_1', 'export_dir'], {}), '(obj_1, export_dir)\n', (2872, 2891), False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((3072, 3112), 'tensorflow_federated.python.research.utils.checkpoint_utils.save', 'checkpoint_utils.save', (['obj_2', 'export_dir'], {}), '(obj_2, export_dir)\n', (3093, 3112), False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((3205, 3245), 'tensorflow_federated.python.research.utils.checkpoint_utils.load', 'checkpoint_utils.load', (['export_dir', 'obj_1'], {}), '(export_dir, obj_1)\n', (3226, 3245), False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((2284, 2304), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['obj'], {}), '(obj)\n', (2299, 2304), True, 'import tensorflow as tf\n'), ((2306, 2333), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['loaded_obj'], {}), '(loaded_obj)\n', (2321, 2333), True, 'import tensorflow as tf\n'), ((3349, 3371), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['obj_2'], {}), '(obj_2)\n', 
(3364, 3371), True, 'import tensorflow as tf\n'), ((3373, 3400), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['loaded_obj'], {}), '(loaded_obj)\n', (3388, 3400), True, 'import tensorflow as tf\n')] |
"""
field.py
Class instance used for modifying field via Display window.
"""
# Load the needed packages
from functools import partial
from ..core import Variable, Component, QtGui, QtCore
class FieldButtonWindow(Component):
'''Class to display a Window with Field name radio buttons.'''
Vradar = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
def __init__(self, Vradar=None, Vfield=None, name="FieldButtons",
parent=None):
'''
Initialize the class to create the interface.
Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable. If None start new one with None
Vfield : :py:class:`~artview.core.core.Variable` instance
Field signal variable. If None start new one empty string
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to FieldButtonWindow.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
Notes
-----
        This class records the selected button and passes the
        changed value back to the shared variable.
'''
super(FieldButtonWindow, self).__init__(name=name, parent=parent)
# Set up signal, so that DISPLAY can react to external
# (or internal) changes in field (Core.Variable instances expected)
# The change is sent through Vfield
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if Vfield is None:
self.Vfield = Variable('')
else:
self.Vfield = Vfield
self.sharedVariables = {"Vradar": self.NewRadar,
"Vfield": self.NewField}
self.connectAllVariables()
self.CreateFieldWidget()
self.SetFieldRadioButtons()
self.show()
########################
# Button methods #
########################
def FieldSelectCmd(self, field):
'''Captures a selection and updates field variable.'''
self.Vfield.change(field)
def CreateFieldWidget(self):
'''Create a widget to store radio buttons to control field adjust.'''
self.radioBox = QtGui.QGroupBox("Field Selection", parent=self)
self.rBox_layout = QtGui.QVBoxLayout(self.radioBox)
self.radioBox.setLayout(self.rBox_layout)
self.setCentralWidget(self.radioBox)
def SetFieldRadioButtons(self):
'''Set a field selection using radio buttons.'''
# Instantiate the buttons into a list for future use
self.fieldbutton = {}
if self.Vradar.value is None:
return
# Loop through and create each field button and
# connect a value when selected
for field in self.Vradar.value.fields.keys():
button = QtGui.QRadioButton(field, self.radioBox)
self.fieldbutton[field] = button
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"),
partial(self.FieldSelectCmd, field))
self.rBox_layout.addWidget(button)
# set Checked the current field
self.NewField(self.Vfield, self.Vfield.value, True)
def NewField(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Update radio check
'''
if (self.Vradar.value is not None and
value in self.Vradar.value.fields):
self.fieldbutton[value].setChecked(True)
def NewRadar(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Recreate radio items
'''
self.CreateFieldWidget()
self.SetFieldRadioButtons()
| [
"functools.partial"
]
| [((3174, 3209), 'functools.partial', 'partial', (['self.FieldSelectCmd', 'field'], {}), '(self.FieldSelectCmd, field)\n', (3181, 3209), False, 'from functools import partial\n')] |
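FieldButtonWindow above never manipulates the display directly: FieldSelectCmd() only calls self.Vfield.change(...), and every component sharing that Variable is notified through its registered slot (NewField here). The snippet below is a minimal, PyQt-free sketch of that shared-variable pattern; this Variable class is a simplified stand-in written for illustration, not the actual artview.core.Variable implementation.

# Minimal stand-in for the shared-variable pattern used by FieldButtonWindow.
# NOTE: this `Variable` is an assumption-level sketch, not artview's real class.
class Variable(object):
    def __init__(self, value=None):
        self.value = value
        self._slots = []

    def subscribe(self, slot):
        # slot signature mirrors NewField/NewRadar: (variable, value, strong)
        self._slots.append(slot)

    def change(self, value, strong=True):
        self.value = value
        for slot in self._slots:
            slot(self, value, strong)


def new_field(variable, value, strong):
    print("field changed to %r (strong=%s)" % (value, strong))


if __name__ == "__main__":
    vfield = Variable('')
    vfield.subscribe(new_field)
    vfield.change('reflectivity')  # -> field changed to 'reflectivity' (strong=True)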
from bson.errors import InvalidId
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_str
from mongoengine import dereference
from mongoengine.base.document import BaseDocument
from mongoengine.document import Document
from rest_framework import serializers
from mongoengine.fields import ObjectId
import sys
if sys.version_info[0] >= 3:
def unicode(val):
return str(val)
class MongoDocumentField(serializers.WritableField):
MAX_RECURSION_DEPTH = 5 # default value of depth
def __init__(self, *args, **kwargs):
try:
self.model_field = kwargs.pop('model_field')
self.depth = kwargs.pop('depth', self.MAX_RECURSION_DEPTH)
except KeyError:
raise ValueError("%s requires 'model_field' kwarg" % self.type_label)
super(MongoDocumentField, self).__init__(*args, **kwargs)
def transform_document(self, document, depth):
data = {}
# serialize each required field
for field in document._fields:
if hasattr(document, smart_str(field)):
# finally check for an attribute 'field' on the instance
obj = getattr(document, field)
else:
continue
val = self.transform_object(obj, depth-1)
if val is not None:
data[field] = val
return data
def transform_dict(self, obj, depth):
return dict([(key, self.transform_object(val, depth-1))
for key, val in obj.items()])
def transform_object(self, obj, depth):
"""
Models to natives
Recursion for (embedded) objects
"""
if depth == 0:
# Return primary key if exists, else return default text
return str(getattr(obj, 'pk', "Max recursion depth exceeded"))
elif isinstance(obj, BaseDocument):
# Document, EmbeddedDocument
return self.transform_document(obj, depth-1)
elif isinstance(obj, dict):
# Dictionaries
return self.transform_dict(obj, depth-1)
elif isinstance(obj, list):
# List
return [self.transform_object(value, depth-1) for value in obj]
elif obj is None:
return None
else:
return unicode(obj) if isinstance(obj, ObjectId) else obj
class ReferenceField(MongoDocumentField):
type_label = 'ReferenceField'
def from_native(self, value):
try:
dbref = self.model_field.to_python(value)
except InvalidId:
raise ValidationError(self.error_messages['invalid'])
instance = dereference.DeReference().__call__([dbref])[0]
# Check if dereference was successful
if not isinstance(instance, Document):
msg = self.error_messages['invalid']
raise ValidationError(msg)
return instance
def to_native(self, obj):
return self.transform_object(obj, self.depth)
class ListField(MongoDocumentField):
type_label = 'ListField'
def from_native(self, value):
return self.model_field.to_python(value)
def to_native(self, obj):
return self.transform_object(obj, self.depth)
class EmbeddedDocumentField(MongoDocumentField):
type_label = 'EmbeddedDocumentField'
def __init__(self, *args, **kwargs):
try:
self.document_type = kwargs.pop('document_type')
except KeyError:
raise ValueError("EmbeddedDocumentField requires 'document_type' kwarg")
super(EmbeddedDocumentField, self).__init__(*args, **kwargs)
def get_default_value(self):
return self.to_native(self.default())
def to_native(self, obj):
if obj is None:
return None
else:
return self.model_field.to_mongo(obj)
def from_native(self, value):
return self.model_field.to_python(value)
class DynamicField(MongoDocumentField):
type_label = 'DynamicField'
def to_native(self, obj):
return self.model_field.to_python(obj) | [
"django.core.exceptions.ValidationError",
"django.utils.encoding.smart_str",
"mongoengine.dereference.DeReference"
]
| [((2884, 2904), 'django.core.exceptions.ValidationError', 'ValidationError', (['msg'], {}), '(msg)\n', (2899, 2904), False, 'from django.core.exceptions import ValidationError\n'), ((1074, 1090), 'django.utils.encoding.smart_str', 'smart_str', (['field'], {}), '(field)\n', (1083, 1090), False, 'from django.utils.encoding import smart_str\n'), ((2608, 2655), 'django.core.exceptions.ValidationError', 'ValidationError', (["self.error_messages['invalid']"], {}), "(self.error_messages['invalid'])\n", (2623, 2655), False, 'from django.core.exceptions import ValidationError\n'), ((2676, 2701), 'mongoengine.dereference.DeReference', 'dereference.DeReference', ([], {}), '()\n', (2699, 2701), False, 'from mongoengine import dereference\n')] |
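The serializer fields above cap their recursion with MAX_RECURSION_DEPTH so that circular references or deeply nested embedded documents cannot recurse forever: transform_object() returns a placeholder string once the depth budget is spent. The sketch below keeps only that depth-limited walk, using plain dicts and lists instead of DRF/mongoengine objects; it illustrates the idea and is not the library's API.

# Depth-limited recursive transform, mirroring transform_object() above but
# working on plain Python containers so it can be run stand-alone.
def transform(obj, depth):
    if depth == 0:
        return "Max recursion depth exceeded"
    if isinstance(obj, dict):
        return {key: transform(val, depth - 1) for key, val in obj.items()}
    if isinstance(obj, list):
        return [transform(val, depth - 1) for val in obj]
    return obj


if __name__ == "__main__":
    nested = {"a": {"b": {"c": {"d": 1}}}}
    print(transform(nested, 3))
    # {'a': {'b': {'c': 'Max recursion depth exceeded'}}}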
import pytest
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import get_excluded_operations
from fuzz_lightyear.datastore import get_included_tags
from fuzz_lightyear.datastore import get_non_vulnerable_operations
from fuzz_lightyear.datastore import get_user_defined_mapping
from fuzz_lightyear.plugins import get_enabled_plugins
from fuzz_lightyear.request import get_victim_session_factory
from fuzz_lightyear.supplements.abstraction import get_abstraction
@pytest.fixture(autouse=True)
def clear_caches():
get_abstraction.cache_clear()
get_user_defined_mapping.cache_clear()
get_enabled_plugins.cache_clear()
get_victim_session_factory.cache_clear()
get_excluded_operations.cache_clear()
get_non_vulnerable_operations.cache_clear()
get_included_tags.cache_clear()
_ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_ALL_POST_FUZZ_HOOKS_BY_TAG.clear()
_RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_RERUN_POST_FUZZ_HOOKS_BY_TAG.clear()
@pytest.fixture(autouse=True)
def ignore_hypothesis_non_interactive_example_warning():
"""In theory we're not supposed to use hypothesis'
strategy.example(), but fuzz-lightyear isn't using
hypothesis in a normal way.
"""
import warnings
from hypothesis.errors import NonInteractiveExampleWarning
warnings.filterwarnings(
'ignore',
category=NonInteractiveExampleWarning,
)
| [
"fuzz_lightyear.datastore.get_non_vulnerable_operations.cache_clear",
"fuzz_lightyear.datastore._ALL_POST_FUZZ_HOOKS_BY_TAG.clear",
"fuzz_lightyear.supplements.abstraction.get_abstraction.cache_clear",
"fuzz_lightyear.datastore._RERUN_POST_FUZZ_HOOKS_BY_TAG.clear",
"fuzz_lightyear.datastore.get_included_tags.cache_clear",
"fuzz_lightyear.datastore._ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear",
"fuzz_lightyear.plugins.get_enabled_plugins.cache_clear",
"fuzz_lightyear.datastore._RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear",
"pytest.fixture",
"fuzz_lightyear.request.get_victim_session_factory.cache_clear",
"fuzz_lightyear.datastore.get_user_defined_mapping.cache_clear",
"warnings.filterwarnings",
"fuzz_lightyear.datastore.get_excluded_operations.cache_clear"
]
| [((723, 751), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (737, 751), False, 'import pytest\n'), ((1238, 1266), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1252, 1266), False, 'import pytest\n'), ((776, 805), 'fuzz_lightyear.supplements.abstraction.get_abstraction.cache_clear', 'get_abstraction.cache_clear', ([], {}), '()\n', (803, 805), False, 'from fuzz_lightyear.supplements.abstraction import get_abstraction\n'), ((810, 848), 'fuzz_lightyear.datastore.get_user_defined_mapping.cache_clear', 'get_user_defined_mapping.cache_clear', ([], {}), '()\n', (846, 848), False, 'from fuzz_lightyear.datastore import get_user_defined_mapping\n'), ((853, 886), 'fuzz_lightyear.plugins.get_enabled_plugins.cache_clear', 'get_enabled_plugins.cache_clear', ([], {}), '()\n', (884, 886), False, 'from fuzz_lightyear.plugins import get_enabled_plugins\n'), ((891, 931), 'fuzz_lightyear.request.get_victim_session_factory.cache_clear', 'get_victim_session_factory.cache_clear', ([], {}), '()\n', (929, 931), False, 'from fuzz_lightyear.request import get_victim_session_factory\n'), ((936, 973), 'fuzz_lightyear.datastore.get_excluded_operations.cache_clear', 'get_excluded_operations.cache_clear', ([], {}), '()\n', (971, 973), False, 'from fuzz_lightyear.datastore import get_excluded_operations\n'), ((978, 1021), 'fuzz_lightyear.datastore.get_non_vulnerable_operations.cache_clear', 'get_non_vulnerable_operations.cache_clear', ([], {}), '()\n', (1019, 1021), False, 'from fuzz_lightyear.datastore import get_non_vulnerable_operations\n'), ((1026, 1057), 'fuzz_lightyear.datastore.get_included_tags.cache_clear', 'get_included_tags.cache_clear', ([], {}), '()\n', (1055, 1057), False, 'from fuzz_lightyear.datastore import get_included_tags\n'), ((1063, 1104), 'fuzz_lightyear.datastore._ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear', '_ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear', ([], {}), '()\n', (1102, 1104), False, 'from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION\n'), ((1109, 1144), 'fuzz_lightyear.datastore._ALL_POST_FUZZ_HOOKS_BY_TAG.clear', '_ALL_POST_FUZZ_HOOKS_BY_TAG.clear', ([], {}), '()\n', (1142, 1144), False, 'from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG\n'), ((1149, 1192), 'fuzz_lightyear.datastore._RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear', '_RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear', ([], {}), '()\n', (1190, 1192), False, 'from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION\n'), ((1197, 1234), 'fuzz_lightyear.datastore._RERUN_POST_FUZZ_HOOKS_BY_TAG.clear', '_RERUN_POST_FUZZ_HOOKS_BY_TAG.clear', ([], {}), '()\n', (1232, 1234), False, 'from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG\n'), ((1561, 1633), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'NonInteractiveExampleWarning'}), "('ignore', category=NonInteractiveExampleWarning)\n", (1584, 1633), False, 'import warnings\n')] |
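The fixture above exists because fuzz_lightyear memoises several factories with lru_cache-style caches; without clearing them, state built in one test would leak into the next. Below is a self-contained illustration of that failure mode and the autouse-fixture fix -- the cached function and its behaviour are invented for the example, not taken from fuzz_lightyear.

import functools

import pytest


@functools.lru_cache(maxsize=1)
def get_session_factory():
    # Hypothetical cached factory standing in for fuzz_lightyear's caches.
    return {"configured": False}


@pytest.fixture(autouse=True)
def clear_caches():
    get_session_factory.cache_clear()


def test_configures_factory():
    get_session_factory()["configured"] = True
    assert get_session_factory()["configured"]


def test_sees_fresh_factory():
    # Would fail if the dict mutated by the previous test leaked through the cache.
    assert get_session_factory()["configured"] is False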
import logging
from qrutilities.imageutils import ImageUtils
from PyQt4.QtGui import QColor
logger = logging.getLogger('console')
class WellPlotStyleHandler(object):
    '''
    Persists the style settings chosen in a well-plot style widget onto the
    well plot data object (title, track/label colours and opacities).
    '''
def saveDataState(self, wellPlotData, wellPlotStyleWidget):
if wellPlotStyleWidget.plotTitleOnCheckBox.isChecked():
wellPlotData.title_on = True
else:
wellPlotData.title_on = False
        wellPlotData.title = wellPlotStyleWidget.plotTitleLineEdit.text()
        # Track (plot) background colour and opacity
        r, g, b, a = QColor(wellPlotStyleWidget.trackBackgroundColorPushButton.color()).getRgb()
        rgbString = ImageUtils.rgbToString(r, g, b)
        wellPlotData.plot_background_rgb = rgbString
        wellPlotData.plot_background_alpha = wellPlotStyleWidget.trackBackgroundOpacitySpinBox.value()
        # Label background colour and opacity
        r, g, b, a = QColor(wellPlotStyleWidget.labelBackgroundColorPushButton.color()).getRgb()
        rgbString = ImageUtils.rgbToString(r, g, b)
        wellPlotData.label_background_rgb = rgbString
        wellPlotData.label_background_alpha = wellPlotStyleWidget.labelBackgroundOpacitySpinBox.value()
        # Label foreground (text) colour and opacity
        r, g, b, a = QColor(wellPlotStyleWidget.labelForegroundColorPushButton.color()).getRgb()
        rgbString = ImageUtils.rgbToString(r, g, b)
        wellPlotData.label_foreground_rgb = rgbString
        wellPlotData.label_foreground_alpha = wellPlotStyleWidget.labelForegroundOpacitySpinBox.value()
if wellPlotStyleWidget.singleRowLabelsCheckBox.isChecked():
wellPlotData.single_row_header_labels = True
else:
wellPlotData.single_row_header_labels = False | [
"logging.getLogger",
"qrutilities.imageutils.ImageUtils.rgbToString"
]
| [((101, 129), 'logging.getLogger', 'logging.getLogger', (['"""console"""'], {}), "('console')\n", (118, 129), False, 'import logging\n'), ((615, 646), 'qrutilities.imageutils.ImageUtils.rgbToString', 'ImageUtils.rgbToString', (['r', 'g', 'b'], {}), '(r, g, b)\n', (637, 646), False, 'from qrutilities.imageutils import ImageUtils\n'), ((924, 955), 'qrutilities.imageutils.ImageUtils.rgbToString', 'ImageUtils.rgbToString', (['r', 'g', 'b'], {}), '(r, g, b)\n', (946, 955), False, 'from qrutilities.imageutils import ImageUtils\n'), ((1235, 1266), 'qrutilities.imageutils.ImageUtils.rgbToString', 'ImageUtils.rgbToString', (['r', 'g', 'b'], {}), '(r, g, b)\n', (1257, 1266), False, 'from qrutilities.imageutils import ImageUtils\n')] |
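saveDataState() above repeats one small recipe three times: read a colour from a push button, keep only its RGB part as a string, and take the opacity from a separate spin box. The sketch below captures that recipe without PyQt4 or qrutilities; rgb_to_string is a local stand-in, and the exact format produced by ImageUtils.rgbToString is not assumed.

def rgb_to_string(r, g, b):
    # Stand-in only; the real ImageUtils.rgbToString output format is an assumption.
    return "%d,%d,%d" % (r, g, b)


def save_colour(rgba, opacity):
    """Return the (rgb_string, alpha) pair stored on the well plot data object."""
    r, g, b, _a = rgba                        # colour from the colour push button
    return rgb_to_string(r, g, b), opacity    # opacity comes from the spin box


if __name__ == "__main__":
    rgb, alpha = save_colour((30, 144, 255, 255), opacity=128)
    print(rgb, alpha)  # 30,144,255 128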
import datetime
import json
import dateutil.parser
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.utils import timezone
from apps.devicelocation.models import DeviceLocation
from apps.physicaldevice.models import Device
from apps.property.models import GenericProperty
from apps.report.models import GeneratedUserReport
from apps.sqsworker.exceptions import WorkerActionHardError
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.models import StreamData
from apps.streamevent.models import StreamEventData
from apps.streamfilter.models import *
from apps.streamnote.models import StreamNote
from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask
from apps.utils.gid.convert import *
from apps.utils.test_util import TestMixin
from ..models import *
from ..worker.archive_device_data import ArchiveDeviceDataAction
user_model = get_user_model()
class DataBlockCreateWorkerTests(TestMixin, TestCase):
def setUp(self):
self.usersTestSetup()
self.orgTestSetup()
self.deviceTemplateTestSetup()
self.v1 = StreamVariable.objects.create_variable(
name='Var A', project=self.p1, created_by=self.u2, lid=1,
)
self.v2 = StreamVariable.objects.create_variable(
name='Var B', project=self.p1, created_by=self.u3, lid=2,
)
self.pd1 = Device.objects.create_device(project=self.p1, label='d1', template=self.dt1, created_by=self.u2)
self.pd2 = Device.objects.create_device(project=self.p1, label='d2', template=self.dt1, created_by=self.u2)
StreamId.objects.create_after_new_device(self.pd1)
StreamId.objects.create_after_new_device(self.pd2)
self.s1 = StreamId.objects.filter(variable=self.v1).first()
self.s2 = StreamId.objects.filter(variable=self.v2).first()
def tearDown(self):
StreamFilterAction.objects.all().delete()
StreamFilterTrigger.objects.all().delete()
StreamFilter.objects.all().delete()
StreamId.objects.all().delete()
StreamVariable.objects.all().delete()
GenericProperty.objects.all().delete()
Device.objects.all().delete()
StreamData.objects.all().delete()
StreamEventData.objects.all().delete()
self.deviceTemplateTestTearDown()
self.orgTestTearDown()
self.userTestTearDown()
def testDataBlockActionBadArguments(self):
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={})
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={'foobar': 5})
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={'data_block_slug': 'b--0000-0000-0000-0001', 'extra-bad-arg': 'foo'})
self.assertTrue(ArchiveDeviceDataAction._arguments_ok({'data_block_slug': 'b--0000-0000-0000-0001'}))
action = ArchiveDeviceDataAction()
self.assertIsNotNone(action)
with self.assertRaises(WorkerActionHardError):
action.execute(arguments={'foobar': 5})
def testDataBlockActionNoDataBlock(self):
action = ArchiveDeviceDataAction()
self.assertIsNotNone(action)
with self.assertRaises(WorkerActionHardError):
action.execute({'data_block_slug': 'b--0000-0000-0000-0001'})
def testDataBlockActionMigrateProperties(self):
db1 = DataBlock.objects.create(org=self.o1, title='test', device=self.pd1, block=1, created_by=self.u1)
GenericProperty.objects.create_int_property(slug=self.pd1.slug,
created_by=self.u1,
name='prop1', value=4)
GenericProperty.objects.create_str_property(slug=self.pd1.slug,
created_by=self.u1,
name='prop2', value='4')
GenericProperty.objects.create_bool_property(slug=self.pd1.slug,
created_by=self.u1, is_system=True,
name='@prop3', value=True)
self.assertEqual(GenericProperty.objects.object_properties_qs(self.pd1).count(), 3)
self.assertEqual(GenericProperty.objects.object_properties_qs(db1).count(), 0)
action = ArchiveDeviceDataAction()
action._block = db1
action._device = self.pd1
action._migrate_properties()
self.assertEqual(GenericProperty.objects.object_properties_qs(self.pd1).count(), 1)
self.assertEqual(GenericProperty.objects.object_properties_qs(db1).count(), 3)
def testDataBlockActionMigrateStreams(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
var3 = StreamVariable.objects.create_variable(
name='Var C', project=self.p1, created_by=self.u2, lid=3,
)
stream3 = StreamId.objects.create_stream(
project=self.p1, variable=var3, device=device, created_by=self.u2
)
self.assertEqual(self.p1.variables.count(), 3)
count0 = StreamId.objects.count()
self.assertEqual(device.streamids.count(), 3)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamId.objects.count(), count0 + 3)
def testDataBlockActionMigrateStreamData(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=5,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=6,
int_value=6
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=7,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=8,
int_value=8
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=9,
int_value=9
)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 2)
action._migrate_stream_data()
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 0)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).count(), 3)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream2).count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).first().project_slug, '')
def testDataBlockActionMigrateStreamEvents(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=3
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream2.slug,
streamer_local_id=4
)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 1)
action._migrate_stream_events()
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 0)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream1).count(), 2)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream2).count(), 1)
def testDataBlockActionMigrateStreamNote(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='System 1',
type='sc'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 2'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u1,
note='Note 3'
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 4'
)
self.assertEqual(StreamNote.objects.count(), 4)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 2)
action._migrate_stream_notes()
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamNote.objects.count(), 4)
self.assertEqual(StreamNote.objects.filter(target_slug=new_stream1).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=block.slug).count(), 1)
def testDataBlockActionMigrateDeviceLocations(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=10.000,
user=self.u2
)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=11.000,
user=self.u2
)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=12.000,
user=self.u2
)
self.assertEqual(DeviceLocation.objects.count(), 3)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
self.assertEqual(DeviceLocation.objects.filter(target_slug=device.slug).count(), 3)
action._migrate_device_locations()
self.assertEqual(DeviceLocation.objects.filter(target_slug=device.slug).count(), 0)
self.assertEqual(DeviceLocation.objects.filter(target_slug=block.slug).count(), 3)
def testDataBlockActionMigrateReports(self):
db1 = DataBlock.objects.create(org=self.pd1.org, title='test', device=self.pd1, block=1, created_by=self.u2)
GeneratedUserReport.objects.create(
org=self.pd1.org,
label='My report 1',
source_ref=self.pd1.slug,
created_by=self.u2
)
GeneratedUserReport.objects.create(
org=self.pd1.org,
label='My report 2',
source_ref=self.pd1.slug,
created_by=self.u2
)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=self.pd1.slug).count(), 2)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=db1.slug).count(), 0)
action = ArchiveDeviceDataAction()
action._block = db1
action._device = self.pd1
action._migrate_reports()
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=self.pd1.slug).count(), 0)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=db1.slug).count(), 2)
def testDataBlockActionTestAll(self):
sg = SensorGraph.objects.create(name='SG 1',
major_version=1,
created_by=self.u1, org=self.o1)
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, sg=sg, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
GenericProperty.objects.create_int_property(slug=device.slug,
created_by=self.u1,
name='prop1', value=4)
GenericProperty.objects.create_str_property(slug=device.slug,
created_by=self.u1,
name='prop2', value='4')
GenericProperty.objects.create_bool_property(slug=device.slug,
created_by=self.u1,
name='prop3', value=True)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=3
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream2.slug,
streamer_local_id=4
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=5,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=6,
int_value=6
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=7,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=8,
int_value=8
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=9,
int_value=9
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 1'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 2'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 3'
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u1,
note='Note 4'
)
self.assertEqual(GenericProperty.objects.object_properties_qs(device).count(), 3)
self.assertEqual(GenericProperty.objects.object_properties_qs(block).count(), 0)
self.assertEqual(device.streamids.count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 1)
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 3)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action.execute(arguments={'data_block_slug': block.slug})
self.assertEqual(GenericProperty.objects.object_properties_qs(device).count(), 0)
self.assertEqual(GenericProperty.objects.object_properties_qs(block).count(), 3)
self.assertEqual(device.streamids.count(), 4)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamId.objects.filter(slug=new_stream1).count(), 1)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamId.objects.filter(slug=new_stream2).count(), 1)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).count(), 3)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream1).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=new_stream1).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream2).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream2).count(), 1)
block = DataBlock.objects.first()
self.assertIsNotNone(block.completed_on)
self.assertIsNotNone(block.sg)
self.assertEqual(block.sg, sg)
def testDataBlockActionTestDataMask(self):
sg = SensorGraph.objects.create(name='SG 1',
major_version=1,
created_by=self.u1, org=self.o1)
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, sg=sg, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
dt1 = dateutil.parser.parse('2017-09-28T10:00:00Z')
dt2 = dateutil.parser.parse('2017-09-28T11:00:00Z')
dt3 = dateutil.parser.parse('2017-09-30T10:00:00Z')
dt4 = dateutil.parser.parse('2017-09-30T10:10:00Z')
dt5 = dateutil.parser.parse('2017-09-30T10:20:00Z')
set_data_mask(device, '2017-09-28T10:30:00Z', '2017-09-30T10:15:00Z', [], [], self.u1)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt1,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt2,
int_value=6
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt3,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt4,
int_value=8
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt5,
int_value=9
)
self.assertEqual(device.streamids.count(), 1)
data_mask_event = get_data_mask_event(device)
mask_slug = data_mask_event.stream_slug
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 5)
self.assertEqual(StreamEventData.objects.filter(stream_slug=mask_slug).count(), 1)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action.execute(arguments={'data_block_slug': block.slug})
self.assertEqual(device.streamids.count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=mask_slug).count(), 0)
data_mask_event = get_data_mask_event(block)
self.assertEqual(StreamEventData.objects.filter(stream_slug=data_mask_event.stream_slug).count(), 1)
| [
"apps.property.models.GenericProperty.objects.create_str_property",
"apps.streamdata.models.StreamData.objects.all",
"apps.report.models.GeneratedUserReport.objects.filter",
"apps.property.models.GenericProperty.objects.create_int_property",
"apps.physicaldevice.models.Device.objects.create_device",
"apps.stream.models.StreamId.objects.filter",
"apps.devicelocation.models.DeviceLocation.objects.filter",
"apps.streamevent.models.StreamEventData.objects.filter",
"apps.stream.models.StreamId.objects.create_after_new_device",
"apps.stream.models.StreamVariable.objects.all",
"django.contrib.auth.get_user_model",
"apps.property.models.GenericProperty.objects.all",
"apps.stream.models.StreamId.objects.count",
"apps.stream.models.StreamId.objects.all",
"django.utils.timezone.now",
"apps.utils.data_mask.mask_utils.set_data_mask",
"apps.streamevent.models.StreamEventData.objects.all",
"apps.utils.data_mask.mask_utils.get_data_mask_event",
"apps.physicaldevice.models.Device.objects.all",
"apps.property.models.GenericProperty.objects.create_bool_property",
"apps.streamnote.models.StreamNote.objects.filter",
"apps.report.models.GeneratedUserReport.objects.create",
"apps.devicelocation.models.DeviceLocation.objects.count",
"apps.streamnote.models.StreamNote.objects.count",
"apps.stream.models.StreamId.objects.create_stream",
"apps.streamdata.models.StreamData.objects.create",
"apps.streamdata.models.StreamData.objects.filter",
"apps.stream.models.StreamVariable.objects.create_variable",
"apps.property.models.GenericProperty.objects.object_properties_qs"
]
| [((937, 953), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (951, 953), False, 'from django.contrib.auth import get_user_model\n'), ((1148, 1248), 'apps.stream.models.StreamVariable.objects.create_variable', 'StreamVariable.objects.create_variable', ([], {'name': '"""Var A"""', 'project': 'self.p1', 'created_by': 'self.u2', 'lid': '(1)'}), "(name='Var A', project=self.p1,\n created_by=self.u2, lid=1)\n", (1186, 1248), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((1286, 1386), 'apps.stream.models.StreamVariable.objects.create_variable', 'StreamVariable.objects.create_variable', ([], {'name': '"""Var B"""', 'project': 'self.p1', 'created_by': 'self.u3', 'lid': '(2)'}), "(name='Var B', project=self.p1,\n created_by=self.u3, lid=2)\n", (1324, 1386), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((1425, 1525), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d1"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d1', template=self.dt1,\n created_by=self.u2)\n", (1453, 1525), False, 'from apps.physicaldevice.models import Device\n'), ((1541, 1641), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d2"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d2', template=self.dt1,\n created_by=self.u2)\n", (1569, 1641), False, 'from apps.physicaldevice.models import Device\n'), ((1646, 1696), 'apps.stream.models.StreamId.objects.create_after_new_device', 'StreamId.objects.create_after_new_device', (['self.pd1'], {}), '(self.pd1)\n', (1686, 1696), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((1705, 1755), 'apps.stream.models.StreamId.objects.create_after_new_device', 'StreamId.objects.create_after_new_device', (['self.pd2'], {}), '(self.pd2)\n', (1745, 1755), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((3611, 3722), 'apps.property.models.GenericProperty.objects.create_int_property', 'GenericProperty.objects.create_int_property', ([], {'slug': 'self.pd1.slug', 'created_by': 'self.u1', 'name': '"""prop1"""', 'value': '(4)'}), "(slug=self.pd1.slug, created_by=\n self.u1, name='prop1', value=4)\n", (3654, 3722), False, 'from apps.property.models import GenericProperty\n'), ((3830, 3943), 'apps.property.models.GenericProperty.objects.create_str_property', 'GenericProperty.objects.create_str_property', ([], {'slug': 'self.pd1.slug', 'created_by': 'self.u1', 'name': '"""prop2"""', 'value': '"""4"""'}), "(slug=self.pd1.slug, created_by=\n self.u1, name='prop2', value='4')\n", (3873, 3943), False, 'from apps.property.models import GenericProperty\n'), ((4051, 4183), 'apps.property.models.GenericProperty.objects.create_bool_property', 'GenericProperty.objects.create_bool_property', ([], {'slug': 'self.pd1.slug', 'created_by': 'self.u1', 'is_system': '(True)', 'name': '"""@prop3"""', 'value': '(True)'}), "(slug=self.pd1.slug, created_by\n =self.u1, is_system=True, name='@prop3', value=True)\n", (4095, 4183), False, 'from apps.property.models import GenericProperty\n'), ((4854, 4954), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d3"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d3', template=self.dt1,\n created_by=self.u2)\n", (4882, 4954), False, 
'from apps.physicaldevice.models import Device\n'), ((5081, 5186), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v1', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v1, device=\n device, created_by=self.u2)\n', (5111, 5186), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((5222, 5327), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v2', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v2, device=\n device, created_by=self.u2)\n', (5252, 5327), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((5360, 5460), 'apps.stream.models.StreamVariable.objects.create_variable', 'StreamVariable.objects.create_variable', ([], {'name': '"""Var C"""', 'project': 'self.p1', 'created_by': 'self.u2', 'lid': '(3)'}), "(name='Var C', project=self.p1,\n created_by=self.u2, lid=3)\n", (5398, 5460), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((5498, 5600), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'var3', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=var3, device=\n device, created_by=self.u2)\n', (5528, 5600), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((5690, 5714), 'apps.stream.models.StreamId.objects.count', 'StreamId.objects.count', ([], {}), '()\n', (5712, 5714), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((6041, 6141), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d3"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d3', template=self.dt1,\n created_by=self.u2)\n", (6069, 6141), False, 'from apps.physicaldevice.models import Device\n'), ((6268, 6373), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v1', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v1, device=\n device, created_by=self.u2)\n', (6298, 6373), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((6409, 6514), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v2', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v2, device=\n device, created_by=self.u2)\n', (6439, 6514), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((8573, 8673), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d3"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d3', template=self.dt1,\n created_by=self.u2)\n", (8601, 8673), False, 'from apps.physicaldevice.models import Device\n'), ((8800, 8905), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v1', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v1, device=\n device, created_by=self.u2)\n', (8830, 8905), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((8941, 9046), 'apps.stream.models.StreamId.objects.create_stream', 
'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v2', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v2, device=\n device, created_by=self.u2)\n', (8971, 9046), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((10595, 10695), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d3"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d3', template=self.dt1,\n created_by=self.u2)\n", (10623, 10695), False, 'from apps.physicaldevice.models import Device\n'), ((10822, 10927), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v1', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v1, device=\n device, created_by=self.u2)\n', (10852, 10927), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((10953, 11058), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v2', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v2, device=\n device, created_by=self.u2)\n', (10983, 11058), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((12785, 12885), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d3"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d3', template=self.dt1,\n created_by=self.u2)\n", (12813, 12885), False, 'from apps.physicaldevice.models import Device\n'), ((14218, 14341), 'apps.report.models.GeneratedUserReport.objects.create', 'GeneratedUserReport.objects.create', ([], {'org': 'self.pd1.org', 'label': '"""My report 1"""', 'source_ref': 'self.pd1.slug', 'created_by': 'self.u2'}), "(org=self.pd1.org, label='My report 1',\n source_ref=self.pd1.slug, created_by=self.u2)\n", (14252, 14341), False, 'from apps.report.models import GeneratedUserReport\n'), ((14404, 14527), 'apps.report.models.GeneratedUserReport.objects.create', 'GeneratedUserReport.objects.create', ([], {'org': 'self.pd1.org', 'label': '"""My report 2"""', 'source_ref': 'self.pd1.slug', 'created_by': 'self.u2'}), "(org=self.pd1.org, label='My report 2',\n source_ref=self.pd1.slug, created_by=self.u2)\n", (14438, 14527), False, 'from apps.report.models import GeneratedUserReport\n'), ((15348, 15455), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d3"""', 'template': 'self.dt1', 'sg': 'sg', 'created_by': 'self.u2'}), "(project=self.p1, label='d3', template=self.dt1,\n sg=sg, created_by=self.u2)\n", (15376, 15455), False, 'from apps.physicaldevice.models import Device\n'), ((15582, 15687), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v1', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v1, device=\n device, created_by=self.u2)\n', (15612, 15687), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((15723, 15828), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v2', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v2, device=\n device, 
created_by=self.u2)\n', (15753, 15828), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((15855, 15964), 'apps.property.models.GenericProperty.objects.create_int_property', 'GenericProperty.objects.create_int_property', ([], {'slug': 'device.slug', 'created_by': 'self.u1', 'name': '"""prop1"""', 'value': '(4)'}), "(slug=device.slug, created_by=\n self.u1, name='prop1', value=4)\n", (15898, 15964), False, 'from apps.property.models import GenericProperty\n'), ((16072, 16183), 'apps.property.models.GenericProperty.objects.create_str_property', 'GenericProperty.objects.create_str_property', ([], {'slug': 'device.slug', 'created_by': 'self.u1', 'name': '"""prop2"""', 'value': '"""4"""'}), "(slug=device.slug, created_by=\n self.u1, name='prop2', value='4')\n", (16115, 16183), False, 'from apps.property.models import GenericProperty\n'), ((16291, 16404), 'apps.property.models.GenericProperty.objects.create_bool_property', 'GenericProperty.objects.create_bool_property', ([], {'slug': 'device.slug', 'created_by': 'self.u1', 'name': '"""prop3"""', 'value': '(True)'}), "(slug=device.slug, created_by=\n self.u1, name='prop3', value=True)\n", (16335, 16404), False, 'from apps.property.models import GenericProperty\n'), ((21704, 21811), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d3"""', 'template': 'self.dt1', 'sg': 'sg', 'created_by': 'self.u2'}), "(project=self.p1, label='d3', template=self.dt1,\n sg=sg, created_by=self.u2)\n", (21732, 21811), False, 'from apps.physicaldevice.models import Device\n'), ((21938, 22043), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', ([], {'project': 'self.p1', 'variable': 'self.v1', 'device': 'device', 'created_by': 'self.u2'}), '(project=self.p1, variable=self.v1, device=\n device, created_by=self.u2)\n', (21968, 22043), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((22371, 22462), 'apps.utils.data_mask.mask_utils.set_data_mask', 'set_data_mask', (['device', '"""2017-09-28T10:30:00Z"""', '"""2017-09-30T10:15:00Z"""', '[]', '[]', 'self.u1'], {}), "(device, '2017-09-28T10:30:00Z', '2017-09-30T10:15:00Z', [], [\n ], self.u1)\n", (22384, 22462), False, 'from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask\n'), ((22467, 22563), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', ([], {'stream_slug': 'stream1.slug', 'type': '"""Num"""', 'timestamp': 'dt1', 'int_value': '(5)'}), "(stream_slug=stream1.slug, type='Num', timestamp=\n dt1, int_value=5)\n", (22492, 22563), False, 'from apps.streamdata.models import StreamData\n'), ((22625, 22721), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', ([], {'stream_slug': 'stream1.slug', 'type': '"""Num"""', 'timestamp': 'dt2', 'int_value': '(6)'}), "(stream_slug=stream1.slug, type='Num', timestamp=\n dt2, int_value=6)\n", (22650, 22721), False, 'from apps.streamdata.models import StreamData\n'), ((22783, 22879), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', ([], {'stream_slug': 'stream1.slug', 'type': '"""Num"""', 'timestamp': 'dt3', 'int_value': '(7)'}), "(stream_slug=stream1.slug, type='Num', timestamp=\n dt3, int_value=7)\n", (22808, 22879), False, 'from apps.streamdata.models import StreamData\n'), ((22941, 23037), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', ([], {'stream_slug': 'stream1.slug', 'type': 
'"""Num"""', 'timestamp': 'dt4', 'int_value': '(8)'}), "(stream_slug=stream1.slug, type='Num', timestamp=\n dt4, int_value=8)\n", (22966, 23037), False, 'from apps.streamdata.models import StreamData\n'), ((23099, 23195), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', ([], {'stream_slug': 'stream1.slug', 'type': '"""Num"""', 'timestamp': 'dt5', 'int_value': '(9)'}), "(stream_slug=stream1.slug, type='Num', timestamp=\n dt5, int_value=9)\n", (23124, 23195), False, 'from apps.streamdata.models import StreamData\n'), ((23331, 23358), 'apps.utils.data_mask.mask_utils.get_data_mask_event', 'get_data_mask_event', (['device'], {}), '(device)\n', (23350, 23358), False, 'from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask\n'), ((24022, 24048), 'apps.utils.data_mask.mask_utils.get_data_mask_event', 'get_data_mask_event', (['block'], {}), '(block)\n', (24041, 24048), False, 'from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask\n'), ((5932, 5956), 'apps.stream.models.StreamId.objects.count', 'StreamId.objects.count', ([], {}), '()\n', (5954, 5956), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((11841, 11867), 'apps.streamnote.models.StreamNote.objects.count', 'StreamNote.objects.count', ([], {}), '()\n', (11865, 11867), False, 'from apps.streamnote.models import StreamNote\n'), ((12503, 12529), 'apps.streamnote.models.StreamNote.objects.count', 'StreamNote.objects.count', ([], {}), '()\n', (12527, 12529), False, 'from apps.streamnote.models import StreamNote\n'), ((13579, 13609), 'apps.devicelocation.models.DeviceLocation.objects.count', 'DeviceLocation.objects.count', ([], {}), '()\n', (13607, 13609), False, 'from apps.devicelocation.models import DeviceLocation\n'), ((1774, 1815), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', ([], {'variable': 'self.v1'}), '(variable=self.v1)\n', (1797, 1815), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((1842, 1883), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', ([], {'variable': 'self.v2'}), '(variable=self.v2)\n', (1865, 1883), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((2070, 2092), 'apps.stream.models.StreamId.objects.all', 'StreamId.objects.all', ([], {}), '()\n', (2090, 2092), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((2110, 2138), 'apps.stream.models.StreamVariable.objects.all', 'StreamVariable.objects.all', ([], {}), '()\n', (2136, 2138), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((2156, 2185), 'apps.property.models.GenericProperty.objects.all', 'GenericProperty.objects.all', ([], {}), '()\n', (2183, 2185), False, 'from apps.property.models import GenericProperty\n'), ((2203, 2223), 'apps.physicaldevice.models.Device.objects.all', 'Device.objects.all', ([], {}), '()\n', (2221, 2223), False, 'from apps.physicaldevice.models import Device\n'), ((2241, 2265), 'apps.streamdata.models.StreamData.objects.all', 'StreamData.objects.all', ([], {}), '()\n', (2263, 2265), False, 'from apps.streamdata.models import StreamData\n'), ((2283, 2312), 'apps.streamevent.models.StreamEventData.objects.all', 'StreamEventData.objects.all', ([], {}), '()\n', (2310, 2312), False, 'from apps.streamevent.models import StreamEventData\n'), ((6652, 6666), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6664, 6666), False, 'from django.utils import timezone\n'), ((6854, 6868), 
'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6866, 6868), False, 'from django.utils import timezone\n'), ((7056, 7070), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7068, 7070), False, 'from django.utils import timezone\n'), ((7258, 7272), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7270, 7272), False, 'from django.utils import timezone\n'), ((7460, 7474), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7472, 7474), False, 'from django.utils import timezone\n'), ((9127, 9141), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (9139, 9141), False, 'from django.utils import timezone\n'), ((9318, 9332), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (9330, 9332), False, 'from django.utils import timezone\n'), ((9509, 9523), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (9521, 9523), False, 'from django.utils import timezone\n'), ((11171, 11185), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (11183, 11185), False, 'from django.utils import timezone\n'), ((11375, 11389), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (11387, 11389), False, 'from django.utils import timezone\n'), ((11554, 11568), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (11566, 11568), False, 'from django.utils import timezone\n'), ((11732, 11746), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (11744, 11746), False, 'from django.utils import timezone\n'), ((13056, 13070), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (13068, 13070), False, 'from django.utils import timezone\n'), ((13242, 13256), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (13254, 13256), False, 'from django.utils import timezone\n'), ((13428, 13442), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (13440, 13442), False, 'from django.utils import timezone\n'), ((16568, 16582), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (16580, 16582), False, 'from django.utils import timezone\n'), ((16759, 16773), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (16771, 16773), False, 'from django.utils import timezone\n'), ((16950, 16964), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (16962, 16964), False, 'from django.utils import timezone\n'), ((17198, 17212), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (17210, 17212), False, 'from django.utils import timezone\n'), ((17400, 17414), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (17412, 17414), False, 'from django.utils import timezone\n'), ((17602, 17616), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (17614, 17616), False, 'from django.utils import timezone\n'), ((17804, 17818), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (17816, 17818), False, 'from django.utils import timezone\n'), ((18006, 18020), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (18018, 18020), False, 'from django.utils import timezone\n'), ((18184, 18198), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (18196, 18198), False, 'from django.utils import timezone\n'), ((18363, 18377), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (18375, 18377), False, 'from django.utils import timezone\n'), ((18542, 18556), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (18554, 18556), False, 'from django.utils import timezone\n'), ((18720, 18734), 
'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (18732, 18734), False, 'from django.utils import timezone\n'), ((4310, 4364), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['self.pd1'], {}), '(self.pd1)\n', (4354, 4364), False, 'from apps.property.models import GenericProperty\n'), ((4402, 4451), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['db1'], {}), '(db1)\n', (4446, 4451), False, 'from apps.property.models import GenericProperty\n'), ((4632, 4686), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['self.pd1'], {}), '(self.pd1)\n', (4676, 4686), False, 'from apps.property.models import GenericProperty\n'), ((4724, 4773), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['db1'], {}), '(db1)\n', (4768, 4773), False, 'from apps.property.models import GenericProperty\n'), ((7707, 7758), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (7732, 7758), False, 'from apps.streamdata.models import StreamData\n'), ((7796, 7847), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (7821, 7847), False, 'from apps.streamdata.models import StreamData\n'), ((7925, 7976), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (7950, 7976), False, 'from apps.streamdata.models import StreamData\n'), ((8014, 8065), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (8039, 8065), False, 'from apps.streamdata.models import StreamData\n'), ((8175, 8225), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'new_stream1'}), '(stream_slug=new_stream1)\n', (8200, 8225), False, 'from apps.streamdata.models import StreamData\n'), ((8334, 8384), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'new_stream2'}), '(stream_slug=new_stream2)\n', (8359, 8384), False, 'from apps.streamdata.models import StreamData\n'), ((9802, 9858), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (9832, 9858), False, 'from apps.streamevent.models import StreamEventData\n'), ((9896, 9952), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (9926, 9952), False, 'from apps.streamevent.models import StreamEventData\n'), ((10032, 10088), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (10062, 10088), False, 'from apps.streamevent.models import StreamEventData\n'), ((10126, 10182), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (10156, 10182), False, 'from apps.streamevent.models import StreamEventData\n'), 
((10292, 10347), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'new_stream1'}), '(stream_slug=new_stream1)\n', (10322, 10347), False, 'from apps.streamevent.models import StreamEventData\n'), ((10456, 10511), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'new_stream2'}), '(stream_slug=new_stream2)\n', (10486, 10511), False, 'from apps.streamevent.models import StreamEventData\n'), ((12036, 12087), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'stream1.slug'}), '(target_slug=stream1.slug)\n', (12061, 12087), False, 'from apps.streamnote.models import StreamNote\n'), ((12125, 12175), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'device.slug'}), '(target_slug=device.slug)\n', (12150, 12175), False, 'from apps.streamnote.models import StreamNote\n'), ((12254, 12305), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'stream1.slug'}), '(target_slug=stream1.slug)\n', (12279, 12305), False, 'from apps.streamnote.models import StreamNote\n'), ((12343, 12393), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'device.slug'}), '(target_slug=device.slug)\n', (12368, 12393), False, 'from apps.streamnote.models import StreamNote\n'), ((12559, 12609), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'new_stream1'}), '(target_slug=new_stream1)\n', (12584, 12609), False, 'from apps.streamnote.models import StreamNote\n'), ((12647, 12696), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'block.slug'}), '(target_slug=block.slug)\n', (12672, 12696), False, 'from apps.streamnote.models import StreamNote\n'), ((13746, 13800), 'apps.devicelocation.models.DeviceLocation.objects.filter', 'DeviceLocation.objects.filter', ([], {'target_slug': 'device.slug'}), '(target_slug=device.slug)\n', (13775, 13800), False, 'from apps.devicelocation.models import DeviceLocation\n'), ((13883, 13937), 'apps.devicelocation.models.DeviceLocation.objects.filter', 'DeviceLocation.objects.filter', ([], {'target_slug': 'device.slug'}), '(target_slug=device.slug)\n', (13912, 13937), False, 'from apps.devicelocation.models import DeviceLocation\n'), ((13975, 14028), 'apps.devicelocation.models.DeviceLocation.objects.filter', 'DeviceLocation.objects.filter', ([], {'target_slug': 'block.slug'}), '(target_slug=block.slug)\n', (14004, 14028), False, 'from apps.devicelocation.models import DeviceLocation\n'), ((14608, 14668), 'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', ([], {'source_ref': 'self.pd1.slug'}), '(source_ref=self.pd1.slug)\n', (14642, 14668), False, 'from apps.report.models import GeneratedUserReport\n'), ((14706, 14761), 'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', ([], {'source_ref': 'db1.slug'}), '(source_ref=db1.slug)\n', (14740, 14761), False, 'from apps.report.models import GeneratedUserReport\n'), ((14939, 14999), 'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', ([], {'source_ref': 'self.pd1.slug'}), '(source_ref=self.pd1.slug)\n', (14973, 14999), False, 'from apps.report.models import GeneratedUserReport\n'), ((15037, 15092), 
'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', ([], {'source_ref': 'db1.slug'}), '(source_ref=db1.slug)\n', (15071, 15092), False, 'from apps.report.models import GeneratedUserReport\n'), ((18830, 18882), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['device'], {}), '(device)\n', (18874, 18882), False, 'from apps.property.models import GenericProperty\n'), ((18920, 18971), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['block'], {}), '(block)\n', (18964, 18971), False, 'from apps.property.models import GenericProperty\n'), ((19065, 19116), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (19090, 19116), False, 'from apps.streamdata.models import StreamData\n'), ((19154, 19205), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (19179, 19205), False, 'from apps.streamdata.models import StreamData\n'), ((19243, 19299), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (19273, 19299), False, 'from apps.streamevent.models import StreamEventData\n'), ((19337, 19393), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (19367, 19393), False, 'from apps.streamevent.models import StreamEventData\n'), ((19431, 19482), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'stream1.slug'}), '(target_slug=stream1.slug)\n', (19456, 19482), False, 'from apps.streamnote.models import StreamNote\n'), ((19520, 19570), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'device.slug'}), '(target_slug=device.slug)\n', (19545, 19570), False, 'from apps.streamnote.models import StreamNote\n'), ((19781, 19833), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['device'], {}), '(device)\n', (19825, 19833), False, 'from apps.property.models import GenericProperty\n'), ((19871, 19922), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', (['block'], {}), '(block)\n', (19915, 19922), False, 'from apps.property.models import GenericProperty\n'), ((20016, 20067), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (20041, 20067), False, 'from apps.streamdata.models import StreamData\n'), ((20105, 20156), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (20130, 20156), False, 'from apps.streamdata.models import StreamData\n'), ((20194, 20250), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (20224, 20250), False, 'from apps.streamevent.models import StreamEventData\n'), ((20288, 20344), 'apps.streamevent.models.StreamEventData.objects.filter', 
'StreamEventData.objects.filter', ([], {'stream_slug': 'stream2.slug'}), '(stream_slug=stream2.slug)\n', (20318, 20344), False, 'from apps.streamevent.models import StreamEventData\n'), ((20382, 20433), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'stream1.slug'}), '(target_slug=stream1.slug)\n', (20407, 20433), False, 'from apps.streamnote.models import StreamNote\n'), ((20471, 20521), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'device.slug'}), '(target_slug=device.slug)\n', (20496, 20521), False, 'from apps.streamnote.models import StreamNote\n'), ((20631, 20672), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', ([], {'slug': 'new_stream1'}), '(slug=new_stream1)\n', (20654, 20672), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((20781, 20822), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', ([], {'slug': 'new_stream2'}), '(slug=new_stream2)\n', (20804, 20822), False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((20861, 20911), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'new_stream1'}), '(stream_slug=new_stream1)\n', (20886, 20911), False, 'from apps.streamdata.models import StreamData\n'), ((20949, 21004), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'new_stream1'}), '(stream_slug=new_stream1)\n', (20979, 21004), False, 'from apps.streamevent.models import StreamEventData\n'), ((21042, 21092), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', ([], {'target_slug': 'new_stream1'}), '(target_slug=new_stream1)\n', (21067, 21092), False, 'from apps.streamnote.models import StreamNote\n'), ((21130, 21180), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'new_stream2'}), '(stream_slug=new_stream2)\n', (21155, 21180), False, 'from apps.streamdata.models import StreamData\n'), ((21218, 21273), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'new_stream2'}), '(stream_slug=new_stream2)\n', (21248, 21273), False, 'from apps.streamevent.models import StreamEventData\n'), ((23432, 23483), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (23457, 23483), False, 'from apps.streamdata.models import StreamData\n'), ((23521, 23574), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'mask_slug'}), '(stream_slug=mask_slug)\n', (23551, 23574), False, 'from apps.streamevent.models import StreamEventData\n'), ((23840, 23891), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'stream1.slug'}), '(stream_slug=stream1.slug)\n', (23865, 23891), False, 'from apps.streamdata.models import StreamData\n'), ((23929, 23982), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'mask_slug'}), '(stream_slug=mask_slug)\n', (23959, 23982), False, 'from apps.streamevent.models import StreamEventData\n'), ((24074, 24145), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', ([], {'stream_slug': 'data_mask_event.stream_slug'}), 
'(stream_slug=data_mask_event.stream_slug)\n', (24104, 24145), False, 'from apps.streamevent.models import StreamEventData\n'), ((8423, 8473), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', ([], {'stream_slug': 'new_stream1'}), '(stream_slug=new_stream1)\n', (8448, 8473), False, 'from apps.streamdata.models import StreamData\n')] |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import optparse
import os
import sys
import tempfile
import zipfile
from util import build_utils
def _CheckFilePathEndsWithJar(parser, file_path):
if not file_path.endswith(".jar"):
# dx ignores non .jar files.
parser.error("%s does not end in .jar" % file_path)
def _CheckFilePathsEndWithJar(parser, file_paths):
for file_path in file_paths:
_CheckFilePathEndsWithJar(parser, file_path)
def _RemoveUnwantedFilesFromZip(dex_path):
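  # Rewrite the zip in place, keeping only its .dex entries.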
iz = zipfile.ZipFile(dex_path, 'r')
tmp_dex_path = '%s.tmp.zip' % dex_path
oz = zipfile.ZipFile(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)
for i in iz.namelist():
if i.endswith('.dex'):
oz.writestr(i, iz.read(i))
os.remove(dex_path)
os.rename(tmp_dex_path, dex_path)
def _ParseArgs(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--android-sdk-tools',
help='Android sdk build tools directory.')
parser.add_option('--output-directory',
default=os.getcwd(),
help='Path to the output build directory.')
parser.add_option('--dex-path', help='Dex output path.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME.')
parser.add_option('--proguard-enabled',
help='"true" if proguard is enabled.')
parser.add_option('--debug-build-proguard-enabled',
help='"true" if proguard is enabled for debug build.')
parser.add_option('--proguard-enabled-input-path',
help=('Path to dex in Release mode when proguard '
'is enabled.'))
parser.add_option('--no-locals', default='0',
help='Exclude locals list from the dex file.')
parser.add_option('--incremental',
action='store_true',
help='Enable incremental builds when possible.')
parser.add_option('--inputs', help='A list of additional input paths.')
parser.add_option('--excluded-paths',
help='A list of paths to exclude from the dex file.')
parser.add_option('--main-dex-list-path',
help='A file containing a list of the classes to '
'include in the main dex.')
parser.add_option('--multidex-configuration-path',
help='A JSON file containing multidex build configuration.')
parser.add_option('--multi-dex', default=False, action='store_true',
help='Generate multiple dex files.')
options, paths = parser.parse_args(args)
required_options = ('android_sdk_tools',)
build_utils.CheckOptions(options, parser, required=required_options)
if options.multidex_configuration_path:
with open(options.multidex_configuration_path) as multidex_config_file:
multidex_config = json.loads(multidex_config_file.read())
options.multi_dex = multidex_config.get('enabled', False)
if options.multi_dex and not options.main_dex_list_path:
logging.warning('multidex cannot be enabled without --main-dex-list-path')
options.multi_dex = False
elif options.main_dex_list_path and not options.multi_dex:
logging.warning('--main-dex-list-path is unused if multidex is not enabled')
if options.inputs:
options.inputs = build_utils.ParseGnList(options.inputs)
_CheckFilePathsEndWithJar(parser, options.inputs)
if options.excluded_paths:
options.excluded_paths = build_utils.ParseGnList(options.excluded_paths)
if options.proguard_enabled_input_path:
_CheckFilePathEndsWithJar(parser, options.proguard_enabled_input_path)
_CheckFilePathsEndWithJar(parser, paths)
return options, paths
def _AllSubpathsAreClassFiles(paths, changes):
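  # True only if every changed subpath under the given paths is a .class file.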
for path in paths:
if any(not p.endswith('.class') for p in changes.IterChangedSubpaths(path)):
return False
return True
def _DexWasEmpty(paths, changes):
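  # True if the previous build's metadata shows no .class files for these paths.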
for path in paths:
if any(p.endswith('.class')
for p in changes.old_metadata.IterSubpaths(path)):
return False
return True
def _IterAllClassFiles(changes):
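  # Yields one entry per .class file across all input paths.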
for path in changes.IterAllPaths():
for subpath in changes.IterAllSubpaths(path):
if subpath.endswith('.class'):
yield path
def _MightHitDxBug(changes):
  # We've seen dx --incremental fail for small libraries. It's unlikely to be a
  # speed-up in this case anyway.
num_classes = sum(1 for x in _IterAllClassFiles(changes))
if num_classes < 10:
return True
# We've also been able to consistently produce a failure by adding an empty
# line to the top of the first .java file of a library.
# https://crbug.com/617935
first_file = next(_IterAllClassFiles(changes))
for path in changes.IterChangedPaths():
for subpath in changes.IterChangedSubpaths(path):
if first_file == subpath:
return True
return False
def _RunDx(changes, options, dex_cmd, paths):
with build_utils.TempDir() as classes_temp_dir:
# --multi-dex is incompatible with --incremental.
if options.multi_dex:
dex_cmd.append('--main-dex-list=%s' % options.main_dex_list_path)
else:
# --incremental tells dx to merge all newly dex'ed .class files with
# what that already exist in the output dex file (existing classes are
# replaced).
# Use --incremental when .class files are added or modified, but not when
# any are removed (since it won't know to remove them).
if (options.incremental
and not _MightHitDxBug(changes)
and changes.AddedOrModifiedOnly()):
changed_inputs = set(changes.IterChangedPaths())
changed_paths = [p for p in paths if p in changed_inputs]
if not changed_paths:
return
# When merging in other dex files, there's no easy way to know if
# classes were removed from them.
if (_AllSubpathsAreClassFiles(changed_paths, changes)
and not _DexWasEmpty(changed_paths, changes)):
dex_cmd.append('--incremental')
for path in changed_paths:
changed_subpaths = set(changes.IterChangedSubpaths(path))
# Note: |changed_subpaths| may be empty if nothing changed.
if changed_subpaths:
build_utils.ExtractAll(path, path=classes_temp_dir,
predicate=lambda p: p in changed_subpaths)
paths = [classes_temp_dir]
dex_cmd += paths
build_utils.CheckOutput(dex_cmd, print_stderr=False)
if options.dex_path.endswith('.zip'):
_RemoveUnwantedFilesFromZip(options.dex_path)
def _OnStaleMd5(changes, options, dex_cmd, paths):
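  # Runs dx, then records the list of inputs that produced the dex file.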
_RunDx(changes, options, dex_cmd, paths)
build_utils.WriteJson(
[os.path.relpath(p, options.output_directory) for p in paths],
options.dex_path + '.inputs')
def main(args):
options, paths = _ParseArgs(args)
if ((options.proguard_enabled == 'true'
and options.configuration_name == 'Release')
or (options.debug_build_proguard_enabled == 'true'
and options.configuration_name == 'Debug')):
paths = [options.proguard_enabled_input_path]
if options.inputs:
paths += options.inputs
if options.excluded_paths:
# Excluded paths are relative to the output directory.
exclude_paths = options.excluded_paths
paths = [p for p in paths if not
os.path.relpath(p, options.output_directory) in exclude_paths]
input_paths = list(paths)
dx_binary = os.path.join(options.android_sdk_tools, 'dx')
# See http://crbug.com/272064 for context on --force-jumbo.
# See https://github.com/android/platform_dalvik/commit/dd140a22d for
# --num-threads.
# See http://crbug.com/658782 for why -JXmx2G was added.
dex_cmd = [dx_binary, '-JXmx2G', '--num-threads=8', '--dex', '--force-jumbo',
'--output', options.dex_path]
if options.no_locals != '0':
dex_cmd.append('--no-locals')
if options.multi_dex:
input_paths.append(options.main_dex_list_path)
dex_cmd += [
'--multi-dex',
'--minimal-main-dex',
]
output_paths = [
options.dex_path,
options.dex_path + '.inputs',
]
# An escape hatch to be able to check if incremental dexing is causing
# problems.
force = int(os.environ.get('DISABLE_INCREMENTAL_DX', 0))
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, options, dex_cmd, paths),
options,
input_paths=input_paths,
input_strings=dex_cmd,
output_paths=output_paths,
force=force,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
"util.build_utils.CheckOptions",
"zipfile.ZipFile",
"os.rename",
"util.build_utils.ParseGnList",
"os.path.join",
"optparse.OptionParser",
"logging.warning",
"util.build_utils.TempDir",
"os.environ.get",
"os.getcwd",
"util.build_utils.CheckOutput",
"util.build_utils.ExpandFileArgs",
"util.build_utils.ExtractAll",
"os.path.relpath",
"util.build_utils.AddDepfileOption",
"os.remove"
]
| [((675, 705), 'zipfile.ZipFile', 'zipfile.ZipFile', (['dex_path', '"""r"""'], {}), "(dex_path, 'r')\n", (690, 705), False, 'import zipfile\n'), ((754, 810), 'zipfile.ZipFile', 'zipfile.ZipFile', (['tmp_dex_path', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)\n", (769, 810), False, 'import zipfile\n'), ((899, 918), 'os.remove', 'os.remove', (['dex_path'], {}), '(dex_path)\n', (908, 918), False, 'import os\n'), ((921, 954), 'os.rename', 'os.rename', (['tmp_dex_path', 'dex_path'], {}), '(tmp_dex_path, dex_path)\n', (930, 954), False, 'import os\n'), ((988, 1020), 'util.build_utils.ExpandFileArgs', 'build_utils.ExpandFileArgs', (['args'], {}), '(args)\n', (1014, 1020), False, 'from util import build_utils\n'), ((1033, 1056), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (1054, 1056), False, 'import optparse\n'), ((1059, 1095), 'util.build_utils.AddDepfileOption', 'build_utils.AddDepfileOption', (['parser'], {}), '(parser)\n', (1087, 1095), False, 'from util import build_utils\n'), ((2878, 2946), 'util.build_utils.CheckOptions', 'build_utils.CheckOptions', (['options', 'parser'], {'required': 'required_options'}), '(options, parser, required=required_options)\n', (2902, 2946), False, 'from util import build_utils\n'), ((7679, 7724), 'os.path.join', 'os.path.join', (['options.android_sdk_tools', '"""dx"""'], {}), "(options.android_sdk_tools, 'dx')\n", (7691, 7724), False, 'import os\n'), ((3256, 3330), 'logging.warning', 'logging.warning', (['"""multidex cannot be enabled without --main-dex-list-path"""'], {}), "('multidex cannot be enabled without --main-dex-list-path')\n", (3271, 3330), False, 'import logging\n'), ((3546, 3585), 'util.build_utils.ParseGnList', 'build_utils.ParseGnList', (['options.inputs'], {}), '(options.inputs)\n', (3569, 3585), False, 'from util import build_utils\n'), ((3698, 3745), 'util.build_utils.ParseGnList', 'build_utils.ParseGnList', (['options.excluded_paths'], {}), '(options.excluded_paths)\n', (3721, 3745), False, 'from util import build_utils\n'), ((5151, 5172), 'util.build_utils.TempDir', 'build_utils.TempDir', ([], {}), '()\n', (5170, 5172), False, 'from util import build_utils\n'), ((6657, 6709), 'util.build_utils.CheckOutput', 'build_utils.CheckOutput', (['dex_cmd'], {'print_stderr': '(False)'}), '(dex_cmd, print_stderr=False)\n', (6680, 6709), False, 'from util import build_utils\n'), ((8455, 8498), 'os.environ.get', 'os.environ.get', (['"""DISABLE_INCREMENTAL_DX"""', '(0)'], {}), "('DISABLE_INCREMENTAL_DX', 0)\n", (8469, 8498), False, 'import os\n'), ((1273, 1284), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1282, 1284), False, 'import os\n'), ((3426, 3502), 'logging.warning', 'logging.warning', (['"""--main-dex-list-path is unused if multidex is not enabled"""'], {}), "('--main-dex-list-path is unused if multidex is not enabled')\n", (3441, 3502), False, 'import logging\n'), ((6929, 6973), 'os.path.relpath', 'os.path.relpath', (['p', 'options.output_directory'], {}), '(p, options.output_directory)\n', (6944, 6973), False, 'import os\n'), ((7572, 7616), 'os.path.relpath', 'os.path.relpath', (['p', 'options.output_directory'], {}), '(p, options.output_directory)\n', (7587, 7616), False, 'import os\n'), ((6462, 6560), 'util.build_utils.ExtractAll', 'build_utils.ExtractAll', (['path'], {'path': 'classes_temp_dir', 'predicate': '(lambda p: p in changed_subpaths)'}), '(path, path=classes_temp_dir, predicate=lambda p: p in\n changed_subpaths)\n', (6484, 6560), False, 'from util import 
build_utils\n')] |
"""Django settings for botwtracker project.
Copyright (c) 2017, <NAME>.
botw-tracker is an open source software project released under the MIT License.
See the accompanying LICENSE file for terms.
"""
import os
from .config_local import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, '..', 'data')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quests.apps.QuestsConfig',
'user.apps.UserConfig',
]
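# USE_SIGNUP is star-imported from config_local above and toggles the optional signup app.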
if USE_SIGNUP:
INSTALLED_APPS.append('signup.apps.SignupConfig')
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'botwtracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'botwtracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_DIR, 'sqlite3.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "..", "static")
]
| [
"os.path.abspath",
"os.path.join"
]
| [((397, 433), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""', '"""data"""'], {}), "(BASE_DIR, '..', 'data')\n", (409, 433), False, 'import os\n'), ((2871, 2909), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""', '"""static"""'], {}), "(BASE_DIR, '..', 'static')\n", (2883, 2909), False, 'import os\n'), ((358, 383), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (373, 383), False, 'import os\n'), ((2014, 2050), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""sqlite3.db"""'], {}), "(DATA_DIR, 'sqlite3.db')\n", (2026, 2050), False, 'import os\n'), ((1368, 1403), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (1380, 1403), False, 'import os\n')] |
import legacy_code.tf_cnn_siamese.configurations as conf
import tensorflow as tf
import numpy as np
def construct_cnn(x, conv_weights, conv_biases, fc_weights, fc_biases,
dropout = False):
"""
constructs the convolution graph for one image
:param x: input node
:param conv_weights: convolution weights
:param conv_biases: relu biases for each convolution
:param fc_weights: fully connected weights, only one set should be used here
:param fc_biases: fully connected biases, only one set should be used here
:param dropout: whether to add a dropout layer for the fully connected layer
:return: output node
"""
k = conf.NUM_POOL
for i in range(conf.NUM_CONVS):
x = tf.nn.conv2d(x, conv_weights[i], strides=[1, 1, 1, 1], padding='SAME',
data_format=conf.DATA_FORMAT)
x = tf.nn.relu(tf.nn.bias_add(x, conv_biases[i],
data_format=conf.DATA_FORMAT))
if k > 0:
x = tf.nn.max_pool(x, ksize=conf.POOL_KDIM,strides=conf.POOL_KDIM,
padding='VALID', data_format=conf.DATA_FORMAT)
k -= 1
# Reshape the feature map cuboids into vectors for fc layers
features_shape = x.get_shape().as_list()
n = features_shape[0]
m = features_shape[1] * features_shape[2] * features_shape[3]
features = tf.reshape(x, [n, m])
# last fc_weights determine output dimensions
fc = tf.nn.sigmoid(tf.matmul(features, fc_weights[0]) + fc_biases[0])
# for actual training
if dropout:
fc = tf.nn.dropout(fc, conf.DROP_RATE)
return fc
def construct_logits_model(x_1, x_2, conv_weights, conv_biases, fc_weights,
fc_biases, dropout=False):
"""
constructs the logit node before the final sigmoid activation
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to include dropout layers
:return: logit node
"""
with tf.name_scope("twin_1"):
twin_1 = construct_cnn(x_1, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
with tf.name_scope("twin_2"):
twin_2 = construct_cnn(x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return logits
def construct_full_model(x_1, x_2, conv_weights, conv_biases,fc_weights,
fc_biases):
"""
constructs the graph for the neural network without loss node or optimizer
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: sigmoid output node
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False)
return tf.nn.sigmoid(logits)
def construct_loss_optimizer(x_1, x_2, labels, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False,
lagrange=False):
"""
constructs the neural network graph with the loss and optimizer node
:param x_1: input image node 1
:param x_2: input image node 2
:param labels: expected output
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to use dropout
:param lagrange: whether to apply constraints
:return: the node for the optimizer as well as the loss
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# cross entropy loss on sigmoids of joined output and labels
loss_vec = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=logits)
loss = tf.reduce_mean(loss_vec)
if lagrange:
# constraints on sigmoid layers
regularizers = (tf.nn.l2_loss(fc_weights[0]) + tf.nn.l2_loss(fc_biases[0]) +
tf.nn.l2_loss(fc_weights[1]) + tf.nn.l2_loss(fc_biases[1]))
loss += conf.LAMBDA * regularizers
# setting up the optimization
batch = tf.Variable(0, dtype=conf.DTYPE)
# vanilla momentum optimizer
# accumulation = momentum * accumulation + gradient
# every epoch: variable -= learning_rate * accumulation
# batch_total = labels.shape[0]
# learning_rate = tf.train.exponential_decay(
# conf.BASE_LEARNING_RATE,
# batch * conf.BATCH_SIZE, # Current index into the dataset.
# batch_total,
# conf.DECAY_RATE, # Decay rate.
# staircase=True)
# trainer = tf.train.MomentumOptimizer(learning_rate, conf.MOMENTUM)\
# .minimize(loss, global_step=batch)
# adaptive momentum estimation optimizer
# default params: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
trainer = tf.train.AdamOptimizer().minimize(loss, global_step=batch)
return trainer, loss
def construct_joined_model(twin_1, twin_2, fc_weights, fc_biases):
"""
constructs joined model for two sets of extracted features
:param twin_1: features node extracted from first image
:param twin_2: features node extracted from second image
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: logit node
"""
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return tf.nn.sigmoid(logits)
def initialize_weights():
"""
initializes the variable tensors to be trained in the neural network, decides
network dimensions
:return: nodes for the variables
"""
# twin network convolution and pooling variables
conv_weights = []
conv_biases = []
fc_weights = []
fc_biases = []
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
weight_name = "twin_conv" + str(i + 1) + "_weights"
bias_name = "twin_conv" + str(i + 1) + "_biases"
conv_weights.append(tf.Variable(tf.truncated_normal(conv_dim, stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name=weight_name))
conv_biases.append(tf.Variable(tf.zeros([out], dtype=conf.DTYPE),
name=bias_name))
# twin network fullly connected variables
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="twin_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="twin_fc_biases"))
# joined network fully connected variables
inp = conf.NUM_FC_NEURONS
out = 1
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="joined_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="joined_fc_biases"))
return conv_weights, conv_biases, fc_weights, fc_biases
def num_params():
"""
calculates the number of parameters in the model
:return: m, number of parameters
"""
m = 0
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
m += np.prod(conv_dim) + np.prod(out)
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
m += inp * out + out
inp = conf.NUM_FC_NEURONS
out = 1
m += inp * out + out
return m
if __name__ == "__main__":
print("Number of Parameters: " + str(num_params()))
| [
"tensorflow.nn.conv2d",
"numpy.prod",
"tensorflow.nn.max_pool",
"tensorflow.Variable",
"tensorflow.squared_difference",
"tensorflow.nn.l2_loss",
"tensorflow.nn.sigmoid",
"tensorflow.name_scope",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"tensorflow.train.AdamOptimizer",
"tensorflow.truncated_normal",
"tensorflow.constant",
"tensorflow.zeros",
"tensorflow.nn.bias_add"
]
| [((1327, 1348), 'tensorflow.reshape', 'tf.reshape', (['x', '[n, m]'], {}), '(x, [n, m])\n', (1337, 1348), True, 'import tensorflow as tf\n'), ((2472, 2509), 'tensorflow.squared_difference', 'tf.squared_difference', (['twin_1', 'twin_2'], {}), '(twin_1, twin_2)\n', (2493, 2509), True, 'import tensorflow as tf\n'), ((3253, 3274), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (3266, 3274), True, 'import tensorflow as tf\n'), ((4223, 4292), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (4262, 4292), True, 'import tensorflow as tf\n'), ((4355, 4379), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_vec'], {}), '(loss_vec)\n', (4369, 4379), True, 'import tensorflow as tf\n'), ((4673, 4705), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'conf.DTYPE'}), '(0, dtype=conf.DTYPE)\n', (4684, 4705), True, 'import tensorflow as tf\n'), ((5897, 5934), 'tensorflow.squared_difference', 'tf.squared_difference', (['twin_1', 'twin_2'], {}), '(twin_1, twin_2)\n', (5918, 5934), True, 'import tensorflow as tf\n'), ((6004, 6025), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (6017, 6025), True, 'import tensorflow as tf\n'), ((711, 815), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'conv_weights[i]'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'data_format': 'conf.DATA_FORMAT'}), "(x, conv_weights[i], strides=[1, 1, 1, 1], padding='SAME',\n data_format=conf.DATA_FORMAT)\n", (723, 815), True, 'import tensorflow as tf\n'), ((1516, 1549), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc', 'conf.DROP_RATE'], {}), '(fc, conf.DROP_RATE)\n', (1529, 1549), True, 'import tensorflow as tf\n'), ((2134, 2157), 'tensorflow.name_scope', 'tf.name_scope', (['"""twin_1"""'], {}), "('twin_1')\n", (2147, 2157), True, 'import tensorflow as tf\n'), ((2284, 2307), 'tensorflow.name_scope', 'tf.name_scope', (['"""twin_2"""'], {}), "('twin_2')\n", (2297, 2307), True, 'import tensorflow as tf\n'), ((2521, 2554), 'tensorflow.matmul', 'tf.matmul', (['sq_diff', 'fc_weights[1]'], {}), '(sq_diff, fc_weights[1])\n', (2530, 2554), True, 'import tensorflow as tf\n'), ((5946, 5979), 'tensorflow.matmul', 'tf.matmul', (['sq_diff', 'fc_weights[1]'], {}), '(sq_diff, fc_weights[1])\n', (5955, 5979), True, 'import tensorflow as tf\n'), ((852, 915), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'conv_biases[i]'], {'data_format': 'conf.DATA_FORMAT'}), '(x, conv_biases[i], data_format=conf.DATA_FORMAT)\n', (866, 915), True, 'import tensorflow as tf\n'), ((975, 1090), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': 'conf.POOL_KDIM', 'strides': 'conf.POOL_KDIM', 'padding': '"""VALID"""', 'data_format': 'conf.DATA_FORMAT'}), "(x, ksize=conf.POOL_KDIM, strides=conf.POOL_KDIM, padding=\n 'VALID', data_format=conf.DATA_FORMAT)\n", (989, 1090), True, 'import tensorflow as tf\n'), ((1418, 1452), 'tensorflow.matmul', 'tf.matmul', (['features', 'fc_weights[0]'], {}), '(features, fc_weights[0])\n', (1427, 1452), True, 'import tensorflow as tf\n'), ((4563, 4590), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['fc_biases[1]'], {}), '(fc_biases[1])\n', (4576, 4590), True, 'import tensorflow as tf\n'), ((5381, 5405), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (5403, 5405), True, 'import tensorflow as tf\n'), ((7111, 7188), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[inp, out]'], {'stddev': 
'(0.1)', 'seed': 'conf.SEED', 'dtype': 'conf.DTYPE'}), '([inp, out], stddev=0.1, seed=conf.SEED, dtype=conf.DTYPE)\n', (7130, 7188), True, 'import tensorflow as tf\n'), ((7310, 7357), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[out]', 'dtype': 'conf.DTYPE'}), '(0.1, shape=[out], dtype=conf.DTYPE)\n', (7321, 7357), True, 'import tensorflow as tf\n'), ((7529, 7606), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[inp, out]'], {'stddev': '(0.1)', 'seed': 'conf.SEED', 'dtype': 'conf.DTYPE'}), '([inp, out], stddev=0.1, seed=conf.SEED, dtype=conf.DTYPE)\n', (7548, 7606), True, 'import tensorflow as tf\n'), ((7730, 7777), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[out]', 'dtype': 'conf.DTYPE'}), '(0.1, shape=[out], dtype=conf.DTYPE)\n', (7741, 7777), True, 'import tensorflow as tf\n'), ((8244, 8261), 'numpy.prod', 'np.prod', (['conv_dim'], {}), '(conv_dim)\n', (8251, 8261), True, 'import numpy as np\n'), ((8264, 8276), 'numpy.prod', 'np.prod', (['out'], {}), '(out)\n', (8271, 8276), True, 'import numpy as np\n'), ((4532, 4560), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['fc_weights[1]'], {}), '(fc_weights[1])\n', (4545, 4560), True, 'import tensorflow as tf\n'), ((6687, 6762), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['conv_dim'], {'stddev': '(0.1)', 'seed': 'conf.SEED', 'dtype': 'conf.DTYPE'}), '(conv_dim, stddev=0.1, seed=conf.SEED, dtype=conf.DTYPE)\n', (6706, 6762), True, 'import tensorflow as tf\n'), ((6890, 6923), 'tensorflow.zeros', 'tf.zeros', (['[out]'], {'dtype': 'conf.DTYPE'}), '([out], dtype=conf.DTYPE)\n', (6898, 6923), True, 'import tensorflow as tf\n'), ((4451, 4479), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['fc_weights[0]'], {}), '(fc_weights[0])\n', (4464, 4479), True, 'import tensorflow as tf\n'), ((4482, 4509), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['fc_biases[0]'], {}), '(fc_biases[0])\n', (4495, 4509), True, 'import tensorflow as tf\n')] |
import sys
import logging
import unittest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy.utils.log import (failure_to_exc_info, TopLevelFormatter,
LogCounterHandler, StreamLogger)
from scrapy.utils.test import get_crawler
from scrapy.extensions import telnet
class FailureToExcInfoTest(unittest.TestCase):
def test_failure(self):
try:
0 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
failure = Failure()
self.assertTupleEqual(exc_info, failure_to_exc_info(failure))
def test_non_failure(self):
self.assertIsNone(failure_to_exc_info('test'))
class TopLevelFormatterTest(unittest.TestCase):
def setUp(self):
self.handler = LogCapture()
self.handler.addFilter(TopLevelFormatter(['test']))
def test_top_level_logger(self):
logger = logging.getLogger('test')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_children_logger(self):
logger = logging.getLogger('test.test1')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_overlapping_name_logger(self):
logger = logging.getLogger('test2')
with self.handler as log:
logger.warning('test log msg')
log.check(('test2', 'WARNING', 'test log msg'))
def test_different_name_logger(self):
logger = logging.getLogger('different')
with self.handler as log:
logger.warning('test log msg')
log.check(('different', 'WARNING', 'test log msg'))
class LogCounterHandlerTest(unittest.TestCase):
def setUp(self):
settings = {'LOG_LEVEL': 'WARNING'}
if not telnet.TWISTED_CONCH_AVAILABLE:
# disable it to avoid the extra warning
settings['TELNETCONSOLE_ENABLED'] = False
self.logger = logging.getLogger('test')
self.logger.setLevel(logging.NOTSET)
self.logger.propagate = False
self.crawler = get_crawler(settings_dict=settings)
self.handler = LogCounterHandler(self.crawler)
self.logger.addHandler(self.handler)
def tearDown(self):
self.logger.propagate = True
self.logger.removeHandler(self.handler)
def test_init(self):
self.assertIsNone(self.crawler.stats.get_value('log_count/DEBUG'))
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
self.assertIsNone(self.crawler.stats.get_value('log_count/WARNING'))
self.assertIsNone(self.crawler.stats.get_value('log_count/ERROR'))
self.assertIsNone(self.crawler.stats.get_value('log_count/CRITICAL'))
def test_accepted_level(self):
self.logger.error('test log msg')
self.assertEqual(self.crawler.stats.get_value('log_count/ERROR'), 1)
def test_filtered_out_level(self):
self.logger.debug('test log msg')
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
class StreamLoggerTest(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
logger = logging.getLogger('test')
logger.setLevel(logging.WARNING)
sys.stdout = StreamLogger(logger, logging.ERROR)
def tearDown(self):
sys.stdout = self.stdout
def test_redirect(self):
with LogCapture() as log:
print('test log msg')
log.check(('test', 'ERROR', 'test log msg'))
| [
"logging.getLogger",
"scrapy.utils.log.LogCounterHandler",
"scrapy.utils.test.get_crawler",
"scrapy.utils.log.failure_to_exc_info",
"sys.exc_info",
"scrapy.utils.log.StreamLogger",
"twisted.python.failure.Failure",
"scrapy.utils.log.TopLevelFormatter",
"testfixtures.LogCapture"
]
| [((802, 814), 'testfixtures.LogCapture', 'LogCapture', ([], {}), '()\n', (812, 814), False, 'from testfixtures import LogCapture\n'), ((930, 955), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (947, 955), False, 'import logging\n'), ((1142, 1173), 'logging.getLogger', 'logging.getLogger', (['"""test.test1"""'], {}), "('test.test1')\n", (1159, 1173), False, 'import logging\n'), ((1368, 1394), 'logging.getLogger', 'logging.getLogger', (['"""test2"""'], {}), "('test2')\n", (1385, 1394), False, 'import logging\n'), ((1588, 1618), 'logging.getLogger', 'logging.getLogger', (['"""different"""'], {}), "('different')\n", (1605, 1618), False, 'import logging\n'), ((2047, 2072), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (2064, 2072), False, 'import logging\n'), ((2179, 2214), 'scrapy.utils.test.get_crawler', 'get_crawler', ([], {'settings_dict': 'settings'}), '(settings_dict=settings)\n', (2190, 2214), False, 'from scrapy.utils.test import get_crawler\n'), ((2238, 2269), 'scrapy.utils.log.LogCounterHandler', 'LogCounterHandler', (['self.crawler'], {}), '(self.crawler)\n', (2255, 2269), False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((3258, 3283), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (3275, 3283), False, 'import logging\n'), ((3346, 3381), 'scrapy.utils.log.StreamLogger', 'StreamLogger', (['logger', 'logging.ERROR'], {}), '(logger, logging.ERROR)\n', (3358, 3381), False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((589, 617), 'scrapy.utils.log.failure_to_exc_info', 'failure_to_exc_info', (['failure'], {}), '(failure)\n', (608, 617), False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((678, 705), 'scrapy.utils.log.failure_to_exc_info', 'failure_to_exc_info', (['"""test"""'], {}), "('test')\n", (697, 705), False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((846, 873), 'scrapy.utils.log.TopLevelFormatter', 'TopLevelFormatter', (["['test']"], {}), "(['test'])\n", (863, 873), False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((3483, 3495), 'testfixtures.LogCapture', 'LogCapture', ([], {}), '()\n', (3493, 3495), False, 'from testfixtures import LogCapture\n'), ((501, 515), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (513, 515), False, 'import sys\n'), ((538, 547), 'twisted.python.failure.Failure', 'Failure', ([], {}), '()\n', (545, 547), False, 'from twisted.python.failure import Failure\n')] |
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import onnx
from distutils.version import StrictVersion
# Rather than using ONNX protobuf definition throughout our codebase, we import ONNX protobuf definition here so that
# we can conduct quick fixes by overwriting ONNX functions without changing any lines elsewhere.
from onnx import onnx_pb as onnx_proto
from onnx import helper
def get_opset_number_from_onnx():
return onnx.defs.onnx_opset_version()
def _check_onnx_version():
import pkg_resources
min_required_version = pkg_resources.parse_version('1.0.1')
current_version = pkg_resources.get_distribution('onnx').parsed_version
    assert current_version >= min_required_version, 'Keras2ONNX requires ONNX version 1.0.1 or a newer one'
_check_onnx_version()
is_tf_keras = False
if os.environ.get('TF_KERAS', '0') != '0':
is_tf_keras = True
if is_tf_keras:
from tensorflow.python import keras
else:
try:
import keras
except ImportError:
is_tf_keras = True
from tensorflow.python import keras
def is_keras_older_than(version_str):
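    # Drop any '-' suffix (e.g. the '-tf' tag from tf.keras) before comparing versions.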
return StrictVersion(keras.__version__.split('-')[0]) < StrictVersion(version_str)
def is_keras_later_than(version_str):
return StrictVersion(keras.__version__.split('-')[0]) > StrictVersion(version_str)
| [
"tensorflow.python.keras.__version__.split",
"onnx.defs.onnx_opset_version",
"os.environ.get",
"pkg_resources.parse_version",
"distutils.version.StrictVersion",
"pkg_resources.get_distribution"
]
| [((709, 739), 'onnx.defs.onnx_opset_version', 'onnx.defs.onnx_opset_version', ([], {}), '()\n', (737, 739), False, 'import onnx\n'), ((820, 856), 'pkg_resources.parse_version', 'pkg_resources.parse_version', (['"""1.0.1"""'], {}), "('1.0.1')\n", (847, 856), False, 'import pkg_resources\n'), ((1089, 1120), 'os.environ.get', 'os.environ.get', (['"""TF_KERAS"""', '"""0"""'], {}), "('TF_KERAS', '0')\n", (1103, 1120), False, 'import os\n'), ((879, 917), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""onnx"""'], {}), "('onnx')\n", (909, 917), False, 'import pkg_resources\n'), ((1440, 1466), 'distutils.version.StrictVersion', 'StrictVersion', (['version_str'], {}), '(version_str)\n', (1453, 1466), False, 'from distutils.version import StrictVersion\n'), ((1567, 1593), 'distutils.version.StrictVersion', 'StrictVersion', (['version_str'], {}), '(version_str)\n', (1580, 1593), False, 'from distutils.version import StrictVersion\n'), ((1405, 1433), 'tensorflow.python.keras.__version__.split', 'keras.__version__.split', (['"""-"""'], {}), "('-')\n", (1428, 1433), False, 'from tensorflow.python import keras\n'), ((1532, 1560), 'tensorflow.python.keras.__version__.split', 'keras.__version__.split', (['"""-"""'], {}), "('-')\n", (1555, 1560), False, 'from tensorflow.python import keras\n')] |
'''Test package.'''
import xroms
from glob import glob
import os
def test_open_netcdf():
'''Test xroms.open_netcdf().'''
base = os.path.join(xroms.__path__[0],'..','tests','input')
files = glob('%s/ocean_his_000?.nc' % base)
ds = xroms.open_netcdf(files)
assert ds
def test_open_zarr():
'''Test xroms.open_zarr().'''
base = os.path.join(xroms.__path__[0],'..','tests','input')
files = glob('%s/ocean_his_000?' % base)
ds = xroms.open_zarr(files, chunks={'ocean_time':2})
assert ds
| [
"xroms.open_zarr",
"os.path.join",
"glob.glob",
"xroms.open_netcdf"
]
| [((144, 199), 'os.path.join', 'os.path.join', (['xroms.__path__[0]', '""".."""', '"""tests"""', '"""input"""'], {}), "(xroms.__path__[0], '..', 'tests', 'input')\n", (156, 199), False, 'import os\n'), ((209, 244), 'glob.glob', 'glob', (["('%s/ocean_his_000?.nc' % base)"], {}), "('%s/ocean_his_000?.nc' % base)\n", (213, 244), False, 'from glob import glob\n'), ((254, 278), 'xroms.open_netcdf', 'xroms.open_netcdf', (['files'], {}), '(files)\n', (271, 278), False, 'import xroms\n'), ((375, 430), 'os.path.join', 'os.path.join', (['xroms.__path__[0]', '""".."""', '"""tests"""', '"""input"""'], {}), "(xroms.__path__[0], '..', 'tests', 'input')\n", (387, 430), False, 'import os\n'), ((440, 472), 'glob.glob', 'glob', (["('%s/ocean_his_000?' % base)"], {}), "('%s/ocean_his_000?' % base)\n", (444, 472), False, 'from glob import glob\n'), ((482, 530), 'xroms.open_zarr', 'xroms.open_zarr', (['files'], {'chunks': "{'ocean_time': 2}"}), "(files, chunks={'ocean_time': 2})\n", (497, 530), False, 'import xroms\n')] |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
# url(r'^demoproject/', include('demoproject.foo.urls')),
]
| [
"django.conf.urls.url"
]
| [((75, 109), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.home'], {'name': '"""home"""'}), "('^$', views.home, name='home')\n", (78, 109), False, 'from django.conf.urls import url\n'), ((116, 176), 'django.conf.urls.url', 'url', (['"""^piechart/"""', 'views.demo_piechart'], {'name': '"""demo_piechart"""'}), "('^piechart/', views.demo_piechart, name='demo_piechart')\n", (119, 176), False, 'from django.conf.urls import url\n'), ((183, 246), 'django.conf.urls.url', 'url', (['"""^linechart/"""', 'views.demo_linechart'], {'name': '"""demo_linechart"""'}), "('^linechart/', views.demo_linechart, name='demo_linechart')\n", (186, 246), False, 'from django.conf.urls import url\n'), ((253, 360), 'django.conf.urls.url', 'url', (['"""^linechart_without_date/"""', 'views.demo_linechart_without_date'], {'name': '"""demo_linechart_without_date"""'}), "('^linechart_without_date/', views.demo_linechart_without_date, name=\n 'demo_linechart_without_date')\n", (256, 360), False, 'from django.conf.urls import url\n'), ((362, 457), 'django.conf.urls.url', 'url', (['"""^linewithfocuschart/"""', 'views.demo_linewithfocuschart'], {'name': '"""demo_linewithfocuschart"""'}), "('^linewithfocuschart/', views.demo_linewithfocuschart, name=\n 'demo_linewithfocuschart')\n", (365, 457), False, 'from django.conf.urls import url\n'), ((459, 534), 'django.conf.urls.url', 'url', (['"""^multibarchart/"""', 'views.demo_multibarchart'], {'name': '"""demo_multibarchart"""'}), "('^multibarchart/', views.demo_multibarchart, name='demo_multibarchart')\n", (462, 534), False, 'from django.conf.urls import url\n'), ((541, 630), 'django.conf.urls.url', 'url', (['"""^stackedareachart/"""', 'views.demo_stackedareachart'], {'name': '"""demo_stackedareachart"""'}), "('^stackedareachart/', views.demo_stackedareachart, name=\n 'demo_stackedareachart')\n", (544, 630), False, 'from django.conf.urls import url\n'), ((632, 742), 'django.conf.urls.url', 'url', (['"""^multibarhorizontalchart/"""', 'views.demo_multibarhorizontalchart'], {'name': '"""demo_multibarhorizontalchart"""'}), "('^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name=\n 'demo_multibarhorizontalchart')\n", (635, 742), False, 'from django.conf.urls import url\n'), ((744, 833), 'django.conf.urls.url', 'url', (['"""^lineplusbarchart/"""', 'views.demo_lineplusbarchart'], {'name': '"""demo_lineplusbarchart"""'}), "('^lineplusbarchart/', views.demo_lineplusbarchart, name=\n 'demo_lineplusbarchart')\n", (747, 833), False, 'from django.conf.urls import url\n'), ((835, 933), 'django.conf.urls.url', 'url', (['"""^cumulativelinechart/"""', 'views.demo_cumulativelinechart'], {'name': '"""demo_cumulativelinechart"""'}), "('^cumulativelinechart/', views.demo_cumulativelinechart, name=\n 'demo_cumulativelinechart')\n", (838, 933), False, 'from django.conf.urls import url\n'), ((935, 1024), 'django.conf.urls.url', 'url', (['"""^discretebarchart/"""', 'views.demo_discretebarchart'], {'name': '"""demo_discretebarchart"""'}), "('^discretebarchart/', views.demo_discretebarchart, name=\n 'demo_discretebarchart')\n", (938, 1024), False, 'from django.conf.urls import url\n'), ((1026, 1139), 'django.conf.urls.url', 'url', (['"""^discretebarchart_with_date/"""', 'views.demo_discretebarchart_with_date'], {'name': '"""demo_discretebarchart_date"""'}), "('^discretebarchart_with_date/', views.demo_discretebarchart_with_date,\n name='demo_discretebarchart_date')\n", (1029, 1139), False, 'from django.conf.urls import url\n'), ((1142, 1214), 'django.conf.urls.url', 'url', 
(['"""^scatterchart/"""', 'views.demo_scatterchart'], {'name': '"""demo_scatterchart"""'}), "('^scatterchart/', views.demo_scatterchart, name='demo_scatterchart')\n", (1145, 1214), False, 'from django.conf.urls import url\n'), ((1221, 1319), 'django.conf.urls.url', 'url', (['"""^linechart_with_ampm/"""', 'views.demo_linechart_with_ampm'], {'name': '"""demo_linechart_with_ampm"""'}), "('^linechart_with_ampm/', views.demo_linechart_with_ampm, name=\n 'demo_linechart_with_ampm')\n", (1224, 1319), False, 'from django.conf.urls import url\n')] |
# -*- coding:utf-8 -*-
import tensorflow as tf
class CNN:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate = 0.00002,timestep=9,road=189,predstep=1):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
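        # Trainable weights drawn from a truncated normal distribution (stddev 0.1).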
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)  # wrap in a Variable so the bias is trainable
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self, ):
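        # 3x3 conv (64 filters) -> 2x2 max-pool -> FC(1200) -> FC(output_size).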
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 #tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target-self.predict)/self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict
class CNN15:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate = 0.00002,timestep=9,road=189,predstep=3):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return initial
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self, ):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 #tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target-self.predict)/self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict
class CNN30:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate=0.00002,timestep=9,road=189,predstep=6):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return initial
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self, ):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 # tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target - self.predict) / self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict | [
"tensorflow.nn.conv2d",
"tensorflow.nn.max_pool",
"tensorflow.Variable",
"tensorflow.squared_difference",
"tensorflow.placeholder",
"tensorflow.nn.conv1d",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.train.AdamOptimizer",
"tensorflow.truncated_normal"
]
| [((391, 462), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.input_size]', 'name': '"""input"""'}), "(tf.float32, shape=[None, self.input_size], name='input')\n", (405, 462), True, 'import tensorflow as tf\n'), ((495, 568), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.output_size]', 'name': '"""target"""'}), "(tf.float32, shape=[None, self.output_size], name='target')\n", (509, 568), True, 'import tensorflow as tf\n'), ((658, 696), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (677, 696), True, 'import tensorflow as tf\n'), ((712, 732), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (723, 732), True, 'import tensorflow as tf\n'), ((787, 825), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (806, 825), True, 'import tensorflow as tf\n'), ((892, 948), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (904, 948), True, 'import tensorflow as tf\n'), ((992, 1036), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['x', 'W'], {'stride': '(2)', 'padding': '"""SAME"""'}), "(x, W, stride=2, padding='SAME')\n", (1004, 1036), True, 'import tensorflow as tf\n'), ((1083, 1158), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (1097, 1158), True, 'import tensorflow as tf\n'), ((1226, 1284), 'tensorflow.reshape', 'tf.reshape', (['self.bottom', '[-1, self.road, self.timestep, 1]'], {}), '(self.bottom, [-1, self.road, self.timestep, 1])\n', (1236, 1284), True, 'import tensorflow as tf\n'), ((1514, 1552), 'tensorflow.reshape', 'tf.reshape', (['h_pool1', '[-1, 95 * 5 * 64]'], {}), '(h_pool1, [-1, 95 * 5 * 64])\n', (1524, 1552), True, 'import tensorflow as tf\n'), ((1967, 1998), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (1978, 1998), True, 'import tensorflow as tf\n'), ((2875, 2946), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.input_size]', 'name': '"""input"""'}), "(tf.float32, shape=[None, self.input_size], name='input')\n", (2889, 2946), True, 'import tensorflow as tf\n'), ((2979, 3052), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.output_size]', 'name': '"""target"""'}), "(tf.float32, shape=[None, self.output_size], name='target')\n", (2993, 3052), True, 'import tensorflow as tf\n'), ((3142, 3180), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (3161, 3180), True, 'import tensorflow as tf\n'), ((3196, 3216), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (3207, 3216), True, 'import tensorflow as tf\n'), ((3271, 3309), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (3290, 3309), True, 'import tensorflow as tf\n'), ((3376, 3432), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (3388, 3432), True, 'import tensorflow as tf\n'), ((3476, 3520), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['x', 'W'], {'stride': '(2)', 'padding': 
'"""SAME"""'}), "(x, W, stride=2, padding='SAME')\n", (3488, 3520), True, 'import tensorflow as tf\n'), ((3567, 3642), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3581, 3642), True, 'import tensorflow as tf\n'), ((3710, 3768), 'tensorflow.reshape', 'tf.reshape', (['self.bottom', '[-1, self.road, self.timestep, 1]'], {}), '(self.bottom, [-1, self.road, self.timestep, 1])\n', (3720, 3768), True, 'import tensorflow as tf\n'), ((3998, 4036), 'tensorflow.reshape', 'tf.reshape', (['h_pool1', '[-1, 95 * 5 * 64]'], {}), '(h_pool1, [-1, 95 * 5 * 64])\n', (4008, 4036), True, 'import tensorflow as tf\n'), ((4451, 4482), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (4462, 4482), True, 'import tensorflow as tf\n'), ((5358, 5429), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.input_size]', 'name': '"""input"""'}), "(tf.float32, shape=[None, self.input_size], name='input')\n", (5372, 5429), True, 'import tensorflow as tf\n'), ((5462, 5535), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.output_size]', 'name': '"""target"""'}), "(tf.float32, shape=[None, self.output_size], name='target')\n", (5476, 5535), True, 'import tensorflow as tf\n'), ((5625, 5663), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (5644, 5663), True, 'import tensorflow as tf\n'), ((5679, 5699), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (5690, 5699), True, 'import tensorflow as tf\n'), ((5754, 5792), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (5773, 5792), True, 'import tensorflow as tf\n'), ((5859, 5915), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (5871, 5915), True, 'import tensorflow as tf\n'), ((5959, 6003), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['x', 'W'], {'stride': '(2)', 'padding': '"""SAME"""'}), "(x, W, stride=2, padding='SAME')\n", (5971, 6003), True, 'import tensorflow as tf\n'), ((6050, 6125), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (6064, 6125), True, 'import tensorflow as tf\n'), ((6193, 6251), 'tensorflow.reshape', 'tf.reshape', (['self.bottom', '[-1, self.road, self.timestep, 1]'], {}), '(self.bottom, [-1, self.road, self.timestep, 1])\n', (6203, 6251), True, 'import tensorflow as tf\n'), ((6481, 6519), 'tensorflow.reshape', 'tf.reshape', (['h_pool1', '[-1, 95 * 5 * 64]'], {}), '(h_pool1, [-1, 95 * 5 * 64])\n', (6491, 6519), True, 'import tensorflow as tf\n'), ((6932, 6963), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (6943, 6963), True, 'import tensorflow as tf\n'), ((2145, 2193), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.target', 'self.predict'], {}), '(self.target, self.predict)\n', (2166, 2193), True, 'import tensorflow as tf\n'), ((4629, 4677), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.target', 'self.predict'], {}), '(self.target, self.predict)\n', (4650, 4677), True, 'import tensorflow as 
tf\n'), ((7112, 7160), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.target', 'self.predict'], {}), '(self.target, self.predict)\n', (7133, 7160), True, 'import tensorflow as tf\n'), ((1677, 1702), 'tensorflow.matmul', 'tf.matmul', (['h_flat3', 'W_fc2'], {}), '(h_flat3, W_fc2)\n', (1686, 1702), True, 'import tensorflow as tf\n'), ((1914, 1933), 'tensorflow.matmul', 'tf.matmul', (['h', 'W_fc2'], {}), '(h, W_fc2)\n', (1923, 1933), True, 'import tensorflow as tf\n'), ((2305, 2347), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (2327, 2347), True, 'import tensorflow as tf\n'), ((4161, 4186), 'tensorflow.matmul', 'tf.matmul', (['h_flat3', 'W_fc2'], {}), '(h_flat3, W_fc2)\n', (4170, 4186), True, 'import tensorflow as tf\n'), ((4398, 4417), 'tensorflow.matmul', 'tf.matmul', (['h', 'W_fc2'], {}), '(h, W_fc2)\n', (4407, 4417), True, 'import tensorflow as tf\n'), ((4789, 4831), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (4811, 4831), True, 'import tensorflow as tf\n'), ((6643, 6668), 'tensorflow.matmul', 'tf.matmul', (['h_flat3', 'W_fc2'], {}), '(h_flat3, W_fc2)\n', (6652, 6668), True, 'import tensorflow as tf\n'), ((6880, 6899), 'tensorflow.matmul', 'tf.matmul', (['h', 'W_fc2'], {}), '(h, W_fc2)\n', (6889, 6899), True, 'import tensorflow as tf\n'), ((7276, 7318), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (7298, 7318), True, 'import tensorflow as tf\n')] |
import cv2
import mediapipe as mp
from time import sleep
import numpy as np
import autopy
import pynput
wCam, hCam = 1280, 720
wScr, hScr = autopy.screen.size()
cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
mouse = pynput.mouse.Controller()
def findNodeDistance(imgHeight, imgWidth, landmarks, index1, index2):
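    # Convert the normalized MediaPipe landmarks to pixel coordinates and return the
    # in-plane (x, y) distance plus the depth (z) gap between the two landmark indices.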
x1 = int(landmarks[index1].x*imgWidth)
y1 = int(landmarks[index1].y*imgHeight)
z1 = int(landmarks[index1].z*imgWidth)
x2 = int(landmarks[index2].x*imgWidth)
y2 = int(landmarks[index2].y*imgHeight)
z2 = int(landmarks[index2].z*imgWidth)
dis = ((x1-x2)**2.0+(y1-y2)**2.0)**0.5
z_dis = abs(z1-z2)
return dis, z_dis
with mp_hands.Hands(
min_detection_confidence=0.8,
min_tracking_confidence=0.5) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image,
hand_landmarks,
mp_hands.HAND_CONNECTIONS)
#cx, cy = int(hand_landmarks.landmark[8].x*wCam), int(hand_landmarks.landmark[8].y*hCam)
targetX, targetY = int(hand_landmarks.landmark[8].x*wScr), int(hand_landmarks.landmark[8].y*hScr)
mouse.position = (targetX, targetY)
xy_dis_8_12, z_dis_8_12 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 8, 12)
xy_dis_12_16, z_dis_12_16 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 12, 16)
if xy_dis_8_12 < 40 and z_dis_8_12 < 20:
mouse.click(pynput.mouse.Button.left)
sleep(0.3)
if xy_dis_12_16 < 40 and z_dis_12_16 < 20:
mouse.click(pynput.mouse.Button.left, 2)
sleep(0.3)
cv2.imshow('MediaPipe Hands', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release() | [
"autopy.screen.size",
"cv2.flip",
"time.sleep",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.cvtColor",
"pynput.mouse.Controller",
"cv2.waitKey"
]
| [((141, 161), 'autopy.screen.size', 'autopy.screen.size', ([], {}), '()\n', (159, 161), False, 'import autopy\n'), ((169, 188), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (185, 188), False, 'import cv2\n'), ((303, 328), 'pynput.mouse.Controller', 'pynput.mouse.Controller', ([], {}), '()\n', (326, 328), False, 'import pynput\n'), ((1556, 1594), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (1568, 1594), False, 'import cv2\n'), ((2647, 2683), 'cv2.imshow', 'cv2.imshow', (['"""MediaPipe Hands"""', 'image'], {}), "('MediaPipe Hands', image)\n", (2657, 2683), False, 'import cv2\n'), ((1223, 1241), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (1231, 1241), False, 'import cv2\n'), ((2695, 2709), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (2706, 2709), False, 'import cv2\n'), ((2476, 2486), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (2481, 2486), False, 'from time import sleep\n'), ((2627, 2637), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (2632, 2637), False, 'from time import sleep\n')] |
import threading
from queue import Queue
from multiprocessing.pool import ApplyResult
import tabular_logger as tlogger
class AsyncWorker(object):
@property
def concurrent_tasks(self):
raise NotImplementedError()
def run_async(self, task_id, task, callback):
raise NotImplementedError()
class WorkerHub(object):
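    # Dispatches (task_id, task) items from input_queue to free (worker, subworker) slots
    # and forwards finished results to done_queue via the internal done_buffer.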
def __init__(self, workers, input_queue, done_queue):
self.done_buffer = Queue()
self.workers = workers
self.available_workers = Queue()
self.done_queue = done_queue
self._cache = {}
self.input_queue = input_queue
for w in workers:
for t in w.concurrent_tasks:
self.available_workers.put((w, t))
self.__initialize_handlers()
def __initialize_handlers(self):
self._input_handler = threading.Thread(
target=WorkerHub._handle_input,
args=(self,)
)
self._input_handler._state = 0
tlogger.info('WorkerHub: _input_handler initialized')
self._output_handler = threading.Thread(
target=WorkerHub._handle_output,
args=(self,)
)
self._output_handler._state = 0
tlogger.info('WorkerHub: _output_handler initialized')
def worker_callback(self, worker, subworker, result):
worker_task = (worker, subworker)
if worker_task in self._cache:
task_id = self._cache[worker_task]
del self._cache[worker_task]
self.done_buffer.put((task_id, result))
else:
tlogger.warn('WorkerHub: Worker task not found in cache', worker_task)
tlogger.warn('WorkerHub: Subworker', subworker)
tlogger.warn('WorkerHub: Unable to process result', result)
# Return worker back
self.available_workers.put(worker_task)
@staticmethod
def _handle_input(self):
try:
while True:
worker_task = self.available_workers.get()
if worker_task is None:
tlogger.info('WorkerHub._handle_input NO MORE WORKERS AWAILABLE')
break
worker, subworker = worker_task
task = self.input_queue.get()
if task is None:
tlogger.info('WorkerHub._handle_input NO MORE INPUTS AWAILABLE')
break
task_id, task = task
self._cache[worker_task] = task_id
# tlogger.info('WorkerHub: put task id: %s in cache keyed by worker task: %s' % (task_id, worker_task))
worker.run_async(subworker, task, callback=self.worker_callback)
except:
tlogger.exception('WorkerHub._handle_input exception thrown')
raise
@staticmethod
def _handle_output(self):
try:
while True:
result = self.done_buffer.get()
if result is None:
tlogger.info('WorkerHub._handle_output done')
break
self.done_queue.put(result)
except:
tlogger.exception('WorkerHub._handle_output exception thrown')
raise
def initialize(self):
self._input_handler.start()
self._output_handler.start()
def close(self):
self.available_workers.put(None)
self.input_queue.put(None)
self.done_buffer.put(None)
class AsyncTaskHub(object):
def __init__(self, input_queue=None, results_queue=None):
if input_queue is None:
input_queue = Queue(64)
self.input_queue = input_queue
self._cache = {}
self.results_queue = None
if results_queue is not None:
self.results_queue = results_queue
self._output_handler = threading.Thread(
target=AsyncTaskHub._handle_output,
args=(self,)
)
self._output_handler.daemon = True
self._output_handler._state = 0
self._output_handler.start()
@staticmethod
def _handle_output(self):
try:
while True:
result = self.results_queue.get()
if result is None:
tlogger.info('AsyncTaskHub._handle_output done')
break
self.put(result)
except:
tlogger.exception('AsyncTaskHub._handle_output exception thrown')
raise
def run_async(self, task, callback=None, error_callback=None):
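        # Create a multiprocessing ApplyResult keyed by its job id in self._cache;
        # put() completes it once the matching result comes back on results_queue.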
result = ApplyResult(self._cache, callback, error_callback)
self.input_queue.put((result._job, task))
return result
def put(self, result):
job, result=result
self._cache[job]._set(0, (True, result))
| [
"tabular_logger.info",
"multiprocessing.pool.ApplyResult",
"tabular_logger.exception",
"tabular_logger.warn",
"threading.Thread",
"queue.Queue"
]
| [((430, 437), 'queue.Queue', 'Queue', ([], {}), '()\n', (435, 437), False, 'from queue import Queue\n'), ((502, 509), 'queue.Queue', 'Queue', ([], {}), '()\n', (507, 509), False, 'from queue import Queue\n'), ((836, 898), 'threading.Thread', 'threading.Thread', ([], {'target': 'WorkerHub._handle_input', 'args': '(self,)'}), '(target=WorkerHub._handle_input, args=(self,))\n', (852, 898), False, 'import threading\n'), ((984, 1037), 'tabular_logger.info', 'tlogger.info', (['"""WorkerHub: _input_handler initialized"""'], {}), "('WorkerHub: _input_handler initialized')\n", (996, 1037), True, 'import tabular_logger as tlogger\n'), ((1070, 1133), 'threading.Thread', 'threading.Thread', ([], {'target': 'WorkerHub._handle_output', 'args': '(self,)'}), '(target=WorkerHub._handle_output, args=(self,))\n', (1086, 1133), False, 'import threading\n'), ((1220, 1274), 'tabular_logger.info', 'tlogger.info', (['"""WorkerHub: _output_handler initialized"""'], {}), "('WorkerHub: _output_handler initialized')\n", (1232, 1274), True, 'import tabular_logger as tlogger\n'), ((4565, 4615), 'multiprocessing.pool.ApplyResult', 'ApplyResult', (['self._cache', 'callback', 'error_callback'], {}), '(self._cache, callback, error_callback)\n', (4576, 4615), False, 'from multiprocessing.pool import ApplyResult\n'), ((1581, 1651), 'tabular_logger.warn', 'tlogger.warn', (['"""WorkerHub: Worker task not found in cache"""', 'worker_task'], {}), "('WorkerHub: Worker task not found in cache', worker_task)\n", (1593, 1651), True, 'import tabular_logger as tlogger\n'), ((1664, 1711), 'tabular_logger.warn', 'tlogger.warn', (['"""WorkerHub: Subworker"""', 'subworker'], {}), "('WorkerHub: Subworker', subworker)\n", (1676, 1711), True, 'import tabular_logger as tlogger\n'), ((1724, 1783), 'tabular_logger.warn', 'tlogger.warn', (['"""WorkerHub: Unable to process result"""', 'result'], {}), "('WorkerHub: Unable to process result', result)\n", (1736, 1783), True, 'import tabular_logger as tlogger\n'), ((3591, 3600), 'queue.Queue', 'Queue', (['(64)'], {}), '(64)\n', (3596, 3600), False, 'from queue import Queue\n'), ((3820, 3886), 'threading.Thread', 'threading.Thread', ([], {'target': 'AsyncTaskHub._handle_output', 'args': '(self,)'}), '(target=AsyncTaskHub._handle_output, args=(self,))\n', (3836, 3886), False, 'import threading\n'), ((2715, 2776), 'tabular_logger.exception', 'tlogger.exception', (['"""WorkerHub._handle_input exception thrown"""'], {}), "('WorkerHub._handle_input exception thrown')\n", (2732, 2776), True, 'import tabular_logger as tlogger\n'), ((3128, 3190), 'tabular_logger.exception', 'tlogger.exception', (['"""WorkerHub._handle_output exception thrown"""'], {}), "('WorkerHub._handle_output exception thrown')\n", (3145, 3190), True, 'import tabular_logger as tlogger\n'), ((4396, 4461), 'tabular_logger.exception', 'tlogger.exception', (['"""AsyncTaskHub._handle_output exception thrown"""'], {}), "('AsyncTaskHub._handle_output exception thrown')\n", (4413, 4461), True, 'import tabular_logger as tlogger\n'), ((2066, 2131), 'tabular_logger.info', 'tlogger.info', (['"""WorkerHub._handle_input NO MORE WORKERS AWAILABLE"""'], {}), "('WorkerHub._handle_input NO MORE WORKERS AWAILABLE')\n", (2078, 2131), True, 'import tabular_logger as tlogger\n'), ((2306, 2370), 'tabular_logger.info', 'tlogger.info', (['"""WorkerHub._handle_input NO MORE INPUTS AWAILABLE"""'], {}), "('WorkerHub._handle_input NO MORE INPUTS AWAILABLE')\n", (2318, 2370), True, 'import tabular_logger as tlogger\n'), ((2984, 3029), 'tabular_logger.info', 
'tlogger.info', (['"""WorkerHub._handle_output done"""'], {}), "('WorkerHub._handle_output done')\n", (2996, 3029), True, 'import tabular_logger as tlogger\n'), ((4260, 4308), 'tabular_logger.info', 'tlogger.info', (['"""AsyncTaskHub._handle_output done"""'], {}), "('AsyncTaskHub._handle_output done')\n", (4272, 4308), True, 'import tabular_logger as tlogger\n')] |
"""Django Admin Panels for App"""
from django.contrib import admin
from mailer import models
@admin.register(models.SendingAddress)
class SendingAddressAdmin(admin.ModelAdmin):
"""Admin View for SendingAddress"""
list_display = ('address', 'organization')
list_filter = ('organization__name',)
actions = None
def has_delete_permission(self, request, obj=None):
"""The primary address can not be deleted via the django admin"""
if obj and obj.pk == 1:
return False
else:
return True
| [
"django.contrib.admin.register"
]
| [((96, 133), 'django.contrib.admin.register', 'admin.register', (['models.SendingAddress'], {}), '(models.SendingAddress)\n', (110, 133), False, 'from django.contrib import admin\n')] |
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import os
import time
import csv
from webdriver_manager.chrome import ChromeDriverManager
import math
from basic_function import basic_login, find_element
class TestConduit(object):
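    # UI tests for the Conduit demo app served locally at http://localhost:1667/#/.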
def setup(self):
browser_options = Options()
browser_options.headless = True
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
self.driver.get("http://localhost:1667/#/")
def teardown(self):
self.driver.quit()
    # -------- A028, TC-0037 Cookie policy notice --------
def test_cookie_process(self):
assert self.driver.find_element_by_id("cookie-policy-panel").is_displayed()
        # Accept cookies flow
self.driver.find_element_by_xpath(
"//button[@class='cookie__bar__buttons__button cookie__bar__buttons__button--accept']").click()
time.sleep(2)
        # # Decline cookies flow
# self.driver.find_element_by_xpath(
# "//button[@class='cookie__bar__buttons__button cookie__bar__buttons__button--decline']").click()
#
# time.sleep(2)
try:
self.driver.find_element_by_id("cookie-policy-panel")
time.sleep(2)
except NoSuchElementException:
return True
return False
    # -------- A002, TC-0002 Registration with valid data --------
def test_registration_process(self):
user_input_data = ["user200", "<EMAIL>", "<PASSWORD>"]
self.driver.find_element_by_xpath("//a[@href='#/register']").click()
        # Fill in the input fields with the random user data
for i in range(len(user_input_data)):
self.driver.find_element_by_xpath(f"//fieldset[{i + 1}]/input").send_keys(user_input_data[i])
self.driver.find_element_by_tag_name("button").click()
time.sleep(2)
        # Verify the text of the successful-registration notification dialog
swal_text = find_element(self.driver, By.CLASS_NAME, "swal-text")
assert swal_text.text == "Your registration was successful!"
# time.sleep(2)
        # Close the notification dialog
close_btn = find_element(self.driver, By.XPATH, "//button[normalize-space()='OK']")
close_btn.click()
# self.driver.find_element_by_xpath("//button[normalize-space()='OK']").click()
time.sleep(1)
        # Verify that the user is logged in
username_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").text
assert username_check == user_input_data[
0], f"Test Failed: Username did not match expected ({user_input_data[0]})."
# time.sleep(2)
    # -------- A004, TC-0010 Login with valid data --------
def test_login_process(self):
user_input_data = ["user200", "<EMAIL>", "<PASSWORD>"]
self.driver.find_element_by_xpath("//a[@href='#/login']").click()
        # Fill in the login form
for i in range(len(user_input_data) - 1):
self.driver.find_element_by_xpath(f"//fieldset[{i + 1}]/input").send_keys(user_input_data[i + 1])
time.sleep(1)
self.driver.find_element_by_tag_name("button").click()
time.sleep(3)
        # Verify that the user is logged in
username_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").text
assert username_check == user_input_data[0], f"Test Failed: User is not logged in ({user_input_data[0]})."
time.sleep(2)
    # -------- A010, TC-0034 Edit own profile, change profile picture --------
def test_edit_settings_process(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[@href='#/settings']").click()
time.sleep(2)
        # Verify that the Your Settings page is displayed
settings_check = self.driver.find_element_by_tag_name("h1").text
assert settings_check == "Your Settings", f"Test Failed: Page names did not match expected ({settings_check})."
time.sleep(3)
        # Read the prepared data
with open('edit_user.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
user_update = row
time.sleep(2)
        # Fill in the rows of the input form with the data
user_picture = self.driver.find_element_by_class_name("form-control")
user_bio = self.driver.find_element_by_xpath("//textarea[@placeholder='Short bio about you']")
user_picture.clear()
user_picture.send_keys(user_update[0])
user_bio.clear()
user_bio.send_keys(user_update[1])
time.sleep(1)
self.driver.find_element_by_xpath("//button[normalize-space()='Update Settings']").click()
time.sleep(2)
        # Verify the text of the successful-update notification dialog
assert self.driver.find_element_by_class_name("swal-title").text == "Update successful!"
time.sleep(2)
        # Close the notification dialog
self.driver.find_element_by_xpath("//button[normalize-space()='OK']").click()
time.sleep(1)
        # Verify the changes made to the user's profile
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").click()
time.sleep(2)
img_check = self.driver.find_element_by_class_name("user-img").get_attribute("src")
assert img_check == user_update[
0], f"Test Failed: Image did not match expected ({user_update[0]})."
bio_check = self.driver.find_element_by_css_selector("div[class='user-info'] p").text
assert bio_check == user_update[
1], f"Test Failed: User's bio did not match expected ({user_update[1]})."
time.sleep(2)
    # -------- A005, TC-0003 Logout --------
def test_logout_process(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//i[@class='ion-android-exit']").click()
time.sleep(2)
        # Verify that the user is logged out
        sign_out_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/login')]").text
        assert sign_out_check == "Sign in", f"Test Failed: User is still logged in."
time.sleep(1)
    # -------- A006, TC-0015 Create a new post with valid data --------
def test_create_post(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[@href='#/editor']").click()
with open('new_post_content.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
new_article_data = row
time.sleep(2)
        # Fill in the input form
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'Article')]").send_keys(new_article_data[0])
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'What')]").send_keys(new_article_data[1])
self.driver.find_element_by_xpath("//textarea[starts-with(@placeholder,'Write')]").send_keys(
new_article_data[2])
self.driver.find_element_by_xpath("//input[@placeholder='Enter tags']").send_keys(new_article_data[3])
time.sleep(1)
self.driver.find_element_by_css_selector("button[type='submit']").click()
time.sleep(2)
        # Verify that the post was created
title_check = self.driver.find_element_by_tag_name("h1").text
assert title_check == new_article_data[
0], f"Test Failed: Content title did not match expected ({new_article_data[0]})."
time.sleep(2)
    # -------- A006, TC-0015 New data entry with valid data (in series) --------
def test_create_posts_process(self):
basic_login(self.driver)
for i in range(1):
with open('contents.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
new_article_data = row
                    # Fill in the input form
self.driver.find_element_by_xpath("//a[@href='#/editor']").click()
time.sleep(4)
self.driver.find_element_by_xpath("//input[@placeholder='Article Title']").send_keys(
new_article_data[0])
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'What')]").send_keys(
new_article_data[1])
self.driver.find_element_by_xpath("//textarea[starts-with(@placeholder,'Write')]").send_keys(
new_article_data[2])
self.driver.find_element_by_xpath("//input[@placeholder='Enter tags']").send_keys(
new_article_data[3])
time.sleep(1)
self.driver.find_element_by_css_selector("button[type='submit']").click()
time.sleep(2)
                    # Verify that the post was created
title_check = self.driver.find_element_by_tag_name("h1").text
assert title_check == new_article_data[
0], f"Test Failed: Content title did not match expected ({new_article_data[0]})."
time.sleep(4)
    # -------- A015, TC-0024 Delete own post --------
def test_delete_post_process(self):
basic_login(self.driver)
my_articles = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]")
my_articles.click()
time.sleep(2)
articles_list = self.driver.find_elements_by_tag_name("h1")
if len(articles_list) > 0:
articles_list[0].click()
time.sleep(3)
self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/div/div/span/button/span").click()
time.sleep(2)
            # Verify that the post was actually deleted
my_articles.click()
time.sleep(2)
new_articles_list = self.driver.find_elements_by_tag_name("h1")
assert not new_articles_list[0] == articles_list[
0], f"Test Failed: Content is not deleted ({articles_list[0]})."
    # -------- A029 Export data from the UI --------
def test_export_my_last_post(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_tag_name("h1")
if os.path.exists("my_last_article.txt"):
os.remove("my_last_article.txt")
else:
pass
articles_list[0].click()
time.sleep(2)
article_title = self.driver.find_element_by_tag_name("h1").text
article_text = self.driver.find_element_by_tag_name("p").text
with open("my_last_article.txt", "a") as my_txt:
my_txt.write(f"{article_title};{article_text};\n")
time.sleep(3)
        # Verify the exported content
with open("my_last_article.txt", "r") as my_txt2:
my_txt = my_txt2.readline()
my_txt_list = my_txt.split(";")
assert my_txt_list[0] == article_title, f"Test Failed: Content title is not exported."
assert my_txt_list[1] == article_text, f"Test Failed: Content text is not exported."
    # -------- A007, TC-0025 View the list of posts --------
def test_global_feed_list(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_xpath("//div[@class='article-preview']/a/h1")
if os.path.exists("titles_list.csv"):
os.remove("titles_list.csv")
else:
pass
for i in range(len(articles_list)):
article_title = articles_list[i].text
with open('titles_list.csv', 'a', encoding="utf-8") as csv_titles:
csv_titles.write(f"{article_title};")
        # Check the length of the list
with open('titles_list.csv', 'r', encoding="utf-8") as csv_titles2:
check_articles = csv.reader(csv_titles2, delimiter=';')
for row in check_articles:
check_articles_list = row
assert len(articles_list) == len(
check_articles_list) - 1, f"Test Failed: The length of the lists are not exactly the same."
    # -------- A007, TC-0025 View the list of posts (with pagination) --------
def test_global_feed_pagination(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_xpath("//div[@class='article-preview']/a/h1")
        # Use the pagination buttons
pages = self.driver.find_elements_by_class_name("page-link")
for page in pages:
page.click()
time.sleep(1)
        # Verify that every page was visited
assert len(pages) == int(math.ceil(
len(articles_list) / 10)), f"Test Failed: The length of the list and pagination not exactly the same."
| [
"selenium.webdriver.chrome.options.Options",
"os.path.exists",
"time.sleep",
"os.remove",
"webdriver_manager.chrome.ChromeDriverManager",
"csv.reader",
"basic_function.basic_login",
"basic_function.find_element"
]
| [((422, 431), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (429, 431), False, 'from selenium.webdriver.chrome.options import Options\n'), ((1060, 1073), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1070, 1073), False, 'import time\n'), ((2036, 2049), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2046, 2049), False, 'import time\n'), ((2143, 2196), 'basic_function.find_element', 'find_element', (['self.driver', 'By.CLASS_NAME', '"""swal-text"""'], {}), "(self.driver, By.CLASS_NAME, 'swal-text')\n", (2155, 2196), False, 'from basic_function import basic_login, find_element\n'), ((2348, 2419), 'basic_function.find_element', 'find_element', (['self.driver', 'By.XPATH', '"""//button[normalize-space()=\'OK\']"""'], {}), '(self.driver, By.XPATH, "//button[normalize-space()=\'OK\']")\n', (2360, 2419), False, 'from basic_function import basic_login, find_element\n'), ((2543, 2556), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2553, 2556), False, 'import time\n'), ((3318, 3331), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3328, 3331), False, 'import time\n'), ((3405, 3418), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3415, 3418), False, 'import time\n'), ((3687, 3700), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3697, 3700), False, 'import time\n'), ((3826, 3850), 'basic_function.basic_login', 'basic_login', (['self.driver'], {}), '(self.driver)\n', (3837, 3850), False, 'from basic_function import basic_login, find_element\n'), ((3938, 3951), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3948, 3951), False, 'import time\n'), ((4212, 4225), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4222, 4225), False, 'import time\n'), ((4468, 4481), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4478, 4481), False, 'import time\n'), ((4882, 4895), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4892, 4895), False, 'import time\n'), ((5005, 5018), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5015, 5018), False, 'import time\n'), ((5191, 5204), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5201, 5204), False, 'import time\n'), ((5337, 5350), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5347, 5350), False, 'import time\n'), ((5515, 5528), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5525, 5528), False, 'import time\n'), ((5974, 5987), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5984, 5987), False, 'import time\n'), ((6084, 6108), 'basic_function.basic_login', 'basic_login', (['self.driver'], {}), '(self.driver)\n', (6095, 6108), False, 'from basic_function import basic_login, find_element\n'), ((6203, 6216), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6213, 6216), False, 'import time\n'), ((6463, 6476), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6473, 6476), False, 'import time\n'), ((6594, 6618), 'basic_function.basic_login', 'basic_login', (['self.driver'], {}), '(self.driver)\n', (6605, 6618), False, 'from basic_function import basic_login, find_element\n'), ((6903, 6916), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6913, 6916), False, 'import time\n'), ((7448, 7461), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7458, 7461), False, 'import time\n'), ((7554, 7567), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7564, 7567), False, 'import time\n'), ((7836, 7849), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7846, 7849), False, 'import time\n'), ((7982, 8006), 'basic_function.basic_login', 'basic_login', (['self.driver'], 
{}), '(self.driver)\n', (7993, 8006), False, 'from basic_function import basic_login, find_element\n'), ((9638, 9662), 'basic_function.basic_login', 'basic_login', (['self.driver'], {}), '(self.driver)\n', (9649, 9662), False, 'from basic_function import basic_login, find_element\n'), ((9790, 9803), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9800, 9803), False, 'import time\n'), ((9954, 9967), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (9964, 9967), False, 'import time\n'), ((10082, 10095), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10092, 10095), False, 'import time\n'), ((10195, 10208), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10205, 10208), False, 'import time\n'), ((10524, 10548), 'basic_function.basic_login', 'basic_login', (['self.driver'], {}), '(self.driver)\n', (10535, 10548), False, 'from basic_function import basic_login, find_element\n'), ((10643, 10656), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10653, 10656), False, 'import time\n'), ((10738, 10775), 'os.path.exists', 'os.path.exists', (['"""my_last_article.txt"""'], {}), "('my_last_article.txt')\n", (10752, 10775), False, 'import os\n'), ((10896, 10909), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10906, 10909), False, 'import time\n'), ((11182, 11195), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (11192, 11195), False, 'import time\n'), ((11687, 11711), 'basic_function.basic_login', 'basic_login', (['self.driver'], {}), '(self.driver)\n', (11698, 11711), False, 'from basic_function import basic_login, find_element\n'), ((11805, 11818), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11815, 11818), False, 'import time\n'), ((11931, 11964), 'os.path.exists', 'os.path.exists', (['"""titles_list.csv"""'], {}), "('titles_list.csv')\n", (11945, 11964), False, 'import os\n'), ((12817, 12841), 'basic_function.basic_login', 'basic_login', (['self.driver'], {}), '(self.driver)\n', (12828, 12841), False, 'from basic_function import basic_login, find_element\n'), ((12935, 12948), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (12945, 12948), False, 'import time\n'), ((1400, 1413), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1410, 1413), False, 'import time\n'), ((4350, 4389), 'csv.reader', 'csv.reader', (['article_file'], {'delimiter': '""";"""'}), "(article_file, delimiter=';')\n", (4360, 4389), False, 'import csv\n'), ((6780, 6819), 'csv.reader', 'csv.reader', (['article_file'], {'delimiter': '""";"""'}), "(article_file, delimiter=';')\n", (6790, 6819), False, 'import csv\n'), ((10789, 10821), 'os.remove', 'os.remove', (['"""my_last_article.txt"""'], {}), "('my_last_article.txt')\n", (10798, 10821), False, 'import os\n'), ((11978, 12006), 'os.remove', 'os.remove', (['"""titles_list.csv"""'], {}), "('titles_list.csv')\n", (11987, 12006), False, 'import os\n'), ((12412, 12450), 'csv.reader', 'csv.reader', (['csv_titles2'], {'delimiter': '""";"""'}), "(csv_titles2, delimiter=';')\n", (12422, 12450), False, 'import csv\n'), ((13218, 13231), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (13228, 13231), False, 'import time\n'), ((8119, 8158), 'csv.reader', 'csv.reader', (['article_file'], {'delimiter': '""";"""'}), "(article_file, delimiter=';')\n", (8129, 8158), False, 'import csv\n'), ((511, 532), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (530, 532), False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((8397, 8410), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (8407, 8410), 
False, 'import time\n'), ((9045, 9058), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9055, 9058), False, 'import time\n'), ((9175, 9188), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9185, 9188), False, 'import time\n'), ((9517, 9530), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (9527, 9530), False, 'import time\n')] |
import struct
import msgpack
from lbrynet.wallet.transaction import Transaction, Output
from torba.server.hash import hash_to_hex_str
from torba.server.block_processor import BlockProcessor
from lbrynet.schema.claim import Claim
from lbrynet.wallet.server.model import ClaimInfo
class LBRYBlockProcessor(BlockProcessor):
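    # Extends torba's BlockProcessor to index LBRY claim name/update/abandon operations
    # and to record undo information so they can be reverted on a reorg.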
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.env.coin.NET == "regtest":
self.prefetcher.polling_delay = 0.5
self.should_validate_signatures = self.env.boolean('VALIDATE_CLAIM_SIGNATURES', False)
self.logger.info("LbryumX Block Processor - Validating signatures: {}".format(self.should_validate_signatures))
def advance_blocks(self, blocks):
# save height, advance blocks as usual, then hook our claim tx processing
height = self.height + 1
super().advance_blocks(blocks)
pending_undo = []
for index, block in enumerate(blocks):
undo = self.advance_claim_txs(block.transactions, height + index)
pending_undo.append((height+index, undo,))
self.db.write_undo(pending_undo)
def advance_claim_txs(self, txs, height):
# TODO: generate claim undo info!
undo_info = []
add_undo = undo_info.append
update_inputs = set()
for etx, txid in txs:
update_inputs.clear()
tx = Transaction(etx.serialize())
for index, output in enumerate(tx.outputs):
if not output.is_claim:
continue
if output.script.is_claim_name:
add_undo(self.advance_claim_name_transaction(output, height, txid, index))
elif output.script.is_update_claim:
update_input = self.db.get_update_input(output.claim_hash, tx.inputs)
if update_input:
update_inputs.add(update_input)
add_undo(self.advance_update_claim(output, height, txid, index))
else:
info = (hash_to_hex_str(txid), output.claim_id,)
self.logger.error("REJECTED: {} updating {}".format(*info))
for txin in tx.inputs:
if txin not in update_inputs:
abandoned_claim_id = self.db.abandon_spent(txin.txo_ref.tx_ref.hash, txin.txo_ref.position)
if abandoned_claim_id:
add_undo((abandoned_claim_id, self.db.get_claim_info(abandoned_claim_id)))
return undo_info
def advance_update_claim(self, output: Output, height, txid, nout):
claim_id = output.claim_hash
claim_info = self.claim_info_from_output(output, txid, nout, height)
old_claim_info = self.db.get_claim_info(claim_id)
self.db.put_claim_id_for_outpoint(old_claim_info.txid, old_claim_info.nout, None)
if old_claim_info.cert_id:
self.db.remove_claim_from_certificate_claims(old_claim_info.cert_id, claim_id)
if claim_info.cert_id:
self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
self.db.put_claim_info(claim_id, claim_info)
self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
return claim_id, old_claim_info
def advance_claim_name_transaction(self, output: Output, height, txid, nout):
claim_id = output.claim_hash
claim_info = self.claim_info_from_output(output, txid, nout, height)
if claim_info.cert_id:
self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
self.db.put_claim_info(claim_id, claim_info)
self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
return claim_id, None
def backup_from_undo_info(self, claim_id, undo_claim_info):
"""
Undo information holds a claim state **before** a transaction changes it
There are 4 possibilities when processing it, of which only 3 are valid ones:
1. the claim is known and the undo info has info, it was an update
2. the claim is known and the undo info doesn't hold any info, it was claimed
        3. the claim is unknown and the undo info has info, it was abandoned
        4. the claim is unknown and the undo info doesn't hold info, error!
"""
undo_claim_info = ClaimInfo(*undo_claim_info) if undo_claim_info else None
current_claim_info = self.db.get_claim_info(claim_id)
if current_claim_info and undo_claim_info:
# update, remove current claim
self.db.remove_claim_id_for_outpoint(current_claim_info.txid, current_claim_info.nout)
if current_claim_info.cert_id:
self.db.remove_claim_from_certificate_claims(current_claim_info.cert_id, claim_id)
elif current_claim_info and not undo_claim_info:
# claim, abandon it
self.db.abandon_spent(current_claim_info.txid, current_claim_info.nout)
elif not current_claim_info and undo_claim_info:
# abandon, reclaim it (happens below)
pass
else:
# should never happen, unless the database got into an inconsistent state
raise Exception("Unexpected situation occurred on backup, this means the database is inconsistent. "
"Please report. Resetting the data folder (reindex) solves it for now.")
if undo_claim_info:
self.db.put_claim_info(claim_id, undo_claim_info)
if undo_claim_info.cert_id:
cert_id = self._checksig(undo_claim_info.value, undo_claim_info.address)
self.db.put_claim_id_signed_by_cert_id(cert_id, claim_id)
self.db.put_claim_id_for_outpoint(undo_claim_info.txid, undo_claim_info.nout, claim_id)
def backup_txs(self, txs):
self.logger.info("Reorg at height {} with {} transactions.".format(self.height, len(txs)))
undo_info = msgpack.loads(self.db.claim_undo_db.get(struct.pack(">I", self.height)), use_list=False)
for claim_id, undo_claim_info in reversed(undo_info):
self.backup_from_undo_info(claim_id, undo_claim_info)
return super().backup_txs(txs)
def backup_blocks(self, raw_blocks):
self.db.batched_flush_claims()
super().backup_blocks(raw_blocks=raw_blocks)
self.db.batched_flush_claims()
async def flush(self, flush_utxos):
self.db.batched_flush_claims()
return await super().flush(flush_utxos)
def claim_info_from_output(self, output: Output, txid, nout, height):
address = self.coin.address_from_script(output.script.source)
name, value, cert_id = output.script.values['claim_name'], output.script.values['claim'], None
assert txid and address
cert_id = self._checksig(value, address)
return ClaimInfo(name, value, txid, nout, output.amount, address, height, cert_id)
def _checksig(self, value, address):
try:
claim_dict = Claim.from_bytes(value)
cert_id = claim_dict.signing_channel_hash
if not self.should_validate_signatures:
return cert_id
if cert_id:
cert_claim = self.db.get_claim_info(cert_id)
if cert_claim:
certificate = Claim.from_bytes(cert_claim.value)
claim_dict.validate_signature(address, certificate)
return cert_id
except Exception:
pass
| [
"lbrynet.schema.claim.Claim.from_bytes",
"lbrynet.wallet.server.model.ClaimInfo",
"struct.pack",
"torba.server.hash.hash_to_hex_str"
]
| [((6877, 6952), 'lbrynet.wallet.server.model.ClaimInfo', 'ClaimInfo', (['name', 'value', 'txid', 'nout', 'output.amount', 'address', 'height', 'cert_id'], {}), '(name, value, txid, nout, output.amount, address, height, cert_id)\n', (6886, 6952), False, 'from lbrynet.wallet.server.model import ClaimInfo\n'), ((4367, 4394), 'lbrynet.wallet.server.model.ClaimInfo', 'ClaimInfo', (['*undo_claim_info'], {}), '(*undo_claim_info)\n', (4376, 4394), False, 'from lbrynet.wallet.server.model import ClaimInfo\n'), ((7033, 7056), 'lbrynet.schema.claim.Claim.from_bytes', 'Claim.from_bytes', (['value'], {}), '(value)\n', (7049, 7056), False, 'from lbrynet.schema.claim import Claim\n'), ((6016, 6046), 'struct.pack', 'struct.pack', (['""">I"""', 'self.height'], {}), "('>I', self.height)\n", (6027, 6046), False, 'import struct\n'), ((7344, 7378), 'lbrynet.schema.claim.Claim.from_bytes', 'Claim.from_bytes', (['cert_claim.value'], {}), '(cert_claim.value)\n', (7360, 7378), False, 'from lbrynet.schema.claim import Claim\n'), ((2096, 2117), 'torba.server.hash.hash_to_hex_str', 'hash_to_hex_str', (['txid'], {}), '(txid)\n', (2111, 2117), False, 'from torba.server.hash import hash_to_hex_str\n')] |