code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M)
---|---|---
#Prediction model using a Monte Carlo simulation and the Brownian motion equation
#import of libraries
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
from scipy.stats import norm
#ticker selection
def mainFunction(tradingSymbol):
data = pd.DataFrame()
data[tradingSymbol] = wb.DataReader(tradingSymbol, data_source='yahoo', start='2019-1-1')['Adj Close']
#percent change of asset price
log_returns = np.log(1+ data.pct_change())
    #graph showing growth over time beginning from 2019
data.plot(figsize = (10,6));
plt.show()
#graph of log returns of input ticker
#returns are normally distributed and have a consistent mean
log_returns.plot(figsize = (10,6))
plt.show()
#calculations
averageDailyReturn = log_returns.mean()
variance = log_returns.var()
drift = averageDailyReturn-(variance/2)
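    #subtracting variance/2 is the Ito correction used in geometric Brownian motion: it gives the drift of the log-price process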
standardDeviation = log_returns.std()
#Brownian Motion equation
    #daily return = e^(drift + standardDeviation * Z), where Z is a standard normal draw
    #prediction of future stock prices based on the simulation below, using numpy arrays to store the data
np.array(drift)
drift.values
standardDeviation.values
    #z-score: the number of standard deviations away from the mean for a given cumulative probability
norm.ppf(0.95)
#10 x 2 Matrix
x = np.random.rand(10,2)
norm.ppf(x)
#stores distances from the mean value, 0, into the 10 x 2 matrix
Z = norm.ppf(np.random.rand(10,2))
#time interval for the stock price forecast
timeInterval = 365
iterations = 5
    #daily return = e^(drift + standardDeviation * Z), where Z is a standard normal draw
    #5 simulated series (one per iteration) of 365 random future daily returns for the ticker symbol
dailyReturns = np.exp(drift.values + standardDeviation.values * norm.ppf(np.random.rand(timeInterval,iterations)))
#returns into price points
presentPrice = data.iloc[-1]
priceList = np.zeros_like(dailyReturns)
priceList[0] = presentPrice
    #iterate over the time interval of 365 days
for t in range(1, timeInterval):
priceList[t] = priceList[t-1] * dailyReturns[t]
    #plots the 5 simulated paths of the future stock price
plt.figure(figsize =(10,6))
plt.plot(priceList)
plt.show()
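#Illustrative call (example only; the ticker below is an arbitrary choice and not part of the original script):
#mainFunction('AAPL')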
|
[
"numpy.random.rand",
"pandas_datareader.data.DataReader",
"matplotlib.pyplot.plot",
"scipy.stats.norm.ppf",
"numpy.array",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] |
[((322, 336), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (334, 336), True, 'import pandas as pd\n'), ((621, 631), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (629, 631), True, 'import matplotlib.pyplot as plt\n'), ((782, 792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (790, 792), True, 'import matplotlib.pyplot as plt\n'), ((1149, 1164), 'numpy.array', 'np.array', (['drift'], {}), '(drift)\n', (1157, 1164), True, 'import numpy as np\n'), ((1328, 1342), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.95)'], {}), '(0.95)\n', (1336, 1342), False, 'from scipy.stats import norm\n'), ((1371, 1392), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (1385, 1392), True, 'import numpy as np\n'), ((1396, 1407), 'scipy.stats.norm.ppf', 'norm.ppf', (['x'], {}), '(x)\n', (1404, 1407), False, 'from scipy.stats import norm\n'), ((1923, 1950), 'numpy.zeros_like', 'np.zeros_like', (['dailyReturns'], {}), '(dailyReturns)\n', (1936, 1950), True, 'import numpy as np\n'), ((2175, 2202), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2185, 2202), True, 'import matplotlib.pyplot as plt\n'), ((2207, 2226), 'matplotlib.pyplot.plot', 'plt.plot', (['priceList'], {}), '(priceList)\n', (2215, 2226), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2241), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2239, 2241), True, 'import matplotlib.pyplot as plt\n'), ((363, 430), 'pandas_datareader.data.DataReader', 'wb.DataReader', (['tradingSymbol'], {'data_source': '"""yahoo"""', 'start': '"""2019-1-1"""'}), "(tradingSymbol, data_source='yahoo', start='2019-1-1')\n", (376, 430), True, 'from pandas_datareader import data as wb\n'), ((1495, 1516), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (1509, 1516), True, 'import numpy as np\n'), ((1799, 1839), 'numpy.random.rand', 'np.random.rand', (['timeInterval', 'iterations'], {}), '(timeInterval, iterations)\n', (1813, 1839), True, 'import numpy as np\n')]
|
import os
import csv
import numpy as np
from sklearn.utils import shuffle
## Read in frame data
samples = []
with open('/../opt/carnd_p3/data/driving_log.csv') as csvfile: #open the log file
reader = csv.reader(csvfile) #as a readable csv
for line in reader:
samples.append(line) #add each line of the log file to samples
samples = samples[1:] # to remove table header
samples = shuffle(samples) # shuffle entire sample set before splitting into training and validation so that training isn't biased
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2) #split samples into 80% training, 20% validation
from scipy import ndimage #because cv2.imread() imports the image as BGR, and we want RGB
## Define generator to handle small portions of images at a time so that training is not as memory-heavy
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
        # shuffle(samples) #shuffle within the training/validation sets; not necessary since the full sample set was already shuffled above
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size] #collect the images for this batch
images = []
angles = []
for batch_sample in batch_samples:
path = '/../opt/carnd_p3/data/IMG/' #assign the location from which to read images
# read in images from all 3 cameras MAKING SURE TO READ IN AS RGB
center_image = ndimage.imread(path+batch_sample[0].split('/')[-1])
left_image = ndimage.imread(path+batch_sample[1].split('/')[-1])
right_image = ndimage.imread(path+batch_sample[2].split('/')[-1])
# read in steering angle
center_angle = float(batch_sample[3]) #read the steering angle
# apply a steering correction for the left and right images, in a way to generate "new" samples
correction = 0.2
left_angle = center_angle + correction
right_angle = center_angle - correction
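                # e.g. with correction = 0.2, a center_angle of 0.1 gives left_angle = 0.3 and right_angle = -0.1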
# add images and angles to batch set
images.extend([center_image, left_image, right_image])
angles.extend([center_angle, left_angle, right_angle])
# copy all batches' images to final numpy array
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train) #shuffle before yielding result
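            # note: each batch of 32 samples yields up to 96 images/angles (center, left and right per sample)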
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
ch, row, col = 3, 160, 320 # Full image format
#import Keras model layers
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
# BUILD MODEL
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x/127.5 - 1.0, input_shape=(row,col,ch)))
# Crop incoming data (training, validation, and autonomous so that everything is consistent)
model.add(Cropping2D(cropping=((60,20), (0,0)))) # could be first layer to reduce memory used in Lambda calculation, and thus faster training
# Begin CNN (similar to NVIDIA architecture)
# Convolution layer 1-3, kernel size 5 with stride of 2
model.add(Conv2D(24,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48,(5,5),strides=(2,2),activation='relu'))
# Convolution layers 4-5, kernel size 3 with stride of 1
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
# Flatten convolution output to yield single numerical result
model.add(Flatten())
# Fully connected layers to complete computations, gradually decreasing in parameters until final value
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
## Training hyper parameters to play with
## Stop training checkpoints...
# save_path = 'model{epoch:02d}-{val_loss:.2f}.h5'
# checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
## OR
batch_size = 32
epochs = 5 #***
## Compile and train the model
model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) #use Mean Squared Error to measure loss, use Adam optimizer for tuning
model.fit_generator(train_generator, steps_per_epoch=len(train_samples)/batch_size, validation_data=validation_generator, validation_steps=len(validation_samples)/batch_size, epochs=epochs, verbose=1) # train using generators
#save the trained model
model.save('model.h5')
|
[
"keras.layers.core.Flatten",
"keras.layers.convolutional.Cropping2D",
"sklearn.model_selection.train_test_split",
"sklearn.utils.shuffle",
"keras.layers.core.Lambda",
"keras.models.Sequential",
"numpy.array",
"keras.layers.convolutional.Conv2D",
"csv.reader",
"keras.layers.core.Dense"
] |
[((399, 415), 'sklearn.utils.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (406, 415), False, 'from sklearn.utils import shuffle\n'), ((610, 650), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (626, 650), False, 'from sklearn.model_selection import train_test_split\n'), ((3171, 3183), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3181, 3183), False, 'from keras.models import Sequential\n'), ((206, 225), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (216, 225), False, 'import csv\n'), ((3274, 3335), 'keras.layers.core.Lambda', 'Lambda', (['(lambda x: x / 127.5 - 1.0)'], {'input_shape': '(row, col, ch)'}), '(lambda x: x / 127.5 - 1.0, input_shape=(row, col, ch))\n', (3280, 3335), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\n'), ((3436, 3475), 'keras.layers.convolutional.Cropping2D', 'Cropping2D', ([], {'cropping': '((60, 20), (0, 0))'}), '(cropping=((60, 20), (0, 0)))\n', (3446, 3475), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((3680, 3733), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(24, (5, 5), strides=(2, 2), activation='relu')\n", (3686, 3733), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((3740, 3793), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(36, (5, 5), strides=(2, 2), activation='relu')\n", (3746, 3793), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((3800, 3853), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(48)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(48, (5, 5), strides=(2, 2), activation='relu')\n", (3806, 3853), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((3916, 3953), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3922, 3953), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((3962, 3999), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3968, 3999), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((4070, 4079), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (4077, 4079), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\n'), ((4195, 4205), 'keras.layers.core.Dense', 'Dense', (['(100)'], {}), '(100)\n', (4200, 4205), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\n'), ((4217, 4226), 'keras.layers.core.Dense', 'Dense', (['(50)'], {}), '(50)\n', (4222, 4226), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\n'), ((4238, 4247), 'keras.layers.core.Dense', 'Dense', (['(10)'], {}), '(10)\n', (4243, 4247), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\n'), ((4259, 4267), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4264, 4267), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\n'), ((2527, 2543), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2535, 2543), True, 'import numpy as np\n'), ((2566, 2582), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (2574, 2582), True, 'import numpy as np\n'), ((2601, 
2626), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2608, 2626), False, 'from sklearn.utils import shuffle\n')]
|
import numpy as np
from visual_dynamics.policies import CameraTargetPolicy
class RandomOffsetCameraTargetPolicy(CameraTargetPolicy):
def __init__(self, env, target_env, camera_node_name, agent_node_name, target_node_name,
height=12.0, radius=16.0, angle=(-np.pi/4, np.pi/4), tightness=0.1, hra_interpolation=True):
self.height = height
self.radius = radius
self.angle = angle
offset = self.sample_offset()
super(RandomOffsetCameraTargetPolicy, self).__init__(env, target_env, camera_node_name, agent_node_name,
target_node_name, offset, tightness=tightness,
hra_interpolation=hra_interpolation)
def reset(self):
self.offset = self.sample_offset()
state = super(RandomOffsetCameraTargetPolicy, self).reset()
# self.offset = self.sample_offset()
return state
def sample_offset(self):
height = np.random.uniform(*self.height) if isinstance(self.height, (list, tuple)) else self.height
radius = np.random.uniform(*self.radius) if isinstance(self.radius, (list, tuple)) else self.radius
angle = np.random.uniform(*self.angle) if isinstance(self.angle, (list, tuple)) else self.angle
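        # the offset below is a point on a circle of radius `radius` at height `height`, with the angle measured from the -y axis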
return np.array([radius * np.sin(angle), -radius * np.cos(angle), height])
def _get_config(self):
config = super(RandomOffsetCameraTargetPolicy, self)._get_config()
config.pop('offset')
config.update({'height': self.height,
'radius': self.radius,
'angle': self.angle})
return config
|
[
"numpy.sin",
"numpy.cos",
"numpy.random.uniform"
] |
[((1027, 1058), 'numpy.random.uniform', 'np.random.uniform', (['*self.height'], {}), '(*self.height)\n', (1044, 1058), True, 'import numpy as np\n'), ((1135, 1166), 'numpy.random.uniform', 'np.random.uniform', (['*self.radius'], {}), '(*self.radius)\n', (1152, 1166), True, 'import numpy as np\n'), ((1242, 1272), 'numpy.random.uniform', 'np.random.uniform', (['*self.angle'], {}), '(*self.angle)\n', (1259, 1272), True, 'import numpy as np\n'), ((1364, 1377), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1370, 1377), True, 'import numpy as np\n'), ((1389, 1402), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1395, 1402), True, 'import numpy as np\n')]
|
"""
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags, coo_matrix
import pandas as pd
from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs
def _area_tables_binning(source_df, target_df, spatial_index):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
...
NOTE: this currently relies on Geopandas' spatial index machinery
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
    target_df : geopandas.GeoDataFrame
GeoDataFrame defining the output geometries
spatial_index : str
Spatial index to use to build the allocation of area from source to
        target tables. It currently supports the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df.copy()
df2 = target_df.copy()
# it is generally more performant to use the longer df as spatial index
if spatial_index == "auto":
if df1.shape[0] > df2.shape[0]:
spatial_index = "source"
else:
spatial_index = "target"
if spatial_index == "source":
ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects")
elif spatial_index == "target":
ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects")
else:
raise ValueError(
f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'."
)
areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
def _area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
    target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row[row.geometry.name].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
def _area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
spatial_index="auto",
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
[Optional. Default=None] Columns in dataframes for extensive variables
intensive_variables : list
[Optional. Default=None] Columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
[Optional. Default=None] Area allocation source-target correspondence
table. If not provided, it will be built from `source_df` and
`target_df` using `tobler.area_interpolate._area_tables_binning`
allocate_total : boolean
[Optional. Default=True] True if total value of source area should be
allocated. False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is exhausted by
intersections. See Notes for more details.
spatial_index : str
[Optional. Default="auto"] Spatial index to use to build the
        allocation of area from source to target tables. It currently supports
the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
estimates : geopandas.GeoDataFrame
        new geodataframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
table = _area_tables_binning(source_df, target_df, spatial_index)
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
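    # guard against division by zero: sources with zero total intersection area get a denominator of 1 (their weights remain 0)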
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
extensive = np.asarray(extensive)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
area = np.asarray(table.sum(axis=0))
den = 1.0 / (area + (area == 0))
n, k = den.shape
den = den.reshape((k,))
den = diags([den], [0])
weights = table.dot(den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
n = vals.shape[0]
vals = vals.reshape((n,))
estimates = diags([vals], [0])
estimates = estimates.dot(weights).sum(axis=0)
intensive.append(estimates.tolist()[0])
intensive = np.asarray(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_interpolate(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
tables=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
target_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
extensive_variables : list, (optional)
columns in dataframes for extensive variables
intensive_variables : list, (optional)
columns in dataframes for intensive variables
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
        new geodataframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if tables is None:
SU, UT = _area_tables(source_df, target_df)
else:
SU, UT = tables
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = SU.sum(axis=1)
den = den + (den == 0)
weights = np.dot(np.diag(1 / den), SU)
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = np.dot(np.diag(vals), weights)
estimates = np.dot(estimates, UT)
estimates = estimates.sum(axis=0)
extensive.append(estimates)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
ST = np.dot(SU, UT)
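    # ST[i, t] is the area of source polygon i that falls within target polygon t (summed over union pieces)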
area = ST.sum(axis=0)
den = np.diag(1.0 / (area + (area == 0)))
weights = np.dot(ST, den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
vals.shape = (len(vals), 1)
est = (vals * weights).sum(axis=0)
intensive.append(est)
intensive = np.array(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_tables_raster(
source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True
):
"""
Construct area allocation and source-target correspondence tables according to a raster 'populated' areas
Parameters
----------
source_df : geopandas.GeoDataFrame
        geodataframe with geometry column of polygon type
target_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
raster_path : str
the path to the associated raster image.
codes : list
list of integer code values that should be considered as 'populated'.
        Since this draws inspiration from the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).
The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html
        Only taken into consideration for raster-based harmonization.
force_crs_match : bool (default is True)
Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.
        It is recommended to leave this argument as True.
Returns
-------
tables: tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union_pre = gpd.overlay(source_df, target_df, how="union")
# Establishing a CRS for the generated union
warnings.warn(
"The CRS for the generated union will be set to be the same as source_df."
)
res_union_pre.crs = source_df.crs
# The 'append_profile_in_gdf' function is present in nlcd.py script
res_union = _fast_append_profile_in_gdf(
res_union_pre, raster_path, force_crs_match=force_crs_match
)
str_codes = [str(i) for i in codes]
str_list = ["Type_" + i for i in str_codes]
# Extract list of code names that actually appear in the appended dataset
str_list_ok = [col for col in res_union.columns if col in str_list]
res_union["Populated_Pixels"] = res_union[str_list_ok].sum(axis=1)
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row["Populated_Pixels"]
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
|
[
"scipy.sparse.diags",
"tobler.util.util._check_crs",
"pandas.DataFrame",
"numpy.asarray",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"tobler.util.util._nan_check",
"numpy.isnan",
"geopandas.overlay",
"scipy.sparse.coo_matrix",
"warnings.warn",
"tobler.util.util._inf_check",
"pandas.concat",
"numpy.arange"
] |
[((1390, 1422), 'tobler.util.util._check_crs', '_check_crs', (['source_df', 'target_df'], {}), '(source_df, target_df)\n', (1400, 1422), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((2243, 2340), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(areas, (ids_src, ids_tgt))'], {'shape': '(df1.shape[0], df2.shape[0])', 'dtype': 'np.float32'}), '((areas, (ids_src, ids_tgt)), shape=(df1.shape[0], df2.shape[0]),\n dtype=np.float32)\n', (2253, 2340), False, 'from scipy.sparse import dok_matrix, diags, coo_matrix\n'), ((3198, 3230), 'tobler.util.util._check_crs', '_check_crs', (['source_df', 'target_df'], {}), '(source_df, target_df)\n', (3208, 3230), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((3412, 3426), 'numpy.arange', 'np.arange', (['n_s'], {}), '(n_s)\n', (3421, 3426), True, 'import numpy as np\n'), ((3440, 3454), 'numpy.arange', 'np.arange', (['n_t'], {}), '(n_t)\n', (3449, 3454), True, 'import numpy as np\n'), ((3621, 3667), 'geopandas.overlay', 'gpd.overlay', (['source_df', 'target_df'], {'how': '"""union"""'}), "(source_df, target_df, how='union')\n", (3632, 3667), True, 'import geopandas as gpd\n'), ((3706, 3726), 'numpy.zeros', 'np.zeros', (['(n_s, n_u)'], {}), '((n_s, n_u))\n', (3714, 3726), True, 'import numpy as np\n'), ((3811, 3831), 'numpy.zeros', 'np.zeros', (['(n_u, n_t)'], {}), '((n_u, n_t))\n', (3819, 3831), True, 'import numpy as np\n'), ((7155, 7187), 'tobler.util.util._check_crs', '_check_crs', (['source_df', 'target_df'], {}), '(source_df, target_df)\n', (7165, 7187), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((7560, 7577), 'scipy.sparse.diags', 'diags', (['[den]', '[0]'], {}), '([den], [0])\n', (7565, 7577), False, 'from scipy.sparse import dok_matrix, diags, coo_matrix\n'), ((8290, 8307), 'scipy.sparse.diags', 'diags', (['[den]', '[0]'], {}), '([den], [0])\n', (8295, 8307), False, 'from scipy.sparse import dok_matrix, diags, coo_matrix\n'), ((8999, 9021), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (9008, 9021), True, 'import pandas as pd\n'), ((11334, 11366), 'tobler.util.util._check_crs', '_check_crs', (['source_df', 'target_df'], {}), '(source_df, target_df)\n', (11344, 11366), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((12220, 12234), 'numpy.dot', 'np.dot', (['SU', 'UT'], {}), '(SU, UT)\n', (12226, 12234), True, 'import numpy as np\n'), ((12271, 12306), 'numpy.diag', 'np.diag', (['(1.0 / (area + (area == 0)))'], {}), '(1.0 / (area + (area == 0)))\n', (12278, 12306), True, 'import numpy as np\n'), ((12321, 12336), 'numpy.dot', 'np.dot', (['ST', 'den'], {}), '(ST, den)\n', (12327, 12336), True, 'import numpy as np\n'), ((12894, 12916), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (12903, 12916), True, 'import pandas as pd\n'), ((14868, 14900), 'tobler.util.util._check_crs', '_check_crs', (['source_df', 'target_df'], {}), '(source_df, target_df)\n', (14878, 14900), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((15081, 15095), 'numpy.arange', 'np.arange', (['n_s'], {}), '(n_s)\n', (15090, 15095), True, 'import numpy as np\n'), ((15109, 15123), 'numpy.arange', 'np.arange', (['n_t'], {}), '(n_t)\n', (15118, 15123), True, 'import numpy as np\n'), ((15295, 15341), 'geopandas.overlay', 'gpd.overlay', (['source_df', 'target_df'], 
{'how': '"""union"""'}), "(source_df, target_df, how='union')\n", (15306, 15341), True, 'import geopandas as gpd\n'), ((15396, 15490), 'warnings.warn', 'warnings.warn', (['"""The CRS for the generated union will be set to be the same as source_df."""'], {}), "(\n 'The CRS for the generated union will be set to be the same as source_df.')\n", (15409, 15490), False, 'import warnings\n'), ((16081, 16101), 'numpy.zeros', 'np.zeros', (['(n_s, n_u)'], {}), '((n_s, n_u))\n', (16089, 16101), True, 'import numpy as np\n'), ((16186, 16206), 'numpy.zeros', 'np.zeros', (['(n_u, n_t)'], {}), '((n_u, n_t))\n', (16194, 16206), True, 'import numpy as np\n'), ((8015, 8036), 'numpy.asarray', 'np.asarray', (['extensive'], {}), '(extensive)\n', (8025, 8036), True, 'import numpy as np\n'), ((8057, 8076), 'numpy.array', 'np.array', (['extensive'], {}), '(extensive)\n', (8065, 8076), True, 'import numpy as np\n'), ((8097, 8151), 'pandas.DataFrame', 'pd.DataFrame', (['extensive.T'], {'columns': 'extensive_variables'}), '(extensive.T, columns=extensive_variables)\n', (8109, 8151), True, 'import pandas as pd\n'), ((8775, 8796), 'numpy.asarray', 'np.asarray', (['intensive'], {}), '(intensive)\n', (8785, 8796), True, 'import numpy as np\n'), ((8817, 8871), 'pandas.DataFrame', 'pd.DataFrame', (['intensive.T'], {'columns': 'intensive_variables'}), '(intensive.T, columns=intensive_variables)\n', (8829, 8871), True, 'import pandas as pd\n'), ((11678, 11694), 'numpy.diag', 'np.diag', (['(1 / den)'], {}), '(1 / den)\n', (11685, 11694), True, 'import numpy as np\n'), ((12115, 12134), 'numpy.array', 'np.array', (['extensive'], {}), '(extensive)\n', (12123, 12134), True, 'import numpy as np\n'), ((12155, 12209), 'pandas.DataFrame', 'pd.DataFrame', (['extensive.T'], {'columns': 'extensive_variables'}), '(extensive.T, columns=extensive_variables)\n', (12167, 12209), True, 'import pandas as pd\n'), ((12672, 12691), 'numpy.array', 'np.array', (['intensive'], {}), '(intensive)\n', (12680, 12691), True, 'import numpy as np\n'), ((12712, 12766), 'pandas.DataFrame', 'pd.DataFrame', (['intensive.T'], {'columns': 'intensive_variables'}), '(intensive.T, columns=intensive_variables)\n', (12724, 12766), True, 'import pandas as pd\n'), ((7757, 7788), 'tobler.util.util._nan_check', '_nan_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (7767, 7788), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((7808, 7839), 'tobler.util.util._inf_check', '_inf_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (7818, 7839), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((8449, 8480), 'tobler.util.util._nan_check', '_nan_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (8459, 8480), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((8500, 8531), 'tobler.util.util._inf_check', '_inf_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (8510, 8531), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((8624, 8642), 'scipy.sparse.diags', 'diags', (['[vals]', '[0]'], {}), '([vals], [0])\n', (8629, 8642), False, 'from scipy.sparse import dok_matrix, diags, coo_matrix\n'), ((11825, 11856), 'tobler.util.util._nan_check', '_nan_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (11835, 11856), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, 
_check_presence_of_crs\n'), ((11876, 11907), 'tobler.util.util._inf_check', '_inf_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (11886, 11907), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((11987, 12008), 'numpy.dot', 'np.dot', (['estimates', 'UT'], {}), '(estimates, UT)\n', (11993, 12008), True, 'import numpy as np\n'), ((12448, 12479), 'tobler.util.util._nan_check', '_nan_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (12458, 12479), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((12499, 12530), 'tobler.util.util._inf_check', '_inf_check', (['source_df', 'variable'], {}), '(source_df, variable)\n', (12509, 12530), False, 'from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n'), ((4029, 4051), 'numpy.isnan', 'np.isnan', (["row['_left']"], {}), "(row['_left'])\n", (4037, 4051), True, 'import numpy as np\n'), ((4060, 4083), 'numpy.isnan', 'np.isnan', (["row['_right']"], {}), "(row['_right'])\n", (4068, 4083), True, 'import numpy as np\n'), ((11939, 11952), 'numpy.diag', 'np.diag', (['vals'], {}), '(vals)\n', (11946, 11952), True, 'import numpy as np\n'), ((16405, 16427), 'numpy.isnan', 'np.isnan', (["row['_left']"], {}), "(row['_left'])\n", (16413, 16427), True, 'import numpy as np\n'), ((16436, 16459), 'numpy.isnan', 'np.isnan', (["row['_right']"], {}), "(row['_right'])\n", (16444, 16459), True, 'import numpy as np\n'), ((7864, 7882), 'scipy.sparse.diags', 'diags', (['[vals]', '[0]'], {}), '([vals], [0])\n', (7869, 7882), False, 'from scipy.sparse import dok_matrix, diags, coo_matrix\n')]
|
import numpy as np
import pytest
import apexpy
import tempfile
import os
import h5py
from ttools import create_dataset, config, io, utils
map_periods = [np.timedelta64(10, 'm'), np.timedelta64(30, 'm'), np.timedelta64(1, 'h'), np.timedelta64(2, 'h')]
@pytest.fixture
def times():
yield np.datetime64('2010-01-01T00:00:00') + np.arange(100) * np.timedelta64(5, 'm')
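    # the fixture yields 100 timestamps spaced 5 minutes apart, starting at 2010-01-01T00:00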
@pytest.mark.parametrize('map_period', map_periods)
def test_assemble_args(times, map_period):
mlat = np.arange(10)
mlt = np.arange(10)
ssmlon = np.random.rand(times.shape[0])
mlt, mlat = np.meshgrid(mlt, mlat)
mlat = mlat[None, :, :] * np.ones((times.shape[0], 1, 1))
mlt = mlt[None, :, :] * np.ones((times.shape[0], 1, 1))
tec = np.random.rand(*mlat.shape)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges, bin_edges]
args = create_dataset.assemble_binning_args(mlat, mlt, tec, times, ssmlon, bins, map_period)
assert len(args) == np.ceil((times[-1] - times[0]) / map_period)
assert args[0][3][0] == times[0]
assert args[-1][3][0] + map_period >= times[-1]
assert args[-1][3][0] < times[-1]
assert args[-1][3][-1] == times[-1]
for i in range(len(args) - 1):
assert args[i][3][-1] == args[i + 1][3][0] - np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_process_file(madrigal_data_dir, map_period):
"""not that good of a test: wait for bugs and add asserts
"""
start_date = np.datetime64('2012-06-08')
end_date = np.datetime64('2012-06-13')
converter = apexpy.Apex()
mlat, mlon = create_dataset.get_mag_grid(config.madrigal_lat, config.madrigal_lon, converter)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges + 30, bin_edges]
times, tec, ssmlon, n, std = create_dataset.process_file(start_date, end_date, mlat, mlon, converter, bins,
map_period, madrigal_data_dir)
assert times.shape[0] == tec.shape[0] == n.shape[0] == std.shape[0] == ssmlon.shape[0]
assert np.isnan(tec[times < np.datetime64('2012-06-10')]).all()
assert np.isnan(tec[times >= np.datetime64('2012-06-11')]).all()
assert np.isfinite(tec[(times >= np.datetime64('2012-06-10')) * (times < np.datetime64('2012-06-11'))]).any()
assert not np.isnan(tec).all(axis=(0, 1)).any()
assert not np.isnan(tec).all(axis=(0, 2)).any()
def test_calculate_bins():
mlat = np.arange(10)[None, :, None] * np.ones((1, 1, 10))
mlt = np.arange(10)[None, None, :] * np.ones((1, 10, 1))
tec = np.zeros((1, 10, 10))
tec[0, 0, 0] = 10
tec[0, 0, -1] = 20
tec[0, -1, 0] = 30
times = ssmlon = np.ones(1) * np.nan
be = np.array([-.5, 4.5, 9.5])
bins = [be, be]
out_t, out_tec, out_ssm, out_n, out_std = create_dataset.calculate_bins(mlat.ravel(), mlt.ravel(), tec.ravel(),
times, ssmlon, bins)
assert np.isnan(out_t)
assert np.isnan(out_ssm)
assert out_tec.shape == (2, 2)
assert out_tec[0, 0] == 10 / 25
assert out_tec[0, 1] == 20 / 25
assert out_tec[1, 0] == 30 / 25
assert out_tec[1, 1] == 0
assert np.all(out_n == 25)
def test_process_dataset():
start_date = np.datetime64("2012-03-07")
end_date = np.datetime64("2012-03-08")
file_dt = np.timedelta64(12, 'h')
mlat_bins = np.array([35, 45, 55, 65])
mlt_bins = np.array([-1.5, -.5, .5, 1.5])
def fn_pattern(date):
return f"{date.astype('datetime64[h]')}.h5"
dates = np.arange(start_date, end_date, file_dt)
with tempfile.TemporaryDirectory() as tempdir:
files = [os.path.join(tempdir, fn_pattern(d)) for d in dates]
create_dataset.process_dataset(start_date, end_date, mlat_bins, mlt_bins, apex_dt=np.timedelta64(365, 'D'),
file_dt=file_dt, output_dir=tempdir, file_name_pattern=fn_pattern)
grid_fn = os.path.join(tempdir, 'grid.h5')
assert os.path.exists(grid_fn)
with h5py.File(grid_fn, 'r') as f:
mlt_vals = f['mlt'][()]
mlat_vals = f['mlat'][()]
assert np.all(mlt_vals == [-1, 0, 1])
assert np.all(mlat_vals == [40, 50, 60])
for f, d in zip(files, dates):
assert os.path.exists(f)
tec, times, ssmlon, n, std = io.open_tec_file(f)
assert tec.shape == (12, 3, 3)
assert utils.datetime64_to_timestamp(d) == times[0]
|
[
"numpy.random.rand",
"numpy.array",
"numpy.arange",
"os.path.exists",
"numpy.datetime64",
"numpy.meshgrid",
"ttools.create_dataset.process_file",
"numpy.ceil",
"numpy.ones",
"h5py.File",
"numpy.isnan",
"ttools.io.open_tec_file",
"numpy.timedelta64",
"apexpy.Apex",
"tempfile.TemporaryDirectory",
"ttools.utils.datetime64_to_timestamp",
"ttools.create_dataset.assemble_binning_args",
"os.path.join",
"pytest.mark.parametrize",
"numpy.zeros",
"ttools.create_dataset.get_mag_grid",
"numpy.all"
] |
[((376, 426), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""map_period"""', 'map_periods'], {}), "('map_period', map_periods)\n", (399, 426), False, 'import pytest\n'), ((1278, 1328), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""map_period"""', 'map_periods'], {}), "('map_period', map_periods)\n", (1301, 1328), False, 'import pytest\n'), ((155, 178), 'numpy.timedelta64', 'np.timedelta64', (['(10)', '"""m"""'], {}), "(10, 'm')\n", (169, 178), True, 'import numpy as np\n'), ((180, 203), 'numpy.timedelta64', 'np.timedelta64', (['(30)', '"""m"""'], {}), "(30, 'm')\n", (194, 203), True, 'import numpy as np\n'), ((205, 227), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (219, 227), True, 'import numpy as np\n'), ((229, 251), 'numpy.timedelta64', 'np.timedelta64', (['(2)', '"""h"""'], {}), "(2, 'h')\n", (243, 251), True, 'import numpy as np\n'), ((481, 494), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (490, 494), True, 'import numpy as np\n'), ((505, 518), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (514, 518), True, 'import numpy as np\n'), ((532, 562), 'numpy.random.rand', 'np.random.rand', (['times.shape[0]'], {}), '(times.shape[0])\n', (546, 562), True, 'import numpy as np\n'), ((579, 601), 'numpy.meshgrid', 'np.meshgrid', (['mlt', 'mlat'], {}), '(mlt, mlat)\n', (590, 601), True, 'import numpy as np\n'), ((734, 761), 'numpy.random.rand', 'np.random.rand', (['*mlat.shape'], {}), '(*mlat.shape)\n', (748, 761), True, 'import numpy as np\n'), ((778, 797), 'numpy.arange', 'np.arange', (['(-0.5)', '(10)'], {}), '(-0.5, 10)\n', (787, 797), True, 'import numpy as np\n'), ((842, 931), 'ttools.create_dataset.assemble_binning_args', 'create_dataset.assemble_binning_args', (['mlat', 'mlt', 'tec', 'times', 'ssmlon', 'bins', 'map_period'], {}), '(mlat, mlt, tec, times, ssmlon, bins,\n map_period)\n', (878, 931), False, 'from ttools import create_dataset, config, io, utils\n'), ((1470, 1497), 'numpy.datetime64', 'np.datetime64', (['"""2012-06-08"""'], {}), "('2012-06-08')\n", (1483, 1497), True, 'import numpy as np\n'), ((1513, 1540), 'numpy.datetime64', 'np.datetime64', (['"""2012-06-13"""'], {}), "('2012-06-13')\n", (1526, 1540), True, 'import numpy as np\n'), ((1557, 1570), 'apexpy.Apex', 'apexpy.Apex', ([], {}), '()\n', (1568, 1570), False, 'import apexpy\n'), ((1588, 1673), 'ttools.create_dataset.get_mag_grid', 'create_dataset.get_mag_grid', (['config.madrigal_lat', 'config.madrigal_lon', 'converter'], {}), '(config.madrigal_lat, config.madrigal_lon, converter\n )\n', (1615, 1673), False, 'from ttools import create_dataset, config, io, utils\n'), ((1685, 1704), 'numpy.arange', 'np.arange', (['(-0.5)', '(10)'], {}), '(-0.5, 10)\n', (1694, 1704), True, 'import numpy as np\n'), ((1776, 1889), 'ttools.create_dataset.process_file', 'create_dataset.process_file', (['start_date', 'end_date', 'mlat', 'mlon', 'converter', 'bins', 'map_period', 'madrigal_data_dir'], {}), '(start_date, end_date, mlat, mlon, converter,\n bins, map_period, madrigal_data_dir)\n', (1803, 1889), False, 'from ttools import create_dataset, config, io, utils\n'), ((2555, 2576), 'numpy.zeros', 'np.zeros', (['(1, 10, 10)'], {}), '((1, 10, 10))\n', (2563, 2576), True, 'import numpy as np\n'), ((2695, 2721), 'numpy.array', 'np.array', (['[-0.5, 4.5, 9.5]'], {}), '([-0.5, 4.5, 9.5])\n', (2703, 2721), True, 'import numpy as np\n'), ((2965, 2980), 'numpy.isnan', 'np.isnan', (['out_t'], {}), '(out_t)\n', (2973, 2980), True, 'import numpy as np\n'), ((2992, 3009), 
'numpy.isnan', 'np.isnan', (['out_ssm'], {}), '(out_ssm)\n', (3000, 3009), True, 'import numpy as np\n'), ((3194, 3213), 'numpy.all', 'np.all', (['(out_n == 25)'], {}), '(out_n == 25)\n', (3200, 3213), True, 'import numpy as np\n'), ((3261, 3288), 'numpy.datetime64', 'np.datetime64', (['"""2012-03-07"""'], {}), "('2012-03-07')\n", (3274, 3288), True, 'import numpy as np\n'), ((3304, 3331), 'numpy.datetime64', 'np.datetime64', (['"""2012-03-08"""'], {}), "('2012-03-08')\n", (3317, 3331), True, 'import numpy as np\n'), ((3346, 3369), 'numpy.timedelta64', 'np.timedelta64', (['(12)', '"""h"""'], {}), "(12, 'h')\n", (3360, 3369), True, 'import numpy as np\n'), ((3386, 3412), 'numpy.array', 'np.array', (['[35, 45, 55, 65]'], {}), '([35, 45, 55, 65])\n', (3394, 3412), True, 'import numpy as np\n'), ((3428, 3460), 'numpy.array', 'np.array', (['[-1.5, -0.5, 0.5, 1.5]'], {}), '([-1.5, -0.5, 0.5, 1.5])\n', (3436, 3460), True, 'import numpy as np\n'), ((3551, 3591), 'numpy.arange', 'np.arange', (['start_date', 'end_date', 'file_dt'], {}), '(start_date, end_date, file_dt)\n', (3560, 3591), True, 'import numpy as np\n'), ((632, 663), 'numpy.ones', 'np.ones', (['(times.shape[0], 1, 1)'], {}), '((times.shape[0], 1, 1))\n', (639, 663), True, 'import numpy as np\n'), ((692, 723), 'numpy.ones', 'np.ones', (['(times.shape[0], 1, 1)'], {}), '((times.shape[0], 1, 1))\n', (699, 723), True, 'import numpy as np\n'), ((952, 996), 'numpy.ceil', 'np.ceil', (['((times[-1] - times[0]) / map_period)'], {}), '((times[-1] - times[0]) / map_period)\n', (959, 996), True, 'import numpy as np\n'), ((2464, 2483), 'numpy.ones', 'np.ones', (['(1, 1, 10)'], {}), '((1, 1, 10))\n', (2471, 2483), True, 'import numpy as np\n'), ((2525, 2544), 'numpy.ones', 'np.ones', (['(1, 10, 1)'], {}), '((1, 10, 1))\n', (2532, 2544), True, 'import numpy as np\n'), ((2666, 2676), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2673, 2676), True, 'import numpy as np\n'), ((3602, 3631), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3629, 3631), False, 'import tempfile\n'), ((3955, 3987), 'os.path.join', 'os.path.join', (['tempdir', '"""grid.h5"""'], {}), "(tempdir, 'grid.h5')\n", (3967, 3987), False, 'import os\n'), ((4003, 4026), 'os.path.exists', 'os.path.exists', (['grid_fn'], {}), '(grid_fn)\n', (4017, 4026), False, 'import os\n'), ((4159, 4189), 'numpy.all', 'np.all', (['(mlt_vals == [-1, 0, 1])'], {}), '(mlt_vals == [-1, 0, 1])\n', (4165, 4189), True, 'import numpy as np\n'), ((4205, 4238), 'numpy.all', 'np.all', (['(mlat_vals == [40, 50, 60])'], {}), '(mlat_vals == [40, 50, 60])\n', (4211, 4238), True, 'import numpy as np\n'), ((294, 330), 'numpy.datetime64', 'np.datetime64', (['"""2010-01-01T00:00:00"""'], {}), "('2010-01-01T00:00:00')\n", (307, 330), True, 'import numpy as np\n'), ((2433, 2446), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2442, 2446), True, 'import numpy as np\n'), ((2494, 2507), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2503, 2507), True, 'import numpy as np\n'), ((4040, 4063), 'h5py.File', 'h5py.File', (['grid_fn', '"""r"""'], {}), "(grid_fn, 'r')\n", (4049, 4063), False, 'import h5py\n'), ((4298, 4315), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (4312, 4315), False, 'import os\n'), ((4357, 4376), 'ttools.io.open_tec_file', 'io.open_tec_file', (['f'], {}), '(f)\n', (4373, 4376), False, 'from ttools import create_dataset, config, io, utils\n'), ((333, 347), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (342, 347), True, 'import numpy as 
np\n'), ((350, 372), 'numpy.timedelta64', 'np.timedelta64', (['(5)', '"""m"""'], {}), "(5, 'm')\n", (364, 372), True, 'import numpy as np\n'), ((1252, 1274), 'numpy.timedelta64', 'np.timedelta64', (['(5)', '"""m"""'], {}), "(5, 'm')\n", (1266, 1274), True, 'import numpy as np\n'), ((3804, 3828), 'numpy.timedelta64', 'np.timedelta64', (['(365)', '"""D"""'], {}), "(365, 'D')\n", (3818, 3828), True, 'import numpy as np\n'), ((4439, 4471), 'ttools.utils.datetime64_to_timestamp', 'utils.datetime64_to_timestamp', (['d'], {}), '(d)\n', (4468, 4471), False, 'from ttools import create_dataset, config, io, utils\n'), ((2070, 2097), 'numpy.datetime64', 'np.datetime64', (['"""2012-06-10"""'], {}), "('2012-06-10')\n", (2083, 2097), True, 'import numpy as np\n'), ((2139, 2166), 'numpy.datetime64', 'np.datetime64', (['"""2012-06-11"""'], {}), "('2012-06-11')\n", (2152, 2166), True, 'import numpy as np\n'), ((2304, 2317), 'numpy.isnan', 'np.isnan', (['tec'], {}), '(tec)\n', (2312, 2317), True, 'import numpy as np\n'), ((2356, 2369), 'numpy.isnan', 'np.isnan', (['tec'], {}), '(tec)\n', (2364, 2369), True, 'import numpy as np\n'), ((2212, 2239), 'numpy.datetime64', 'np.datetime64', (['"""2012-06-10"""'], {}), "('2012-06-10')\n", (2225, 2239), True, 'import numpy as np\n'), ((2252, 2279), 'numpy.datetime64', 'np.datetime64', (['"""2012-06-11"""'], {}), "('2012-06-11')\n", (2265, 2279), True, 'import numpy as np\n')]
|
"""Randomize the minitaur_gym_alternating_leg_env when reset() is called.
The randomization includes the swing_offset and extension_offset of all legs (mimicking
bent legs), the desired_pitch from user input, the battery voltage and the motor damping.
"""
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import tensorflow.compat.v1 as tf
from pybullet_envs.minitaur.envs import env_randomizer_base
# Absolute range.
NUM_LEGS = 4
BATTERY_VOLTAGE_RANGE = (14.8, 16.8)
MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01)
class MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase):
"""A randomizer that changes the minitaur_gym_alternating_leg_env."""
def __init__(self,
perturb_swing_bound=0.1,
perturb_extension_bound=0.1,
perturb_desired_pitch_bound=0.01):
super(MinitaurAlternatingLegsEnvRandomizer, self).__init__()
self.perturb_swing_bound = perturb_swing_bound
self.perturb_extension_bound = perturb_extension_bound
self.perturb_desired_pitch_bound = perturb_desired_pitch_bound
def randomize_env(self, env):
perturb_magnitude = np.random.uniform(low=-self.perturb_swing_bound,
high=self.perturb_swing_bound,
size=NUM_LEGS)
env.set_swing_offset(perturb_magnitude)
tf.logging.info("swing_offset: {}".format(perturb_magnitude))
perturb_magnitude = np.random.uniform(low=-self.perturb_extension_bound,
high=self.perturb_extension_bound,
size=NUM_LEGS)
env.set_extension_offset(perturb_magnitude)
tf.logging.info("extension_offset: {}".format(perturb_magnitude))
perturb_magnitude = np.random.uniform(low=-self.perturb_desired_pitch_bound,
high=self.perturb_desired_pitch_bound)
env.set_desired_pitch(perturb_magnitude)
tf.logging.info("desired_pitch: {}".format(perturb_magnitude))
randomized_battery_voltage = np.random.uniform(BATTERY_VOLTAGE_RANGE[0],
BATTERY_VOLTAGE_RANGE[1])
env.minitaur.SetBatteryVoltage(randomized_battery_voltage)
tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage))
randomized_motor_damping = np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0],
MOTOR_VISCOUS_DAMPING_RANGE[1])
env.minitaur.SetMotorViscousDamping(randomized_motor_damping)
tf.logging.info("motor_damping: {}".format(randomized_motor_damping))
|
[
"os.path.dirname",
"inspect.currentframe",
"os.sys.path.insert",
"numpy.random.uniform"
] |
[((457, 489), 'os.sys.path.insert', 'os.sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (475, 489), False, 'import os, inspect\n'), ((372, 399), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (387, 399), False, 'import os, inspect\n'), ((429, 455), 'os.path.dirname', 'os.path.dirname', (['parentdir'], {}), '(parentdir)\n', (444, 455), False, 'import os, inspect\n'), ((1317, 1416), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.perturb_swing_bound)', 'high': 'self.perturb_swing_bound', 'size': 'NUM_LEGS'}), '(low=-self.perturb_swing_bound, high=self.\n perturb_swing_bound, size=NUM_LEGS)\n', (1334, 1416), True, 'import numpy as np\n'), ((1631, 1738), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.perturb_extension_bound)', 'high': 'self.perturb_extension_bound', 'size': 'NUM_LEGS'}), '(low=-self.perturb_extension_bound, high=self.\n perturb_extension_bound, size=NUM_LEGS)\n', (1648, 1738), True, 'import numpy as np\n'), ((1961, 2061), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.perturb_desired_pitch_bound)', 'high': 'self.perturb_desired_pitch_bound'}), '(low=-self.perturb_desired_pitch_bound, high=self.\n perturb_desired_pitch_bound)\n', (1978, 2061), True, 'import numpy as np\n'), ((2245, 2314), 'numpy.random.uniform', 'np.random.uniform', (['BATTERY_VOLTAGE_RANGE[0]', 'BATTERY_VOLTAGE_RANGE[1]'], {}), '(BATTERY_VOLTAGE_RANGE[0], BATTERY_VOLTAGE_RANGE[1])\n', (2262, 2314), True, 'import numpy as np\n'), ((2539, 2624), 'numpy.random.uniform', 'np.random.uniform', (['MOTOR_VISCOUS_DAMPING_RANGE[0]', 'MOTOR_VISCOUS_DAMPING_RANGE[1]'], {}), '(MOTOR_VISCOUS_DAMPING_RANGE[0],\n MOTOR_VISCOUS_DAMPING_RANGE[1])\n', (2556, 2624), True, 'import numpy as np\n'), ((318, 340), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (338, 340), False, 'import os, inspect\n')]
|
"""
The TensorProductState class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import functools as _functools
import itertools as _itertools
import numpy as _np
from pygsti.modelmembers.states.state import State as _State
from pygsti.modelmembers import modelmember as _modelmember, term as _term
from pygsti.baseobjs import statespace as _statespace
from pygsti.tools import listtools as _lt
from pygsti.tools import matrixtools as _mt
class TensorProductState(_State):
"""
A state vector that is a tensor-product of other state vectors.
Parameters
----------
factors : list of States
a list of the component states to take the tensor product of.
state_space : StateSpace, optional
The state space for this operation.
"""
def __init__(self, factors, state_space):
assert(len(factors) > 0), "Must have at least one factor!"
self.factors = factors # do *not* copy - needs to reference common objects
evotype = self.factors[0]._evotype
rep = evotype.create_tensorproduct_state_rep([f._rep for f in factors], state_space)
_State.__init__(self, rep, evotype)
self.init_gpindices() # initialize our gpindices based on sub-members
self._update_rep() # initializes rep data
#Note: no to_memoized_dict needed, as ModelMember version does all we need.
@classmethod
def _from_memoized_dict(cls, mm_dict, serial_memo):
state_space = _statespace.StateSpace.from_nice_serialization(mm_dict['state_space'])
factors = [serial_memo[i] for i in mm_dict['submembers']]
return cls(factors, state_space)
def submembers(self):
"""
Get the ModelMember-derived objects contained in this one.
Returns
-------
list
"""
return self.factors # factor POVM object
def _update_rep(self):
self._rep.reps_have_changed()
@property
def parameter_labels(self):
"""
An array of labels (usually strings) describing this model member's parameters.
"""
vl = _np.empty(self.num_params, dtype=object)
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
vl[factor_local_inds] = factor_state.parameter_labels
return vl
def to_dense(self, on_space='minimal', scratch=None):
"""
Return this state vector as a (dense) numpy array.
        The memory in `scratch` may be used when it is not None.
Parameters
----------
on_space : {'minimal', 'Hilbert', 'HilbertSchmidt'}
The space that the returned dense operation acts upon. For unitary matrices and bra/ket vectors,
use `'Hilbert'`. For superoperator matrices and super-bra/super-ket vectors use `'HilbertSchmidt'`.
`'minimal'` means that `'Hilbert'` is used if possible given this operator's evolution type, and
otherwise `'HilbertSchmidt'` is used.
scratch : numpy.ndarray, optional
scratch space available for use.
Returns
-------
numpy.ndarray
"""
return self._rep.to_dense(on_space)
def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False):
"""
Get the `order`-th order Taylor-expansion terms of this state vector.
This function either constructs or returns a cached list of the terms at
        the given order. Each term is "rank-1", meaning that it is a state
        preparation followed by, or a POVM effect preceded by, actions on a
        density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
State's parameters, where the polynomial's variable indices index the
        *global* parameters of the State's parent (usually a :class:`Model`),
        not the State's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
max_polynomial_vars : int, optional
maximum number of variables the created polynomials can have.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
terms = []
fnq = [int(round(_np.log2(f.dim))) // 2 for f in self.factors] # num of qubits per factor
# assumes density matrix evolution
total_nQ = sum(fnq) # total number of qubits
for p in _lt.partition_into(order, len(self.factors)):
factor_lists = [self.factors[i].taylor_order_terms(pi, max_polynomial_vars) for i, pi in enumerate(p)]
# When possible, create COLLAPSED factor_lists so each factor has just a single
# (State) pre & post op, which can be formed into the new terms'
# TensorProdState ops.
# - DON'T collapse stabilizer states & clifford ops - can't for POVMs
collapsible = False # bool(self._evotype =="svterm") # need to use reps for collapsing now... TODO?
if collapsible:
factor_lists = [[t.collapse_vec() for t in fterms] for fterms in factor_lists]
for factors in _itertools.product(*factor_lists):
# create a term with a TensorProdState - Note we always create
# "prep"-mode vectors, since even when self._prep_or_effect == "effect" these
# vectors are created with factor (prep- or effect-type) States not factor POVMs
                # we work around this by still allowing such "prep"-mode
# TensorProdStates to be represented as effects (i.e. in torep('effect'...) works)
coeff = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors])
pre_rep = self._evotype.create_tensorproduct_state_rep(
[f.pre_state for f in factors if (f.pre_state is not None)], self.state_space)
post_rep = self._evotype.create_tensorproduct_state_rep(
[f.post_state for f in factors if (f.post_state is not None)], self.state_space)
term = _term.RankOnePolynomialPrepTerm.create_from(coeff, pre_rep, post_rep,
self._evotype, self.state_space)
if not collapsible: # then may need to add more ops. Assume factor ops are clifford gates
# Embed each factors ops according to their target qubit(s) and just daisy chain them
ss = _statespace.QubitSpace(total_nQ); curQ = 0
for f, nq in zip(factors, fnq):
targetLabels = tuple(range(curQ, curQ + nq)); curQ += nq
term._rep.pre_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op)
for op in f.pre_ops]) # embed and add ops
term._rep.post_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op)
for op in f.post_ops]) # embed and add ops
terms.append(term)
if return_coeff_polys:
def _decompose_indices(x):
return tuple(_modelmember._decompose_gpindices(
self.gpindices, _np.array(x, _np.int64)))
poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices
tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs]
if len(tapes) > 0:
vtape = _np.concatenate([t[0] for t in tapes])
ctape = _np.concatenate([t[1] for t in tapes])
else:
vtape = _np.empty(0, _np.int64)
ctape = _np.empty(0, complex)
coeffs_as_compact_polys = (vtape, ctape)
#self.local_term_poly_coeffs[order] = coeffs_as_compact_polys #FUTURE?
return terms, coeffs_as_compact_polys
else:
return terms # Cache terms in FUTURE?
@property
def num_params(self):
"""
Get the number of independent parameters which specify this state vector.
Returns
-------
int
the number of independent parameters.
"""
return len(self.gpindices_as_array())
def to_vector(self):
"""
Get the state vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
v = _np.empty(self.num_params, 'd')
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
v[factor_local_inds] = factor_state.to_vector()
return v
def from_vector(self, v, close=False, dirty_value=True):
"""
Initialize the state vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of state vector parameters. Length
must == num_params()
close : bool, optional
Whether `v` is close to this state vector's current
set of parameters. Under some circumstances, when this
is true this call can be completed more quickly.
dirty_value : bool, optional
The value to set this object's "dirty flag" to before exiting this
call. This is passed as an argument so it can be updated *recursively*.
Leave this set to `True` unless you know what you're doing.
Returns
-------
None
"""
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
factor_state.from_vector(v[factor_local_inds], close, dirty_value)
#Update representation, which may be a dense matrix or
# just fast-kron arrays or a stabilizer state.
self._update_rep() # TODO - how does this apply to state reps??
def deriv_wrt_params(self, wrt_filter=None):
"""
        The element-wise derivative of this state vector.
Construct a matrix whose columns are the derivatives of the state vector
with respect to a single param. Thus, each column is of length
dimension and there is one column per state vector parameter.
An empty 2D array in the StaticState case (num_params == 0).
Parameters
----------
wrt_filter : list or numpy.ndarray
List of parameter indices to take derivative with respect to.
            (None means to use all of this operation's parameters.)
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
typ = self.factors[0].to_dense(on_space='minimal').dtype if len(self.factors) > 0 else 'd'
#HACK to deal with fact that output of to_dense is really what is differentiated
# but this may not match self.dim == self.state_space.dim, e.g. for pure state vecs.
dims = [len(fct.to_dense(on_space='minimal')) for fct in self.factors]
dim = int(_np.product(dims))
derivMx = _np.zeros((dim, self.num_params), typ)
#Product rule to compute jacobian
# loop over the spamvec/povm we differentiate wrt:
for i, (fct, fct_local_inds, fct_dim) in enumerate(zip(self.factors, self._submember_rpindices, dims)):
vec = fct
if vec.num_params == 0: continue # no contribution
deriv = vec.deriv_wrt_params(None) # TODO: use filter?? / make relative to this gate...
deriv.shape = (fct_dim, vec.num_params)
if i > 0: # factors before ith
pre = self.factors[0].to_dense(on_space='minimal')
for vecA in self.factors[1:i]:
pre = _np.kron(pre, vecA.to_dense(on_space='minimal'))
deriv = _np.kron(pre[:, None], deriv) # add a dummy 1-dim to 'pre' and do kron properly...
if i + 1 < len(self.factors): # factors after ith
post = self.factors[i + 1].to_dense(on_space='minimal')
for vecA in self.factors[i + 2:]:
post = _np.kron(post, vecA.to_dense(on_space='minimal'))
deriv = _np.kron(deriv, post[:, None]) # add a dummy 1-dim to 'post' and do kron properly...
assert(fct_local_inds is not None), \
"Error: gpindices has not been initialized for factor %d - cannot compute derivative!" % i
derivMx[:, fct_local_inds] += deriv
derivMx.shape = (dim, self.num_params) # necessary?
if wrt_filter is None:
return derivMx
else:
return _np.take(derivMx, wrt_filter, axis=1)
def has_nonzero_hessian(self):
"""
Whether this state vector has a non-zero Hessian with respect to its parameters.
Returns
-------
bool
"""
return False
def __str__(self):
s = "Tensor product %s vector with length %d\n" % (self._prep_or_effect, self.dim)
#ar = self.to_dense()
#s += _mt.mx_to_string(ar, width=4, prec=2)
# factors are just other States
s += " x ".join([_mt.mx_to_string(fct.to_dense(on_space='minimal'), width=4, prec=2) for fct in self.factors])
return s
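# Illustrative note (editorial assumption, not part of the original module): a
# TensorProductState built from two single-qubit factor states models the joint
# two-qubit state; to_dense() returns the tensor (Kronecker) product of the
# factor vectors, and deriv_wrt_params() applies the product rule, combining
# each factor's Jacobian with the other factors' dense vectors via np.kron.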
|
[
"numpy.product",
"itertools.product",
"pygsti.modelmembers.states.state.State.__init__",
"numpy.take",
"numpy.kron",
"numpy.zeros",
"numpy.array",
"numpy.empty",
"pygsti.baseobjs.statespace.QubitSpace",
"numpy.concatenate",
"pygsti.modelmembers.term.RankOnePolynomialPrepTerm.create_from",
"numpy.log2",
"pygsti.baseobjs.statespace.StateSpace.from_nice_serialization"
] |
[((1791, 1826), 'pygsti.modelmembers.states.state.State.__init__', '_State.__init__', (['self', 'rep', 'evotype'], {}), '(self, rep, evotype)\n', (1806, 1826), True, 'from pygsti.modelmembers.states.state import State as _State\n'), ((2134, 2204), 'pygsti.baseobjs.statespace.StateSpace.from_nice_serialization', '_statespace.StateSpace.from_nice_serialization', (["mm_dict['state_space']"], {}), "(mm_dict['state_space'])\n", (2180, 2204), True, 'from pygsti.baseobjs import statespace as _statespace\n'), ((2764, 2804), 'numpy.empty', '_np.empty', (['self.num_params'], {'dtype': 'object'}), '(self.num_params, dtype=object)\n', (2773, 2804), True, 'import numpy as _np\n'), ((9902, 9933), 'numpy.empty', '_np.empty', (['self.num_params', '"""d"""'], {}), "(self.num_params, 'd')\n", (9911, 9933), True, 'import numpy as _np\n'), ((12509, 12547), 'numpy.zeros', '_np.zeros', (['(dim, self.num_params)', 'typ'], {}), '((dim, self.num_params), typ)\n', (12518, 12547), True, 'import numpy as _np\n'), ((6499, 6532), 'itertools.product', '_itertools.product', (['*factor_lists'], {}), '(*factor_lists)\n', (6517, 6532), True, 'import itertools as _itertools\n'), ((12471, 12488), 'numpy.product', '_np.product', (['dims'], {}), '(dims)\n', (12482, 12488), True, 'import numpy as _np\n'), ((14076, 14113), 'numpy.take', '_np.take', (['derivMx', 'wrt_filter'], {'axis': '(1)'}), '(derivMx, wrt_filter, axis=1)\n', (14084, 14113), True, 'import numpy as _np\n'), ((7437, 7544), 'pygsti.modelmembers.term.RankOnePolynomialPrepTerm.create_from', '_term.RankOnePolynomialPrepTerm.create_from', (['coeff', 'pre_rep', 'post_rep', 'self._evotype', 'self.state_space'], {}), '(coeff, pre_rep, post_rep, self.\n _evotype, self.state_space)\n', (7480, 7544), True, 'from pygsti.modelmembers import modelmember as _modelmember, term as _term\n'), ((8904, 8942), 'numpy.concatenate', '_np.concatenate', (['[t[0] for t in tapes]'], {}), '([t[0] for t in tapes])\n', (8919, 8942), True, 'import numpy as _np\n'), ((8967, 9005), 'numpy.concatenate', '_np.concatenate', (['[t[1] for t in tapes]'], {}), '([t[1] for t in tapes])\n', (8982, 9005), True, 'import numpy as _np\n'), ((9048, 9071), 'numpy.empty', '_np.empty', (['(0)', '_np.int64'], {}), '(0, _np.int64)\n', (9057, 9071), True, 'import numpy as _np\n'), ((9096, 9117), 'numpy.empty', '_np.empty', (['(0)', 'complex'], {}), '(0, complex)\n', (9105, 9117), True, 'import numpy as _np\n'), ((13260, 13289), 'numpy.kron', '_np.kron', (['pre[:, None]', 'deriv'], {}), '(pre[:, None], deriv)\n', (13268, 13289), True, 'import numpy as _np\n'), ((13631, 13661), 'numpy.kron', '_np.kron', (['deriv', 'post[:, None]'], {}), '(deriv, post[:, None])\n', (13639, 13661), True, 'import numpy as _np\n'), ((7847, 7879), 'pygsti.baseobjs.statespace.QubitSpace', '_statespace.QubitSpace', (['total_nQ'], {}), '(total_nQ)\n', (7869, 7879), True, 'from pygsti.baseobjs import statespace as _statespace\n'), ((5597, 5612), 'numpy.log2', '_np.log2', (['f.dim'], {}), '(f.dim)\n', (5605, 5612), True, 'import numpy as _np\n'), ((8631, 8654), 'numpy.array', '_np.array', (['x', '_np.int64'], {}), '(x, _np.int64)\n', (8640, 8654), True, 'import numpy as _np\n')]
|
"""Treadmill hierarchical scheduler.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import collections
import datetime
import heapq
import itertools
import logging
import operator
import sys
import time
import enum
import numpy as np
import six
_LOGGER = logging.getLogger(__name__)
MAX_PRIORITY = 100
DEFAULT_RANK = 100
_UNPLACED_RANK = sys.maxsize
DIMENSION_COUNT = None
_MAX_UTILIZATION = float('inf')
_GLOBAL_ORDER_BASE = time.mktime((2014, 1, 1, 0, 0, 0, 0, 0, 0))
# 21 days
DEFAULT_SERVER_UPTIME = 21 * 24 * 60 * 60
# 1 day
MIN_SERVER_UPTIME = 1 * 24 * 60 * 60
# 7 days
DEFAULT_MAX_APP_LEASE = 7 * 24 * 60 * 60
# Default partition threshold
DEFAULT_THRESHOLD = 0.9
# pylint: disable=C0302,too-many-lines
def _bit_count(value):
"""Returns number of bits set.
"""
count = 0
while value:
value &= value - 1
count += 1
return count
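# Editorial sketch (example values are assumptions, not from the original
# source): _bit_count clears the lowest set bit on each iteration, so
# _bit_count(0b1011) == 3 and _bit_count(0) == 0.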
def zero_capacity():
"""Returns zero capacity vector.
"""
assert DIMENSION_COUNT is not None, 'Dimension count not set.'
return np.zeros(DIMENSION_COUNT)
def eps_capacity():
"""Returns eps capacity vector.
"""
assert DIMENSION_COUNT is not None, 'Dimension count not set.'
return np.array(
[np.finfo(float).eps for _x in range(0, DIMENSION_COUNT)]
)
def _global_order():
"""Use timestamp in nanoseconds, from Jan 1st 2014, to break tie in
scheduling conflicts for apps of the same priority, in a FIFO fashion.
"""
# Take the current EPOCH in nanosec
global_order = int(time.time() * 1000000) - _GLOBAL_ORDER_BASE
return global_order
def utilization(demand, allocated, available):
"""Calculates utilization score.
"""
return np.max(np.subtract(demand, allocated) / available)
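# Hedged worked example (assumed numbers): with demand=[1, 2], allocated=[2, 2]
# and available=[4, 4], utilization() returns max((1-2)/4, (2-2)/4) == 0.0.
# The most constrained dimension drives the score; values below zero indicate
# spare reserved capacity.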
def _all(oper, left, right):
"""Short circuit all for ndarray.
"""
return all(
oper(ai, bi)
for ai, bi in six.moves.zip(left, right)
)
def _any(oper, left, right):
"""Short circuit any for ndarray.
"""
return any(
oper(ai, bi)
for ai, bi in six.moves.zip(left, right)
)
def _any_eq(left, right):
"""Short circuit any eq for ndarray.
"""
return _any(operator.eq, left, right)
def _any_isclose(left, right):
"""Short circuit any isclose for ndarray.
"""
return _any(np.isclose, left, right)
def _any_lt(left, right):
"""Short circuit any lt for ndarray.
"""
return _any(operator.lt, left, right)
def _any_le(left, right):
"""Short circuit any le for ndarray.
"""
return _any(operator.le, left, right)
def _any_gt(left, right):
"""Short circuit any gt for ndarray.
"""
return _any(operator.gt, left, right)
def _any_ge(left, right):
"""Short circuit any ge for ndarray.
"""
return _any(operator.ge, left, right)
def _all_eq(left, right):
"""Short circuit all eq for ndarray.
"""
return _all(operator.eq, left, right)
def _all_isclose(left, right):
"""Short circuit all isclose for ndarray.
"""
return _all(np.isclose, left, right)
def _all_lt(left, right):
"""Short circuit all lt for ndarray.
"""
return _all(operator.lt, left, right)
def _all_le(left, right):
"""Short circuit all le for ndarray.
"""
return _all(operator.le, left, right)
def _all_gt(left, right):
"""Short circuit all gt for ndarray.
"""
return _all(operator.gt, left, right)
def _all_ge(left, right):
"""Short circuit all ge for ndarray.
"""
return _all(operator.ge, left, right)
class IdentityGroup:
"""Identity group.
"""
__slots__ = (
'available',
'count',
)
def __init__(self, count=0):
self.count = count
self.available = set(range(0, count))
def acquire(self):
"""Return next available identity or None.
"""
if self.available:
return self.available.pop()
else:
return None
def release(self, ident):
"""Mark identity as available.
"""
if ident < self.count:
self.available.add(ident)
def adjust(self, count):
"""Adjust identities with new count.
If count is larger, add additional identities to the set.
If count is lower, remove identities that are no longer valid.
All apps that have invalid identities will be adjusted in the
schedule cycle.
"""
if count >= self.count:
self.available ^= set(six.moves.xrange(self.count, count))
else:
self.available -= set(six.moves.xrange(count, self.count))
self.count = count
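# Usage sketch for IdentityGroup (assumed values, for illustration only):
#   group = IdentityGroup(count=2)   # available identities {0, 1}
#   ident = group.acquire()          # pops 0 or 1, or None when exhausted
#   group.release(ident)            # identity becomes available again
#   group.adjust(1)                 # shrink the pool; identity 1 is dropped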
class State(enum.Enum):
"""Enumeration of node/server states.
"""
# Ready to accept new applications.
# TODO: Fix attribute name
up = 'up' # pylint: disable=invalid-name
# Applications need to be migrated.
down = 'down'
# Existing applications can stay, but will not accept new.
frozen = 'frozen'
class Affinity:
"""Model affinity and affinity limits.
"""
__slots__ = (
'name',
'limits',
'constraints',
)
def __init__(self, name, limits=None):
self.name = name
self.limits = collections.defaultdict(lambda: float('inf'))
if limits:
self.limits.update(limits)
# freeze affinity shape constraints.
self.constraints = tuple([self.name] + sorted(self.limits.values()))
class Application:
"""Application object.
"""
__slots__ = (
'global_order',
'name',
'demand',
'affinity',
'priority',
'allocation',
'data_retention_timeout',
'server',
'lease',
'identity',
'identity_group',
'identity_group_ref',
'schedule_once',
'evicted',
'placement_expiry',
'renew',
'unschedule',
'final_rank',
'final_util',
'constraints',
)
def __init__(self, name, priority, demand, affinity,
affinity_limits=None,
data_retention_timeout=0,
lease=0,
identity_group=None,
identity=None,
schedule_once=False):
self.global_order = _global_order()
self.allocation = None
self.server = None
self.name = name
self.affinity = Affinity(affinity, affinity_limits)
self.priority = priority
self.demand = np.array(demand, dtype=float)
self.data_retention_timeout = data_retention_timeout
self.lease = lease
self.identity_group = identity_group
self.identity = identity
self.identity_group_ref = None
self.schedule_once = schedule_once
self.evicted = False
self.unschedule = False
self.placement_expiry = None
self.renew = False
def shape(self):
"""Return tuple of application (constraints, demand).
Application shape is tuple of constraints that affect application
placement. Currently this includes affinity constraints and app lease
time.
"""
constraints = (self.affinity.constraints + (self.lease,))
if self.allocation:
constraints += self.allocation.constraints
return constraints, self.demand
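    # Sketch of the resulting shape (assumed values, for illustration): an app
    # with affinity limits {'server': 2}, lease 3600 and an allocation carrying
    # (label, traits) constraints yields
    #   shape() == (('<affinity>', 2, 3600, '<label>', <traits>), demand)
    # Equal shapes let PlacementFeasibilityTracker reuse recorded failures.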
def acquire_identity(self):
"""Try to acquire identity if belong to the group.
Returns True if successfull or if identity group is none.
"""
if not self.identity_group_ref:
return True
if self.identity is None:
self.identity = self.identity_group_ref.acquire()
_LOGGER.info('Acquired identity: %s: %s - %s',
self.name, self.identity_group, self.identity)
return self.identity is not None
def release_identity(self):
"""Release app identity.
"""
if self.identity_group_ref and self.identity is not None:
self.identity_group_ref.release(self.identity)
self.identity = None
def force_set_identity(self, identity):
"""Force identity of the app.
"""
if identity is not None:
assert self.identity_group_ref
self.identity = identity
self.identity_group_ref.available.discard(identity)
def has_identity(self):
"""Checks if app has identity if identity group is specified.
"""
return self.identity_group_ref is None or self.identity is not None
@property
def traits(self):
"""The app traits are derived from allocation.
"""
if self.allocation is None:
return 0
else:
return self.allocation.traits
@six.add_metaclass(abc.ABCMeta)
class Strategy:
"""Base class for all placement strategies.
"""
@abc.abstractmethod
def suggested_node(self):
"""Suggested node that should be tried first.
"""
pass
@abc.abstractmethod
def next_node(self):
"""Next node to try, if previous suggestion was rejected.
"""
pass
class SpreadStrategy(Strategy):
"""Spread strategy will suggest new node for each subsequent placement.
"""
__slots__ = (
'current_idx',
'node',
)
def __init__(self, node):
self.current_idx = 0
self.node = node
def suggested_node(self):
"""Suggest next node from the cycle.
"""
for _ in six.moves.xrange(0, len(self.node.children)):
if self.current_idx == len(self.node.children):
self.current_idx = 0
current = self.node.children[self.current_idx]
self.current_idx += 1
if current:
return current
# Not a single non-none node.
return None
def next_node(self):
"""Suggest next node from the cycle.
"""
return self.suggested_node()
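# Illustration (assumed layout, not in the original code): with three child
# nodes [a, b, c], SpreadStrategy.suggested_node() cycles a, b, c, a, ... on
# successive placements, while PackStrategy (below) keeps returning the same
# node until next_node() advances the index.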
class PackStrategy(Strategy):
"""Pack strategy will suggest same node until it is full.
"""
__slots__ = (
'current_idx',
'node',
)
def __init__(self, node):
self.current_idx = 0
self.node = node
def suggested_node(self):
"""Suggest same node as previous placement.
"""
for _ in six.moves.xrange(0, len(self.node.children)):
if self.current_idx == len(self.node.children):
self.current_idx = 0
node = self.node.children[self.current_idx]
if node:
return node
return None
def next_node(self):
"""Suggest next node from the cycle.
"""
self.current_idx += 1
return self.suggested_node()
class TraitSet:
"""Hierarchical set of traits.
"""
__slots__ = (
'self_traits',
'children_traits',
'traits',
)
def __init__(self, traits=0):
if not traits:
traits = 0
# Private traits.
assert isinstance(traits, six.integer_types)
self.self_traits = traits
# Union of all children traits.
self.children_traits = dict()
self._recalculate()
def _recalculate(self):
"""Calculate combined set of all traits.
"""
self.traits = self.self_traits
for trait in six.itervalues(self.children_traits):
self.traits |= trait
def has(self, traits):
"""Check if all traits are present.
"""
return (self.traits & traits) == traits
def add(self, child, traits):
"""Add a child with given traits.
"""
# Update children traits.
self.children_traits[child] = traits
self._recalculate()
def remove(self, child):
"""Remove child traits from the list.
"""
if child in self.children_traits:
del self.children_traits[child]
self._recalculate()
def is_same(self, other):
"""Compares own traits, ignore child.
"""
return self.self_traits == other.self_traits
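# Hedged sketch of TraitSet semantics (example bit values are assumptions):
#   ts = TraitSet(0b01)           # private trait of this node
#   ts.add('child-node', 0b10)   # union in a child's traits
#   ts.has(0b11)                 # True - combined traits cover both bits
#   ts.remove('child-node')
#   ts.has(0b10)                 # False once the child is removed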
class AffinityCounter:
"""Manages affinity count.
"""
__slots__ = (
'affinity_counter',
)
def __init__(self):
self.affinity_counter = collections.Counter()
class Node:
"""Abstract placement node.
"""
__slots__ = (
'name',
'level',
'free_capacity',
'parent',
'children',
'children_by_name',
'traits',
'labels',
'affinity_counters',
'valid_until',
'_state',
'_state_since',
)
def __init__(self, name, traits, level, valid_until=0):
self.name = name
self.level = level
self.free_capacity = zero_capacity()
self.parent = None
self.children = list()
self.children_by_name = dict()
self.traits = TraitSet(traits)
self.labels = set()
self.affinity_counters = collections.Counter()
self.valid_until = valid_until
self._state = State.up
self._state_since = time.time()
def empty(self):
"""Return true if there are no children.
"""
return not bool(self.children_by_name)
def children_iter(self):
"""Iterate over active children.
"""
for child in self.children:
if child:
yield child
def get_state(self):
"""Returns tuple of (state, since).
"""
        return self._state, self._state_since
def set_state(self, state, since):
"""Sets the state and time since.
"""
if self._state is not state:
self._state_since = since
self._state = state
_LOGGER.debug('state: %s - (%s, %s)',
self.name, self._state, self._state_since)
@property
def state(self):
"""Return current state.
"""
return self._state
@state.setter
def state(self, new_state):
"""Set node state and records time.
"""
self.set_state(new_state, time.time())
def add_child_traits(self, node):
"""Recursively add child traits up.
"""
self.traits.add(node.name, node.traits.traits)
if self.parent:
self.parent.remove_child_traits(self.name)
self.parent.add_child_traits(self)
def adjust_valid_until(self, child_valid_until):
"""Recursively adjust valid until time.
"""
if child_valid_until:
self.valid_until = max(self.valid_until, child_valid_until)
else:
if self.empty():
self.valid_until = 0
else:
self.valid_until = max([node.valid_until
for node in self.children_iter()])
if self.parent:
self.parent.adjust_valid_until(child_valid_until)
def remove_child_traits(self, node_name):
"""Recursively remove child traits up.
"""
self.traits.remove(node_name)
if self.parent:
self.parent.remove_child_traits(self.name)
self.parent.add_child_traits(self)
def reset_children(self):
"""Reset children to empty list.
"""
for child in self.children_iter():
child.parent = None
self.children = list()
self.children_by_name = dict()
def add_node(self, node):
"""Add child node, set the traits and propagate traits up.
"""
assert node.parent is None
assert node.name not in self.children_by_name
node.parent = self
self.children.append(node)
self.children_by_name[node.name] = node
self.add_child_traits(node)
self.increment_affinity(node.affinity_counters)
self.add_labels(node.labels)
self.adjust_valid_until(node.valid_until)
def add_labels(self, labels):
"""Recursively add labels to self and parents.
"""
self.labels.update(labels)
if self.parent:
self.parent.add_labels(self.labels)
def remove_node(self, node):
"""Remove child node and adjust the traits.
"""
assert node.name in self.children_by_name
del self.children_by_name[node.name]
for idx in six.moves.xrange(0, len(self.children)):
if self.children[idx] == node:
self.children[idx] = None
self.remove_child_traits(node.name)
self.decrement_affinity(node.affinity_counters)
self.adjust_valid_until(None)
node.parent = None
return node
def remove_node_by_name(self, nodename):
"""Removes node by name.
"""
assert nodename in self.children_by_name
return self.remove_node(self.children_by_name[nodename])
def check_app_constraints(self, app):
"""Find app placement on the node.
"""
if app.allocation is not None:
if app.allocation.label not in self.labels:
_LOGGER.info('Missing label: %s on %s', app.allocation.label,
self.name)
return False
if app.traits != 0 and not self.traits.has(app.traits):
_LOGGER.info('Missing traits: %s on %s', app.traits, self.name)
return False
if not self.check_app_affinity_limit(app):
return False
if _any_gt(app.demand, self.free_capacity):
_LOGGER.info('Not enough free capacity: %s', self.free_capacity)
return False
return True
def check_app_affinity_limit(self, app):
"""Check app affinity limits
"""
count = self.affinity_counters[app.affinity.name]
limit = app.affinity.limits[self.level]
return count < limit
def put(self, _app):
"""Abstract method, should never be called.
"""
raise Exception('Not implemented.')
def size(self, label):
"""Returns total capacity of the children.
"""
if self.empty() or label not in self.labels:
return eps_capacity()
return np.sum([
n.size(label) for n in self.children_iter()], 0)
def members(self):
"""Return set of all leaf node names.
"""
names = dict()
for node in self.children_iter():
names.update(node.members())
return names
def increment_affinity(self, counters):
"""Increment affinity counters recursively.
"""
self.affinity_counters.update(counters)
if self.parent:
self.parent.increment_affinity(counters)
def decrement_affinity(self, counters):
"""Decrement affinity counters recursively.
"""
self.affinity_counters.subtract(counters)
if self.parent:
self.parent.decrement_affinity(counters)
class Bucket(Node):
"""Collection of nodes/buckets.
"""
__slots__ = (
'affinity_strategies',
'traits',
)
_default_strategy_t = SpreadStrategy
def __init__(self, name, traits=0, level=None):
super(Bucket, self).__init__(name, traits, level)
self.affinity_strategies = dict()
self.traits = TraitSet(traits)
def set_affinity_strategy(self, affinity, strategy_t):
"""Initilaizes placement strategy for given affinity.
"""
self.affinity_strategies[affinity] = strategy_t(self)
def get_affinity_strategy(self, affinity):
"""Returns placement strategy for the affinity, defaults to spread.
"""
if affinity not in self.affinity_strategies:
self.set_affinity_strategy(affinity, Bucket._default_strategy_t)
return self.affinity_strategies[affinity]
def adjust_capacity_up(self, new_capacity):
"""Node can only increase capacity.
"""
self.free_capacity = np.maximum(self.free_capacity, new_capacity)
if self.parent:
self.parent.adjust_capacity_up(self.free_capacity)
def adjust_capacity_down(self, prev_capacity=None):
"""Called when capacity is decreased.
"""
if self.empty():
self.free_capacity = zero_capacity()
if self.parent:
self.parent.adjust_capacity_down()
else:
if prev_capacity is not None and _all_lt(prev_capacity,
self.free_capacity):
return
free_capacity = zero_capacity()
for child_node in self.children_iter():
if child_node.state is not State.up:
continue
free_capacity = np.maximum(free_capacity,
child_node.free_capacity)
            # If resulting free_capacity is less than the previous, we need to
# adjust the parent, otherwise, nothing needs to be done.
prev_capacity = self.free_capacity.copy()
if _any_lt(free_capacity, self.free_capacity):
self.free_capacity = free_capacity
if self.parent:
self.parent.adjust_capacity_down(prev_capacity)
def add_node(self, node):
"""Adds node to the bucket.
"""
super(Bucket, self).add_node(node)
self.adjust_capacity_up(node.free_capacity)
def remove_node(self, node):
"""Removes node from the bucket.
"""
super(Bucket, self).remove_node(node)
# if _any_isclose(self.free_capacity, node.free_capacity):
self.adjust_capacity_down(node.free_capacity)
return node
def put(self, app):
"""Try to put app on one of the nodes that belong to the bucket.
"""
# Check if it is feasible to put app on some node low in the
# hierarchy
_LOGGER.debug('bucket.put: %s => %s', app.name, self.name)
if not self.check_app_constraints(app):
return False
strategy = self.get_affinity_strategy(app.affinity.name)
node = strategy.suggested_node()
if node is None:
_LOGGER.debug('All nodes in the bucket deleted.')
return False
nodename0 = node.name
first = True
while True:
# End of iteration.
if not first and node.name == nodename0:
_LOGGER.debug('Finished iterating on: %s.', self.name)
break
first = False
_LOGGER.debug('Trying node: %s:', node.name)
if node.state is not State.up:
_LOGGER.debug('Node not up: %s, %s', node.name, node.state)
else:
if node.put(app):
return True
node = strategy.next_node()
return False
class Server(Node):
"""Server object, final app placement.
"""
__slots__ = (
'init_capacity',
'apps',
'up_since',
'presence_id',
)
def __init__(self, name, capacity, up_since=0, valid_until=0,
traits=0, label=None, presence_id=None):
super(Server, self).__init__(name, traits=traits, level='server',
valid_until=valid_until)
self.labels = set([label])
self.init_capacity = np.array(capacity, dtype=float)
self.free_capacity = self.init_capacity.copy()
self.apps = dict()
self.up_since = up_since
self.presence_id = presence_id
def __str__(self):
return 'server: %s %s' % (self.name, self.init_capacity)
def is_same(self, other):
"""Compares capacity and traits against another server.
        valid_until is ignored, as a server that comes up after a reboot will
        have a different valid_until value.
"""
return (self.labels == other.labels and
_all_eq(self.init_capacity, other.init_capacity) and
self.traits.is_same(other.traits))
def put(self, app):
"""Tries to put the app on the server.
"""
assert app.name not in self.apps
_LOGGER.debug('server.put: %s => %s', app.name, self.name)
if not self.check_app_lifetime(app):
return False
if not self.check_app_constraints(app):
return False
prev_capacity = self.free_capacity.copy()
self.free_capacity -= app.demand
self.apps[app.name] = app
self.increment_affinity([app.affinity.name])
app.server = self.name
if self.parent:
self.parent.adjust_capacity_down(prev_capacity)
if app.placement_expiry is None:
app.placement_expiry = time.time() + app.lease
return True
def restore(self, app, placement_expiry=None):
"""Put app back on the server, ignore app lifetime.
"""
_LOGGER.debug('server.restore: %s => %s (%s)',
app.name, self.name, placement_expiry)
lease = app.lease
        # If not explicitly provided, keep the app's recorded placement expiry.
if placement_expiry is None:
placement_expiry = app.placement_expiry
app.lease = 0
rc = self.put(app)
app.lease = lease
app.placement_expiry = placement_expiry
return rc
def renew(self, app):
"""Try to extend the placement for app lease.
"""
can_renew = self.check_app_lifetime(app)
if can_renew:
app.placement_expiry = time.time() + app.lease
return can_renew
def check_app_lifetime(self, app):
"""Check if the app lease fits until server is rebooted.
"""
# app with 0 lease can be placed anywhere (ignore potentially
# expired servers)
if not app.lease:
return True
return time.time() + app.lease < self.valid_until
def remove(self, app_name):
"""Removes app from the server.
"""
assert app_name in self.apps
app = self.apps[app_name]
del self.apps[app_name]
app.server = None
app.evicted = True
app.unschedule = False
app.placement_expiry = None
self.free_capacity += app.demand
self.decrement_affinity([app.affinity.name])
if self.parent:
self.parent.adjust_capacity_up(self.free_capacity)
def remove_all(self):
"""Remove all apps.
"""
# iterate over copy of the keys, as we are removing them in the loop.
for appname in list(self.apps):
self.remove(appname)
def size(self, label):
"""Return server capacity.
"""
if label not in self.labels:
return eps_capacity()
return self.init_capacity
def members(self):
"""Return set of all leaf node names.
"""
return {self.name: self}
def set_state(self, state, since):
"""Change host state.
"""
if self.state is state:
return
super(Server, self).set_state(state, since)
if state == State.up:
if self.parent:
self.parent.adjust_capacity_up(self.free_capacity)
elif state in (State.down, State.frozen):
if self.parent:
self.parent.adjust_capacity_down(self.free_capacity)
else:
            raise Exception('Invalid state: %s' % state)
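# Capacity bookkeeping sketch (assumed numbers, for illustration): a Server
# created with capacity [4, 4] that accepts an app of demand [1, 2] is left
# with free_capacity [3, 2]; remove() adds the demand back and propagates the
# new free capacity to parent buckets via adjust_capacity_up().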
class Allocation:
"""Allocation manages queue of apps sharing same reserved capacity.
In reality allocation is tied to grn via application proid.
Applications within the allocation are organized by application priority.
Allocations are ranked, and the rank is used to globally order applications
from different allocations into global queue.
    Default allocation has rank 100. Defining an allocation with a lower rank
    will result in all of its applications being evaluated first regardless of
    utilization. This is used to model "system" applications that should
    always be present regardless of utilization.
Allocation queue can be capped with max_utilization parameter. If set, it
will specify the max_utilization which will be considered for scheduling.
"""
__slots__ = (
'reserved',
'rank',
'rank_adjustment',
'traits',
'label',
'max_utilization',
'apps',
'sub_allocations',
'path',
'constraints',
)
def __init__(self, reserved=None, rank=None, traits=None,
max_utilization=None, partition=None):
self.set_reserved(reserved)
self.rank = None
self.rank_adjustment = 0
self.traits = 0
self.label = partition
self.max_utilization = _MAX_UTILIZATION
self.reserved = zero_capacity()
self.set_max_utilization(max_utilization)
self.set_traits(traits)
self.update(reserved, rank, 0)
self.apps = dict()
self.sub_allocations = dict()
self.path = []
        # Freeze shape constraints.
self.constraints = (self.label, self.traits,)
@property
def name(self):
"""Returns full allocation name.
"""
return '/'.join(self.path)
def set_reserved(self, reserved):
"""Update reserved capacity.
"""
if reserved is None:
self.reserved = zero_capacity()
elif isinstance(reserved, int):
assert reserved == 0
self.reserved = zero_capacity()
elif isinstance(reserved, float):
assert reserved == 0.0
self.reserved = zero_capacity()
elif isinstance(reserved, list):
assert len(reserved) == DIMENSION_COUNT
self.reserved = np.array(reserved, dtype=float)
elif isinstance(reserved, np.ndarray):
self.reserved = reserved
else:
assert 'Unsupported type: %r' % type(reserved)
def update(self, reserved, rank, rank_adjustment, max_utilization=None):
"""Updates allocation.
"""
if rank is not None:
self.rank = rank
else:
self.rank = DEFAULT_RANK
if rank_adjustment is not None:
self.rank_adjustment = rank_adjustment
self.set_reserved(reserved)
self.set_max_utilization(max_utilization)
def set_max_utilization(self, max_utilization):
"""Sets max_utilization, accounting for default None value.
"""
if max_utilization is not None:
self.max_utilization = max_utilization
else:
self.max_utilization = _MAX_UTILIZATION
def set_traits(self, traits):
"""Set traits, account for default None value.
"""
if not traits:
self.traits = 0
else:
self.traits = traits
def add(self, app):
"""Add application to the allocation queue.
Once added, the scheduler will make an attempt to place the app on one
of the cell nodes.
"""
# Check that there are no duplicate app names.
if app.name in self.apps:
_LOGGER.warning(
                'Duplicate app on allocation queue: %s', app.name
)
return
app.allocation = self
self.apps[app.name] = app
def remove(self, name):
"""Remove application from the allocation queue.
"""
if name in self.apps:
self.apps[name].allocation = None
del self.apps[name]
def priv_utilization_queue(self):
"""Returns tuples for sorted by global utilization.
        Apps in the queue are ordered by priority, then by insertion order.
        Adding or removing apps maintains the invariant that app utilization
        increases monotonically.
Returns local prioritization queue in a tuple where first element is
utilization ratio, so that this queue is suitable for merging into
global priority queue.
"""
def _app_key(app):
"""Compares apps by priority, state, global index
"""
return (-app.priority, 0 if app.server else 1,
app.global_order, app.name)
prio_queue = sorted(six.viewvalues(self.apps), key=_app_key)
acc_demand = zero_capacity()
available = self.reserved + np.finfo(float).eps
util_before = utilization(acc_demand, self.reserved, available)
for app in prio_queue:
acc_demand = acc_demand + app.demand
util_after = utilization(acc_demand, self.reserved, available)
# Priority 0 apps are treated specially - utilization is set to
# max float.
#
# This ensures that they are at the end of the all queues.
if app.priority == 0:
util_before = _MAX_UTILIZATION
util_after = _MAX_UTILIZATION
# All things equal, already scheduled applications have priority
# over pending.
pending = 0 if app.server else 1
if util_after <= self.max_utilization - 1:
rank = self.rank
if util_before < 0:
rank -= self.rank_adjustment
else:
rank = _UNPLACED_RANK
entry = (rank, util_before, util_after, pending, app.global_order,
app)
util_before = util_after
yield entry
def utilization_queue(self, free_capacity, visitor=None):
"""Returns utilization queue including the sub-allocs.
All app queues from self and sub-allocs are merged in standard order,
and then utilization is recalculated based on total reserved capacity
of this alloc and sub-allocs combined.
        The function maintains the invariant that any app (in self or inside a
        sub-alloc) with utilization < 1 will remain with utilization < 1.
"""
total_reserved = self.total_reserved()
queues = [
alloc.utilization_queue(free_capacity, visitor)
for alloc in six.itervalues(self.sub_allocations)
]
queues.append(self.priv_utilization_queue())
acc_demand = zero_capacity()
available = total_reserved + free_capacity + np.finfo(float).eps
util_before = utilization(acc_demand, total_reserved, available)
for item in heapq.merge(*queues):
rank, _u_before, _u_after, pending, order, app = item
acc_demand = acc_demand + app.demand
util_after = utilization(acc_demand, total_reserved, available)
if app.priority == 0:
util_before = _MAX_UTILIZATION
util_after = _MAX_UTILIZATION
# - lower rank allocations take precedence.
# - for same rank, utilization takes precedence
# - False < True, so for apps with same utilization we prefer
# those that already running (False == not pending)
# - Global order
entry = (rank, util_before, util_after, pending, order, app)
if visitor:
visitor(self, entry, acc_demand)
util_before = util_after
yield entry
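    # Hedged note on queue entries (example ordering values are assumptions):
    # each yielded entry is (rank, util_before, util_after, pending, order,
    # app), so heapq.merge() orders by rank first and then by pre-placement
    # utilization; e.g. (100, -0.5, ...) sorts ahead of (100, 0.2, ...), and
    # unplaced-rank entries (sys.maxsize, ...) sort last.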
def total_reserved(self):
"""Total reserved capacity including sub-allocs.
"""
return six.moves.reduce(
lambda acc, alloc: acc + alloc.total_reserved(),
six.itervalues(self.sub_allocations),
self.reserved
)
def add_sub_alloc(self, name, alloc):
"""Add child allocation.
"""
self.sub_allocations[name] = alloc
assert not alloc.path
alloc.path = self.path + [name]
alloc.label = self.label
def remove_sub_alloc(self, name):
"""Remove chlid allocation.
"""
if name in self.sub_allocations:
del self.sub_allocations[name]
def get_sub_alloc(self, name):
"""Return sub allocation, create empty if it does not exist.
"""
if name not in self.sub_allocations:
self.add_sub_alloc(name, Allocation())
return self.sub_allocations[name]
def all_apps(self):
"""Return all apps in allocation and sub-allocations."""
all_apps = list(six.itervalues(self.apps))
for alloc in six.itervalues(self.sub_allocations):
all_apps.extend(alloc.all_apps())
return all_apps
class Partition:
"""Cell partition.
"""
__slots__ = (
'allocation',
'max_server_uptime',
'max_lease',
'threshold',
'label',
'_reboot_buckets',
'_reboot_dates',
'_reboot_last',
)
def __init__(self, max_server_uptime=None, max_lease=None, threshold=None,
label=None, reboot_schedule=None, now=None):
self.label = label
self.allocation = Allocation(partition=label)
        # Apply defaults for missing parameters.
if not max_server_uptime:
max_server_uptime = DEFAULT_SERVER_UPTIME
if not max_lease:
max_lease = DEFAULT_MAX_APP_LEASE
if not threshold:
threshold = DEFAULT_THRESHOLD
self.max_server_uptime = max_server_uptime
self.max_lease = max_lease
self.threshold = threshold
if not reboot_schedule:
# reboot every day
reboot_schedule = {day: (23, 59, 59) for day in range(7)}
if not now:
now = time.time()
self._reboot_dates = reboot_dates(
reboot_schedule,
start_date=datetime.date.fromtimestamp(now)
)
self._reboot_buckets = []
self._reboot_last = now
self.tick(now)
def _find_bucket(self, timestamp):
"""Try to find bucket with given timestamp.
"""
for bucket in self._reboot_buckets:
if bucket.timestamp == timestamp:
return bucket
return None
def add(self, server, timestamp=None):
"""Add server.
"""
bucket = None
if timestamp:
bucket = self._find_bucket(timestamp)
# servers with larger than max lifetime should be rebooted at
# the next opportunity
if (self._reboot_buckets[0].timestamp >
server.up_since + DEFAULT_SERVER_UPTIME):
bucket = self._reboot_buckets[0]
if not bucket:
bucket = min(reversed(self._reboot_buckets),
key=lambda b: b.cost(server))
bucket.add(server)
def remove(self, server):
"""Remove server.
"""
for bucket in self._reboot_buckets:
bucket.remove(server)
def tick(self, now):
"""Do per-tick-bookkeeping.
"""
while self._reboot_last <= now + DEFAULT_SERVER_UPTIME:
bucket = RebootBucket(next(self._reboot_dates))
self._reboot_buckets.append(bucket)
self._reboot_last = bucket.timestamp
while self._reboot_buckets[0].timestamp < now:
self._reboot_buckets.pop(0)
class PartitionDict(dict):
"""Dict that creates partitions on demand.
We use this instead of collections.defaultdict so that we can provide
the new partition with its label, to be propagated to its allocations.
"""
def __missing__(self, label):
"""Create a new partition, passing the label to its constructor.
"""
self[label] = Partition(label=label)
return self[label]
# pylint: disable=invalid-name
def reboot_dates(schedule, start_date=None):
"""Generate list of valid reboot dates.
"""
date = datetime.date.today()
if start_date:
date = start_date
while True:
weekday = date.weekday()
if weekday in schedule:
h, m, s = schedule[weekday]
yield time.mktime((date.year, date.month, date.day,
h, m, s, 0, 0, 0))
date += datetime.timedelta(days=1)
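# Usage sketch (assumed schedule, for illustration only):
#   schedule = {5: (23, 0, 0), 6: (23, 0, 0)}   # Saturday/Sunday at 23:00:00
#   dates = reboot_dates(schedule)
#   next_reboot = next(dates)                   # epoch seconds of next slot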
class RebootBucket:
"""Bucket of servers to be rebooted at the same time.
"""
__slots__ = (
'timestamp',
'servers',
)
def __init__(self, timestamp):
self.timestamp = timestamp
self.servers = []
def add(self, server):
"""Add server to this bucket.
"""
self.servers.append(server)
server.valid_until = self.timestamp
_LOGGER.info('Setting valid until on server: %s %s',
server.name, server.valid_until)
def remove(self, server):
"""Remove server from this bucket.
"""
try:
self.servers.remove(server)
except ValueError:
pass
def cost(self, server):
"""The cost of adding server to this bucket.
"""
if self.timestamp > server.up_since + DEFAULT_SERVER_UPTIME:
return float('inf')
if self.timestamp < server.up_since + MIN_SERVER_UPTIME:
return float('inf')
return len(self.servers)
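# Cost sketch (assumed numbers): for a server with up_since == t, any bucket
# whose timestamp lies outside [t + MIN_SERVER_UPTIME, t + DEFAULT_SERVER_UPTIME]
# costs float('inf'); otherwise the cost is the bucket's current size, so
# Partition.add() packs each server into the emptiest feasible reboot slot.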
class PlacementFeasibilityTracker:
"""Tracks similar apps placement failures."""
def __init__(self):
self.recorder = dict()
def feasible(self, app):
"""Checks if it is feasible to satisfy demand."""
constraints, demand = app.shape()
if constraints in self.recorder:
            # If demand is >= the recorded failure, placement is not feasible.
if _all_ge(demand, self.recorder[constraints]):
return False
return True
def adjust(self, app):
"""Adjust info about failed placement."""
constraints, demand = app.shape()
if constraints not in self.recorder:
self.recorder[constraints] = demand
else:
if _all_le(demand, self.recorder[constraints]):
self.recorder[constraints] = demand
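# Feasibility sketch (assumed demands): once adjust() records a failed
# placement of some shape with demand [2, 2], feasible() returns False for any
# later app of the same shape whose demand is >= [2, 2] in every dimension,
# so known-impossible placements are skipped within the scheduling cycle.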
class Cell(Bucket):
"""Top level node.
"""
__slots__ = (
'partitions',
'next_event_at',
'apps',
'identity_groups',
)
def __init__(self, name):
super(Cell, self).__init__(name, traits=0, level='cell')
self.partitions = PartitionDict()
self.apps = dict()
self.identity_groups = collections.defaultdict(IdentityGroup)
self.next_event_at = np.inf
def add_app(self, allocation, app):
"""Adds application to the scheduled list.
"""
assert allocation is not None
if app.allocation:
app.allocation.remove(app.name)
allocation.add(app)
self.apps[app.name] = app
if app.identity_group:
app.identity_group_ref = self.identity_groups[app.identity_group]
def remove_app(self, appname):
"""Remove app from scheduled list.
"""
if appname not in self.apps:
return
app = self.apps[appname]
servers = self.members()
if app.server in servers:
servers[app.server].remove(app.name)
if app.allocation:
app.allocation.remove(app.name)
app.release_identity()
del self.apps[appname]
def configure_identity_group(self, name, count):
"""Add identity group to the cell.
"""
if name not in self.identity_groups:
self.identity_groups[name] = IdentityGroup(count)
else:
self.identity_groups[name].adjust(count)
def remove_identity_group(self, name):
"""Remove identity group.
"""
ident_group = self.identity_groups.get(name)
if ident_group:
in_use = False
for app in six.itervalues(self.apps):
if app.identity_group_ref == ident_group:
ident_group.adjust(0)
in_use = True
break
if not in_use:
del self.identity_groups[name]
def _fix_invalid_placements(self, queue, servers):
"""If app is placed on non-existent server, set server to None.
"""
for app in queue:
if app.server and app.server not in servers:
app.server = None
app.evicted = True
app.release_identity()
def _record_rank_and_util(self, queue):
"""Set final rank and utilization for all apps in the queue.
"""
for item in queue:
rank = item[0]
util = item[1]
app = item[-1]
app.final_rank = rank
app.final_util = util
def _fix_invalid_identities(self, queue, servers):
"""Check that app identity is valid for given identity group.
"""
for app in queue:
if app.identity is not None and app.identity_group_ref is not None:
# Can happen if identity group was adjusted to lower count.
if app.identity >= app.identity_group_ref.count:
# Can't release identity as it is invalid.
_LOGGER.info('Identity exceeds limit: %s - %s, limit %s',
app.name, app.identity,
app.identity_group_ref.count)
app.identity = None
# Invalidate any existing placement.
if app.server:
servers[app.server].remove(app.name)
def _handle_inactive_servers(self, servers):
"""Migrate apps from inactive servers.
"""
self.next_event_at = np.inf
for server in six.itervalues(servers):
state, since = server.get_state()
to_be_moved = []
if state == State.down:
_LOGGER.debug('Server state is down: %s', server.name)
for name, app in six.iteritems(server.apps):
if app.data_retention_timeout is None:
expires_at = 0
else:
expires_at = since + app.data_retention_timeout
if expires_at <= time.time():
_LOGGER.debug('Expired placement: %s', name)
app.release_identity()
to_be_moved.append(name)
else:
_LOGGER.debug('Keep placement: %s until %s',
name, expires_at)
self.next_event_at = min(expires_at,
self.next_event_at)
elif state == State.frozen:
_LOGGER.debug('Server state is frozen: %s', server.name)
to_be_moved = [
name for name, app in six.iteritems(server.apps)
if app.unschedule
]
for name in to_be_moved:
server.remove(name)
def _find_placements(self, queue, servers):
"""Run the queue and find placements.
"""
# TODO: refactor to get rid of warnings.
#
# pylint: disable=too-many-branches,too-many-statements
#
# At this point, if app.server is defined, it points to attached
# server.
evicted = dict()
reversed_queue = queue[::-1]
placement_tracker = PlacementFeasibilityTracker()
for app in queue:
_LOGGER.debug('scheduling %s', app.name)
if app.final_rank == _UNPLACED_RANK:
if app.server:
assert app.server in servers
assert app.has_identity()
servers[app.server].remove(app.name)
app.release_identity()
continue
restore = {}
if app.renew:
assert app.server
assert app.has_identity()
assert app.server in servers
server = servers[app.server]
if not server.renew(app):
# Save information that will be used to restore placement
# in case renewal fails.
_LOGGER.debug('Cannot renew app %s on server %s',
app.name, app.server)
restore['server'] = server
restore['placement_expiry'] = app.placement_expiry
server.remove(app.name)
# At this point app was either renewed on the same server, or
# temporarily removed from server if renew failed.
#
# If placement will be found, renew should remain False. If
# placement will not be found, renew will be set to True when
# placement is restored to the server it was running.
app.renew = False
if app.server:
assert app.server in servers
assert app.has_identity()
continue
assert app.server is None
if not app.acquire_identity():
_LOGGER.info('Unable to acquire identity: %s, %s', app.name,
app.identity_group)
continue
# If app was evicted before, try to restore to the same node.
if app in evicted:
assert app.has_identity()
evicted_from, app_expiry = evicted[app]
del evicted[app]
if evicted_from.restore(app, app_expiry):
app.evicted = False
continue
assert app.server is None
if app.schedule_once and app.evicted:
continue
# Check if placement is feasible.
if not placement_tracker.feasible(app):
_LOGGER.info(
'Placement not feasible: %s %r', app.name, app.shape()
)
continue
if not self.put(app):
# There is not enough capacity, from the end of the queue,
# evict apps, freeing capacity.
for evicted_app in reversed_queue:
# We reached the app we can't place
if evicted_app == app:
break
# The app is not yet placed, skip
if not evicted_app.server:
continue
assert evicted_app.server in servers
evicted_app_server = servers[evicted_app.server]
# Do not consider servers that are not up.
if evicted_app_server.state is not State.up:
continue
evicted[evicted_app] = (evicted_app_server,
evicted_app.placement_expiry)
evicted_app_server.remove(evicted_app.name)
# TODO: we need to check affinity limit constraints on
# each level, all the way to the top.
if evicted_app_server.put(app):
break
# Placement failed.
if not app.server:
# If renewal attempt failed, restore previous placement and
# expiry date.
if restore:
restore['server'].restore(app, restore['placement_expiry'])
app.renew = True
else:
app.release_identity()
placement_tracker.adjust(app)
def schedule_alloc(self, allocation, servers):
"""Run the scheduler for given allocation.
"""
begin = time.time()
size = self.size(allocation.label)
util_queue = list(allocation.utilization_queue(size))
self._record_rank_and_util(util_queue)
queue = [item[-1] for item in util_queue]
self._find_placements(queue, servers)
_LOGGER.info('Scheduled %s (%d) apps in %r',
allocation.label,
len(queue),
time.time() - begin)
def schedule(self):
"""Run the scheduler.
"""
begin = time.time()
all_apps = []
for label, partition in six.iteritems(self.partitions):
allocation = partition.allocation
all_apps.extend(allocation.all_apps())
before = [(app.name, app.server, app.placement_expiry)
for app in all_apps]
servers = self.members()
self._fix_invalid_placements(six.viewvalues(self.apps), servers)
self._handle_inactive_servers(servers)
self._fix_invalid_identities(six.viewvalues(self.apps), servers)
for label, partition in six.iteritems(self.partitions):
allocation = partition.allocation
allocation.label = label
self.schedule_alloc(allocation, servers)
after = [(app.server, app.placement_expiry)
for app in all_apps]
placement = [
tuple(itertools.chain(b, a))
for b, a in six.moves.zip(before, after)
]
for appname, s_before, exp_before, s_after, exp_after in placement:
if s_before != s_after:
_LOGGER.info('New placement: %s - %s => %s',
appname, s_before, s_after)
else:
if exp_before != exp_after:
_LOGGER.info('Renewed: %s [%s] - %s => %s',
appname, s_before, exp_before, exp_after)
_LOGGER.info('Total scheduler time for %s apps: %r (sec)',
len(all_apps),
time.time() - begin)
return placement
def resolve_reboot_conflicts(self):
"""Adjust server exipiration time to avoid conflicts.
"""
pass
def dumps(cell):
"""Serializes cell to string.
"""
del cell
return ''
def loads(data):
"""Loads scheduler from string.
"""
del data
assert False, 'not implemented.'
|
[
"logging.getLogger",
"itertools.chain",
"six.itervalues",
"heapq.merge",
"numpy.array",
"datetime.date.fromtimestamp",
"six.moves.xrange",
"datetime.timedelta",
"numpy.subtract",
"numpy.maximum",
"six.moves.zip",
"six.viewvalues",
"numpy.finfo",
"datetime.date.today",
"time.time",
"time.mktime",
"six.add_metaclass",
"collections.Counter",
"numpy.zeros",
"collections.defaultdict",
"six.iteritems"
] |
[((377, 404), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (394, 404), False, 'import logging\n'), ((551, 594), 'time.mktime', 'time.mktime', (['(2014, 1, 1, 0, 0, 0, 0, 0, 0)'], {}), '((2014, 1, 1, 0, 0, 0, 0, 0, 0))\n', (562, 594), False, 'import time\n'), ((8874, 8904), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (8891, 8904), False, 'import six\n'), ((1147, 1172), 'numpy.zeros', 'np.zeros', (['DIMENSION_COUNT'], {}), '(DIMENSION_COUNT)\n', (1155, 1172), True, 'import numpy as np\n'), ((39765, 39786), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (39784, 39786), False, 'import datetime\n'), ((6599, 6628), 'numpy.array', 'np.array', (['demand'], {'dtype': 'float'}), '(demand, dtype=float)\n', (6607, 6628), True, 'import numpy as np\n'), ((11480, 11516), 'six.itervalues', 'six.itervalues', (['self.children_traits'], {}), '(self.children_traits)\n', (11494, 11516), False, 'import six\n'), ((12396, 12417), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (12415, 12417), False, 'import collections\n'), ((13106, 13127), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (13125, 13127), False, 'import collections\n'), ((13226, 13237), 'time.time', 'time.time', ([], {}), '()\n', (13235, 13237), False, 'import time\n'), ((20062, 20106), 'numpy.maximum', 'np.maximum', (['self.free_capacity', 'new_capacity'], {}), '(self.free_capacity, new_capacity)\n', (20072, 20106), True, 'import numpy as np\n'), ((23473, 23504), 'numpy.array', 'np.array', (['capacity'], {'dtype': 'float'}), '(capacity, dtype=float)\n', (23481, 23504), True, 'import numpy as np\n'), ((34511, 34531), 'heapq.merge', 'heapq.merge', (['*queues'], {}), '(*queues)\n', (34522, 34531), False, 'import heapq\n'), ((36447, 36483), 'six.itervalues', 'six.itervalues', (['self.sub_allocations'], {}), '(self.sub_allocations)\n', (36461, 36483), False, 'import six\n'), ((40086, 40112), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (40104, 40112), False, 'import datetime\n'), ((42352, 42390), 'collections.defaultdict', 'collections.defaultdict', (['IdentityGroup'], {}), '(IdentityGroup)\n', (42375, 42390), False, 'import collections\n'), ((45645, 45668), 'six.itervalues', 'six.itervalues', (['servers'], {}), '(servers)\n', (45659, 45668), False, 'import six\n'), ((51700, 51711), 'time.time', 'time.time', ([], {}), '()\n', (51709, 51711), False, 'import time\n'), ((52213, 52224), 'time.time', 'time.time', ([], {}), '()\n', (52222, 52224), False, 'import time\n'), ((52280, 52310), 'six.iteritems', 'six.iteritems', (['self.partitions'], {}), '(self.partitions)\n', (52293, 52310), False, 'import six\n'), ((52772, 52802), 'six.iteritems', 'six.iteritems', (['self.partitions'], {}), '(self.partitions)\n', (52785, 52802), False, 'import six\n'), ((1820, 1850), 'numpy.subtract', 'np.subtract', (['demand', 'allocated'], {}), '(demand, allocated)\n', (1831, 1850), True, 'import numpy as np\n'), ((14223, 14234), 'time.time', 'time.time', ([], {}), '()\n', (14232, 14234), False, 'import time\n'), ((32345, 32370), 'six.viewvalues', 'six.viewvalues', (['self.apps'], {}), '(self.apps)\n', (32359, 32370), False, 'import six\n'), ((35551, 35587), 'six.itervalues', 'six.itervalues', (['self.sub_allocations'], {}), '(self.sub_allocations)\n', (35565, 35587), False, 'import six\n'), ((36399, 36424), 'six.itervalues', 'six.itervalues', (['self.apps'], {}), '(self.apps)\n', (36413, 36424), False, 'import six\n'), 
((37584, 37595), 'time.time', 'time.time', ([], {}), '()\n', (37593, 37595), False, 'import time\n'), ((43744, 43769), 'six.itervalues', 'six.itervalues', (['self.apps'], {}), '(self.apps)\n', (43758, 43769), False, 'import six\n'), ((52583, 52608), 'six.viewvalues', 'six.viewvalues', (['self.apps'], {}), '(self.apps)\n', (52597, 52608), False, 'import six\n'), ((52703, 52728), 'six.viewvalues', 'six.viewvalues', (['self.apps'], {}), '(self.apps)\n', (52717, 52728), False, 'import six\n'), ((1336, 1351), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1344, 1351), True, 'import numpy as np\n'), ((1640, 1651), 'time.time', 'time.time', ([], {}), '()\n', (1649, 1651), False, 'import time\n'), ((2000, 2026), 'six.moves.zip', 'six.moves.zip', (['left', 'right'], {}), '(left, right)\n', (2013, 2026), False, 'import six\n'), ((2169, 2195), 'six.moves.zip', 'six.moves.zip', (['left', 'right'], {}), '(left, right)\n', (2182, 2195), False, 'import six\n'), ((4600, 4635), 'six.moves.xrange', 'six.moves.xrange', (['self.count', 'count'], {}), '(self.count, count)\n', (4616, 4635), False, 'import six\n'), ((4685, 4720), 'six.moves.xrange', 'six.moves.xrange', (['count', 'self.count'], {}), '(count, self.count)\n', (4701, 4720), False, 'import six\n'), ((20853, 20904), 'numpy.maximum', 'np.maximum', (['free_capacity', 'child_node.free_capacity'], {}), '(free_capacity, child_node.free_capacity)\n', (20863, 20904), True, 'import numpy as np\n'), ((24844, 24855), 'time.time', 'time.time', ([], {}), '()\n', (24853, 24855), False, 'import time\n'), ((25613, 25624), 'time.time', 'time.time', ([], {}), '()\n', (25622, 25624), False, 'import time\n'), ((25943, 25954), 'time.time', 'time.time', ([], {}), '()\n', (25952, 25954), False, 'import time\n'), ((32460, 32475), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (32468, 32475), True, 'import numpy as np\n'), ((34205, 34241), 'six.itervalues', 'six.itervalues', (['self.sub_allocations'], {}), '(self.sub_allocations)\n', (34219, 34241), False, 'import six\n'), ((34397, 34412), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (34405, 34412), True, 'import numpy as np\n'), ((37692, 37724), 'datetime.date.fromtimestamp', 'datetime.date.fromtimestamp', (['now'], {}), '(now)\n', (37719, 37724), False, 'import datetime\n'), ((39973, 40037), 'time.mktime', 'time.mktime', (['(date.year, date.month, date.day, h, m, s, 0, 0, 0)'], {}), '((date.year, date.month, date.day, h, m, s, 0, 0, 0))\n', (39984, 40037), False, 'import time\n'), ((45886, 45912), 'six.iteritems', 'six.iteritems', (['server.apps'], {}), '(server.apps)\n', (45899, 45912), False, 'import six\n'), ((52109, 52120), 'time.time', 'time.time', ([], {}), '()\n', (52118, 52120), False, 'import time\n'), ((53072, 53093), 'itertools.chain', 'itertools.chain', (['b', 'a'], {}), '(b, a)\n', (53087, 53093), False, 'import itertools\n'), ((53119, 53147), 'six.moves.zip', 'six.moves.zip', (['before', 'after'], {}), '(before, after)\n', (53132, 53147), False, 'import six\n'), ((53715, 53726), 'time.time', 'time.time', ([], {}), '()\n', (53724, 53726), False, 'import time\n'), ((29858, 29889), 'numpy.array', 'np.array', (['reserved'], {'dtype': 'float'}), '(reserved, dtype=float)\n', (29866, 29889), True, 'import numpy as np\n'), ((46148, 46159), 'time.time', 'time.time', ([], {}), '()\n', (46157, 46159), False, 'import time\n'), ((46794, 46820), 'six.iteritems', 'six.iteritems', (['server.apps'], {}), '(server.apps)\n', (46807, 46820), False, 'import six\n')]
|
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
from tensorflow.contrib import learn
from input_helpers import InputHelper
# Parameters
# ==================================================
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_string("eval_filepath", "match_valid.tsv", "Evaluate on this data (Default: None)")
tf.flags.DEFINE_string("vocab_filepath", "runs/1479874609/checkpoints/vocab", "Load training time vocabulary (Default: None)")
tf.flags.DEFINE_string("model", "runs/1479874609/checkpoints/model-32000", "Load trained model checkpoint (Default: None)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
if FLAGS.eval_filepath is None or FLAGS.vocab_filepath is None or FLAGS.model is None:
print("Eval or Vocab filepaths are empty.")
exit()
# load data and map id-transform based on training time vocabulary
inpH = InputHelper()
x1_test,x2_test,y_test = inpH.getTestDataSet(FLAGS.eval_filepath, FLAGS.vocab_filepath, 30)
print("\nEvaluating...\n")
# Evaluation
# ==================================================
checkpoint_file = FLAGS.model
print(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
sess.run(tf.initialize_all_variables())
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
input_x2 = graph.get_operation_by_name("input_x2").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/distance").outputs[0]
accuracy = graph.get_operation_by_name("accuracy/accuracy").outputs[0]
sim = graph.get_operation_by_name("accuracy/temp_sim").outputs[0]
#emb = graph.get_operation_by_name("embedding/W").outputs[0]
#embedded_chars = tf.nn.embedding_lookup(emb,input_x)
# Generate batches for one epoch
batches = inpH.batch_iter(list(zip(x1_test,x2_test,y_test)), 2*FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
all_d=[]
for db in batches:
x1_dev_b,x2_dev_b,y_dev_b = zip(*db)
            # keep the fetched similarity in a separate variable so the `sim` tensor is not overwritten
            batch_predictions, batch_acc, batch_sim = sess.run([predictions, accuracy, sim], {input_x1: x1_dev_b, input_x2: x2_dev_b, input_y: y_dev_b, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])
            print(batch_predictions)
            all_d = np.concatenate([all_d, batch_sim])
print("DEV acc {}".format(batch_acc))
for ex in all_predictions:
print(ex)
correct_predictions = float(np.mean(all_d == y_test))
print("Accuracy: {:g}".format(correct_predictions))
|
[
"tensorflow.flags.DEFINE_string",
"tensorflow.Graph",
"tensorflow.ConfigProto",
"tensorflow.initialize_all_variables",
"numpy.mean",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.Session",
"numpy.concatenate",
"tensorflow.flags.DEFINE_integer",
"input_helpers.InputHelper"
] |
[((265, 334), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch Size (default: 64)"""'], {}), "('batch_size', 64, 'Batch Size (default: 64)')\n", (288, 334), True, 'import tensorflow as tf\n'), ((335, 425), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""checkpoint_dir"""', '""""""', '"""Checkpoint directory from training run"""'], {}), "('checkpoint_dir', '',\n 'Checkpoint directory from training run')\n", (357, 425), True, 'import tensorflow as tf\n'), ((422, 525), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""eval_filepath"""', '"""match_valid.tsv"""', '"""Evaluate on this data (Default: None)"""'], {}), "('eval_filepath', 'match_valid.tsv',\n 'Evaluate on this data (Default: None)')\n", (444, 525), True, 'import tensorflow as tf\n'), ((522, 656), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""vocab_filepath"""', '"""runs/1479874609/checkpoints/vocab"""', '"""Load training time vocabulary (Default: None)"""'], {}), "('vocab_filepath',\n 'runs/1479874609/checkpoints/vocab',\n 'Load training time vocabulary (Default: None)')\n", (544, 656), True, 'import tensorflow as tf\n'), ((649, 776), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""model"""', '"""runs/1479874609/checkpoints/model-32000"""', '"""Load trained model checkpoint (Default: None)"""'], {}), "('model', 'runs/1479874609/checkpoints/model-32000',\n 'Load trained model checkpoint (Default: None)')\n", (671, 776), True, 'import tensorflow as tf\n'), ((792, 887), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (815, 887), True, 'import tensorflow as tf\n'), ((884, 977), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (907, 977), True, 'import tensorflow as tf\n'), ((1367, 1380), 'input_helpers.InputHelper', 'InputHelper', ([], {}), '()\n', (1378, 1380), False, 'from input_helpers import InputHelper\n'), ((1629, 1639), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1637, 1639), True, 'import tensorflow as tf\n'), ((1684, 1800), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'FLAGS.allow_soft_placement', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n', (1698, 1800), True, 'import tensorflow as tf\n'), ((1821, 1852), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (1831, 1852), True, 'import tensorflow as tf\n'), ((2034, 2063), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2061, 2063), True, 'import tensorflow as tf\n'), ((3374, 3426), 'numpy.concatenate', 'np.concatenate', (['[all_predictions, batch_predictions]'], {}), '([all_predictions, batch_predictions])\n', (3388, 3426), True, 'import numpy as np\n'), ((3484, 3512), 'numpy.concatenate', 'np.concatenate', (['[all_d, sim]'], {}), '([all_d, sim])\n', (3498, 3512), True, 'import numpy as np\n'), ((3657, 3681), 'numpy.mean', 'np.mean', (['(all_d == y_test)'], {}), '(all_d == y_test)\n', (3664, 3681), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
from keras.preprocessing import image
import os
import numpy as np
import matplotlib.pyplot as plt
# defining classes
def soil(result):
soil_type=""
if result[0]==2:
soil_type="Red soil"
elif result[0]==1:
soil_type="Black soil"
else:
soil_type="Alluvial soil"
return soil_type
# Adding dataset paths
PATH = 'new_datasets'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
test_dir = os.path.join(PATH, 'test')
train_red_dir = os.path.join(train_dir, 'Red_soil')
validation_red_dir = os.path.join(validation_dir, 'Red_soil')
train_black_dir = os.path.join(train_dir, 'Black_soil')
validation_black_dir = os.path.join(validation_dir, 'Black_soil')
train_all_dir = os.path.join(train_dir, 'Alluvial_soil')
validation_all_dir = os.path.join(validation_dir, 'Alluvial_soil')
num_soil_tr = len(os.listdir(train_red_dir)) + len(os.listdir(train_black_dir)) +len(os.listdir(train_all_dir))
num_soil_val = len(os.listdir(validation_red_dir)) + len(os.listdir(validation_black_dir)) + len((os.listdir(validation_all_dir)))
print("Total training images = ",num_soil_tr)
print("Total validation images = ",num_soil_val)
# hyperparameters
batch_size = 100
epochs = 15
IMG_HEIGHT = 128
IMG_WIDTH = 128
classes_num=3
# data generators
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='categorical')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
shuffle=True,
class_mode='categorical')
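# Illustrative check (not in the original script): flow_from_directory assigns class indices
# alphabetically, so the hard-coded mapping in soil() (0=Alluvial, 1=Black, 2=Red) can be
# verified against the generator, e.g.:
# print(train_data_gen.class_indices)  # expected: {'Alluvial_soil': 0, 'Black_soil': 1, 'Red_soil': 2}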
# defining the model
model = Sequential([
Conv2D(16, 5, activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.2),
Conv2D(32, 5, activation='relu'),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.2),
Conv2D(64, 5, activation='relu'),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.3),
Flatten(),
Dense(32, activation='relu'),
Dense(classes_num, activation='softmax')
])
model.compile(optimizer='adam',
              # categorical labels with a 3-class softmax output call for categorical cross-entropy
              loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
history = model.fit_generator(
train_data_gen,
steps_per_epoch= num_soil_tr// batch_size,
epochs=epochs,
validation_data=val_data_gen,
validation_steps=num_soil_val // batch_size
)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
# training and validation graphs
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
model.save('new_soil_classify.h5')
# testing the trained model on images from different classes
image_path="red.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = np.expand_dims(img, axis=0)
result=model.predict_classes(img)
plt.title(result[0])
plt.show()
image_path1="black.jpg"
img1 = image.load_img(image_path1, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img1)
img1 = np.expand_dims(img1, axis=0)
result=model.predict_classes(img1)
plt.title(result[0])
plt.show()
image_path="all.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = np.expand_dims(img, axis=0)
result=model.predict_classes(img)
plt.title(result[0])
plt.show()
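# Hedged alternative (assumes a newer TensorFlow/Keras where Sequential.predict_classes has
# been removed): class indices can be recovered from the softmax output with argmax instead.
# probs = model.predict(img)
# result = np.argmax(probs, axis=1)
# print(soil(result))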
|
[
"matplotlib.pyplot.imshow",
"os.listdir",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.plot",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"keras.preprocessing.image.load_img",
"tensorflow.keras.layers.Dropout",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dense",
"numpy.expand_dims",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((834, 861), 'os.path.join', 'os.path.join', (['PATH', '"""train"""'], {}), "(PATH, 'train')\n", (846, 861), False, 'import os\n'), ((879, 911), 'os.path.join', 'os.path.join', (['PATH', '"""validation"""'], {}), "(PATH, 'validation')\n", (891, 911), False, 'import os\n'), ((923, 949), 'os.path.join', 'os.path.join', (['PATH', '"""test"""'], {}), "(PATH, 'test')\n", (935, 949), False, 'import os\n'), ((967, 1002), 'os.path.join', 'os.path.join', (['train_dir', '"""Red_soil"""'], {}), "(train_dir, 'Red_soil')\n", (979, 1002), False, 'import os\n'), ((1024, 1064), 'os.path.join', 'os.path.join', (['validation_dir', '"""Red_soil"""'], {}), "(validation_dir, 'Red_soil')\n", (1036, 1064), False, 'import os\n'), ((1084, 1121), 'os.path.join', 'os.path.join', (['train_dir', '"""Black_soil"""'], {}), "(train_dir, 'Black_soil')\n", (1096, 1121), False, 'import os\n'), ((1145, 1187), 'os.path.join', 'os.path.join', (['validation_dir', '"""Black_soil"""'], {}), "(validation_dir, 'Black_soil')\n", (1157, 1187), False, 'import os\n'), ((1205, 1245), 'os.path.join', 'os.path.join', (['train_dir', '"""Alluvial_soil"""'], {}), "(train_dir, 'Alluvial_soil')\n", (1217, 1245), False, 'import os\n'), ((1267, 1312), 'os.path.join', 'os.path.join', (['validation_dir', '"""Alluvial_soil"""'], {}), "(validation_dir, 'Alluvial_soil')\n", (1279, 1312), False, 'import os\n'), ((1793, 1830), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1811, 1830), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1858, 1895), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1876, 1895), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3744, 3770), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3754, 3770), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3791), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3782, 3791), True, 'import matplotlib.pyplot as plt\n'), ((3792, 3846), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), "(epochs_range, acc, label='Training Accuracy')\n", (3800, 3846), True, 'import matplotlib.pyplot as plt\n'), ((3847, 3907), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""'}), "(epochs_range, val_acc, label='Validation Accuracy')\n", (3855, 3907), True, 'import matplotlib.pyplot as plt\n'), ((3908, 3937), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3918, 3937), True, 'import matplotlib.pyplot as plt\n'), ((3938, 3983), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (3947, 3983), True, 'import matplotlib.pyplot as plt\n'), ((3985, 4005), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3996, 4005), True, 'import matplotlib.pyplot as plt\n'), ((4006, 4057), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (4014, 4057), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4115), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), 
"(epochs_range, val_loss, label='Validation Loss')\n", (4066, 4115), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4145), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4126, 4145), True, 'import matplotlib.pyplot as plt\n'), ((4146, 4187), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (4155, 4187), True, 'import matplotlib.pyplot as plt\n'), ((4188, 4198), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4196, 4198), True, 'import matplotlib.pyplot as plt\n'), ((4323, 4386), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_path'], {'target_size': '(IMG_HEIGHT, IMG_WIDTH)'}), '(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))\n', (4337, 4386), False, 'from keras.preprocessing import image\n'), ((4387, 4402), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4397, 4402), True, 'import matplotlib.pyplot as plt\n'), ((4409, 4436), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (4423, 4436), True, 'import numpy as np\n'), ((4471, 4491), 'matplotlib.pyplot.title', 'plt.title', (['result[0]'], {}), '(result[0])\n', (4480, 4491), True, 'import matplotlib.pyplot as plt\n'), ((4492, 4502), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4500, 4502), True, 'import matplotlib.pyplot as plt\n'), ((4536, 4600), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_path1'], {'target_size': '(IMG_HEIGHT, IMG_WIDTH)'}), '(image_path1, target_size=(IMG_HEIGHT, IMG_WIDTH))\n', (4550, 4600), False, 'from keras.preprocessing import image\n'), ((4601, 4617), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img1'], {}), '(img1)\n', (4611, 4617), True, 'import matplotlib.pyplot as plt\n'), ((4625, 4653), 'numpy.expand_dims', 'np.expand_dims', (['img1'], {'axis': '(0)'}), '(img1, axis=0)\n', (4639, 4653), True, 'import numpy as np\n'), ((4689, 4709), 'matplotlib.pyplot.title', 'plt.title', (['result[0]'], {}), '(result[0])\n', (4698, 4709), True, 'import matplotlib.pyplot as plt\n'), ((4710, 4720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4718, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4749, 4812), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_path'], {'target_size': '(IMG_HEIGHT, IMG_WIDTH)'}), '(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))\n', (4763, 4812), False, 'from keras.preprocessing import image\n'), ((4813, 4828), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4823, 4828), True, 'import matplotlib.pyplot as plt\n'), ((4835, 4862), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (4849, 4862), True, 'import numpy as np\n'), ((4897, 4917), 'matplotlib.pyplot.title', 'plt.title', (['result[0]'], {}), '(result[0])\n', (4906, 4917), True, 'import matplotlib.pyplot as plt\n'), ((4918, 4928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4926, 4928), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1424), 'os.listdir', 'os.listdir', (['train_all_dir'], {}), '(train_all_dir)\n', (1409, 1424), False, 'import os\n'), ((1524, 1554), 'os.listdir', 'os.listdir', (['validation_all_dir'], {}), '(validation_all_dir)\n', (1534, 1554), False, 'import os\n'), ((2783, 2855), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(5)'], {'activation': '"""relu"""', 'input_shape': '(IMG_HEIGHT, IMG_WIDTH, 3)'}), "(16, 5, activation='relu', input_shape=(IMG_HEIGHT, 
IMG_WIDTH, 3))\n", (2789, 2855), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2861, 2891), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (2873, 2891), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2897, 2909), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2904, 2909), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2915, 2947), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(5)'], {'activation': '"""relu"""'}), "(32, 5, activation='relu')\n", (2921, 2947), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2953, 2983), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (2965, 2983), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2989, 3001), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2996, 3001), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3007, 3039), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(5)'], {'activation': '"""relu"""'}), "(64, 5, activation='relu')\n", (3013, 3039), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3045, 3075), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (3057, 3075), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3081, 3093), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (3088, 3093), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3099, 3108), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3106, 3108), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3114, 3142), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (3119, 3142), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3148, 3188), 'tensorflow.keras.layers.Dense', 'Dense', (['classes_num'], {'activation': '"""softmax"""'}), "(classes_num, activation='softmax')\n", (3153, 3188), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((1332, 1357), 'os.listdir', 'os.listdir', (['train_red_dir'], {}), '(train_red_dir)\n', (1342, 1357), False, 'import os\n'), ((1365, 1392), 'os.listdir', 'os.listdir', (['train_black_dir'], {}), '(train_black_dir)\n', (1375, 1392), False, 'import os\n'), ((1445, 1475), 'os.listdir', 'os.listdir', (['validation_red_dir'], {}), '(validation_red_dir)\n', (1455, 1475), False, 'import os\n'), ((1483, 1515), 'os.listdir', 'os.listdir', (['validation_black_dir'], {}), '(validation_black_dir)\n', (1493, 1515), False, 'import os\n')]
|
"""Machine Learning"""
import importlib
import numpy as np
import pandas as pd
import json
from jsonschema import validate
from sklearn.pipeline import make_pipeline
from timeflux.core.node import Node
from timeflux.core.exceptions import ValidationError, WorkerInterrupt
from timeflux.helpers.background import Task
from timeflux.helpers.port import make_event, match_events, get_meta
from timeflux.helpers.clock import now, min_time, max_time
# Statuses
IDLE = 0
ACCUMULATING = 1
FITTING = 2
READY = 3
class Pipeline(Node):
"""Fit, transform and predict.
Training on continuous data is always unsupervised.
Training on epoched data can either be supervised or unsupervised.
    If fit is `False`, input events are ignored, and initial training is not performed.
    Automatically set to False if mode is either 'fit_predict' or 'fit_transform'.
Automatically set to True if mode is either 'predict', 'predict_proba' or 'predict_log_proba'.
Attributes:
i (Port): Continuous data input, expects DataFrame.
i_* (Port): Epoched data input, expects DataFrame.
i_training (Port): Continuous training data input, expects DataFrame.
i_training_* (Port): Epoched training data input, expects DataFrame.
i_events (Port): Event input, expects DataFrame.
o (Port): Continuous data output, provides DataFrame.
o_* (Port): Epoched data output, provides DataFrame.
o_events (Port): Event output, provides DataFrame.
Args:
        steps (list): Pipeline steps and settings.
        fit (bool): Whether to accumulate training data and fit the model. Automatically adjusted according to `mode`.
        mode ('predict'|'predict_proba'|'predict_log_proba'|'transform'|'fit_predict'|'fit_transform'): Method called on the pipeline when data arrives.
        meta_label (str|tuple|None): Path to the target label in the epoch meta. Set to None for unsupervised training.
        event_start_accumulation (str): Event label marking the start of accumulation.
        event_stop_accumulation (str): Event label marking the end of accumulation.
        event_start_training (str): Event label that triggers model fitting.
        event_reset (str): Event label that resets the node.
        buffer_size (str): Amount of data to buffer while idle, in case the start event arrives late.
        passthrough (bool): Whether to forward inputs to outputs while the model is not ready.
        resample (bool): Whether to resample transformed data whose length does not match the input.
        resample_direction ('right'|'left'|'both'): Where to anchor the resampled timestamps.
        resample_rate (None|float): Output rate, in hertz. If None, the frequency is inferred from the input timestamps.
model: Load a pickle model - NOT IMPLEMENTED
cv: Cross-validation - NOT IMPLEMENTED
"""
def __init__(
self,
steps,
fit=True,
mode="predict",
meta_label=("epoch", "context", "target"),
event_start_accumulation="accumulation_starts",
event_stop_accumulation="accumulation_stops",
event_start_training="training_starts",
event_reset=None,
buffer_size="5s",
passthrough=False,
resample=False,
resample_direction="right",
resample_rate=None,
model=None,
cv=None,
use_task = True,
):
# TODO: validation
# TODO: model loading from file
# TODO: cross-validation
# TODO: provide more context for errors
self.fit = fit
self.mode = mode
self.meta_label = meta_label
self.event_start_accumulation = event_start_accumulation
self.event_stop_accumulation = event_stop_accumulation
self.event_start_training = event_start_training
self.event_reset = event_reset
self.passthrough = passthrough
self.resample = resample
self.resample_direction = resample_direction
self.resample_rate = resample_rate
self.use_task = use_task
self._buffer_size = pd.Timedelta(buffer_size)
self._make_pipeline(steps)
self._reset()
def update(self):
# Let's get ready
self._clear()
# Reset
if self.event_reset:
matches = match_events(self.i_events, self.event_reset)
if matches is not None:
self.logger.debug("Reset")
if self._task is not None:
if self._status == FITTING:
self._task.stop()
self._reset()
# Are we dealing with continuous data or epochs?
if self._dimensions is None:
port_name = "i_training" if self.fit else "i"
if getattr(self, port_name).ready():
self._dimensions = 2
elif len(list(self.iterate(port_name + "_*"))) > 0:
self._dimensions = 3
# Set the accumulation boundaries
if self._accumulation_start is None:
matches = match_events(self.i_events, self.event_start_accumulation)
if matches is not None:
self._accumulation_start = matches.index.values[0]
self._status = ACCUMULATING
self.logger.debug("Start accumulation")
if self._accumulation_stop is None:
matches = match_events(self.i_events, self.event_stop_accumulation)
if matches is not None:
self._accumulation_stop = matches.index.values[0]
self.logger.debug("Stop accumulation")
# Always buffer a few seconds, in case the start event is coming late
if self._status == IDLE:
start = (now() - self._buffer_size).to_datetime64()
stop = max_time()
self._accumulate(start, stop)
# Accumulate between boundaries
if self._status == ACCUMULATING:
start = self._accumulation_start
stop = self._accumulation_stop if self._accumulation_stop else max_time()
self._accumulate(start, stop)
# Should we start fitting the model?
if self._status < FITTING:
if match_events(self.i_events, self.event_start_training) is not None:
self._status = FITTING
self.logger.debug("Start training")
if self.use_task:
self._task = Task(
self._pipeline, "fit", self._X_train, self._y_train
).start()
else:
try:
self._pipeline = self._pipeline.fit(self._X_train, self._y_train)
self._fitted_success = True
except Exception as error:
self._fitted_success = False
# Is the model ready?
if self._status == FITTING:
ready_to_proceed = False
if self.use_task:
status = self._task.status()
if status:
ready_to_proceed = True
else:
ready_to_proceed = True
if ready_to_proceed:
if self.use_task:
success = status["success"]
else:
success = self._fitted_success
if success:
if self.use_task:
self._pipeline = status["instance"]
self.logger.debug(f"Model fitted in {status['time']} seconds")
else:
self.logger.debug(f"Model fitted")
self._status = READY
# TODO: this can potentially be overwritten in _send()
self.o_events.data = make_event("ready")
else:
if self.use_task:
self.logger.error(
f"An error occured while fitting: {status['exception'].args[0]}"
)
self.logger.debug(
"\nTraceback (most recent call last):\n"
+ "".join(status["traceback"])
)
else:
self.logger.error(
f"An error occured while fitting"
)
raise WorkerInterrupt()
# Run the pipeline
if self._status == READY:
self._receive()
if self._X is not None:
args = [self._X]
if self.mode.startswith("fit"):
args.append(self._y)
# TODO: optionally loop through epochs instead of sending them all at once
self._out = getattr(self._pipeline, self.mode)(*args)
# Set output streams
self._send()
def terminate(self):
# Kill the fit subprocess
if self._task is not None:
self._task.stop()
def _reset(self):
self._X_train = None
self._y_train = None
self._X_train_indices = np.array([], dtype=np.datetime64)
self._accumulation_start = None
self._accumulation_stop = None
self._dimensions = None
self._shape = ()
self._task = None
if self.mode.startswith("fit"):
self.fit = False
elif self.mode.startswith("predict"):
self.fit = True
if self.fit:
self._status = IDLE
else:
self._status = READY
def _clear(self):
self._X = None
self._y = None
self._X_indices = []
self._X_columns = []
self._X_meta = None
self._out = None
def _make_pipeline(self, steps):
schema = {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"module": {"type": "string"},
"class": {"type": "string"},
"args": {"type": "object"},
},
"required": ["module", "class"],
},
}
try:
validate(instance=steps, schema=schema)
except Exception as error:
raise ValidationError("steps", error.message)
pipeline = []
for step in steps:
try:
args = step["args"] if "args" in step else {}
m = importlib.import_module(step["module"])
c = getattr(m, step["class"])
i = c(**args)
pipeline.append(i)
except ImportError as error:
raise ValidationError("steps", f"could not import '{step['module']}'")
except AttributeError as error:
raise ValidationError(
"steps", f"could not find class '{step['class']}'"
)
except TypeError as error:
raise ValidationError(
"steps",
f"could not instantiate class '{step['class']}' with the given params",
)
# TODO: memory and verbose args
self._pipeline = make_pipeline(*pipeline, memory=None, verbose=False)
def _accumulate(self, start, stop):
# Do nothing if no fitting required
if not self.fit:
return
# Set defaults
indices = np.array([], dtype=np.datetime64)
# Accumulate continuous data
if self._dimensions == 2:
if self.i_training.ready():
data = self.i_training.data
mask = (data.index >= start) & (data.index < stop)
data = data[mask]
if not data.empty:
if self._X_train is None:
self._X_train = data.values
self._shape = self._X_train.shape[1]
indices = data.index.values
else:
if data.shape[1] == self._shape:
self._X_train = np.vstack((self._X_train, data.values))
indices = data.index.values
else:
self.logger.warning("Invalid shape")
# Accumulate epoched data
if self._dimensions == 3:
for _, _, port in self.iterate("i_training_*"):
if port.ready():
index = port.data.index.values[0]
if index >= start and index < stop:
data = port.data.values
label = get_meta(port, self.meta_label)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X_train is None:
self._X_train = np.array([data])
self._shape = self._X_train.shape[1:]
else:
self._X_train = np.vstack((self._X_train, [data]))
indices = np.append(indices, index)
if label is not None:
if self._y_train is None:
self._y_train = np.array([label])
else:
self._y_train = np.append(self._y_train, [label])
# Store indices
if indices.size != 0:
self._X_train_indices = np.append(self._X_train_indices, indices)
# Trim
if self._X_train is not None:
mask = (self._X_train_indices >= start) & (self._X_train_indices < stop)
self._X_train = self._X_train[mask]
self._X_train_indices = self._X_train_indices[mask]
if self._y_train is not None:
self._y_train = self._y_train[mask]
def _receive(self):
# Continuous data
if self._dimensions == 2:
if self.i.ready():
if not self._X_columns:
self._X_columns = list(self.i.data.columns)
if self._shape and (self.i.data.shape[1] != self._shape):
self.logger.warning("Invalid shape")
else:
self._X = self.i.data.values
self._X_indices = self.i.data.index.values
self._X_meta = self.i.meta
# Epochs
if self._dimensions == 3:
for name, _, port in self.iterate("i_*"):
if port.ready() and "training" not in name and "events" not in name:
data = port.data.values
meta = port.meta
indices = port.data.index.values
label = get_meta(port, self.meta_label)
if not self._X_columns:
self._X_columns = list(port.data.columns)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if not self.fit and self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X is None:
self._X = []
if self._y is None and label is not None:
self._y = []
if self._X_meta is None:
self._X_meta = []
self._X.append(data)
self._X_indices.append(indices)
self._X_meta.append(meta)
if label is not None:
self._y.append(label)
def _send(self):
# Passthrough
if self._status < READY and self.passthrough:
inputs = []
for _, suffix, port in self.iterate("i*"):
if not suffix.startswith("_training") and not suffix.startswith(
"_events"
):
inputs.append((suffix, port))
for suffix, src_port in inputs:
dst_port = getattr(self, "o" + suffix)
dst_port.data = src_port.data
dst_port.meta = src_port.meta
# Model
if self._out is not None:
if "predict" in self.mode:
# Send events
if len(self._X_indices) == len(self._out):
# TODO: skip JSON serialization?
data = [
[self.mode, json.dumps({"result": self._np_to_native(result)})]
for result in self._out
]
times = (
self._X_indices
if self._dimensions == 2
else np.asarray(self._X_indices)[:, 0]
) # Keep the first timestamp of each epoch
names = ["label", "data"]
meta = (
self._X_meta
if self._dimensions == 2
else {"epochs": self._X_meta}
) # port.meta should always be an object
self.o_events.set(data, times, names, meta)
else:
self.logger.warning(
"Number of predictions inconsistent with input length"
)
else:
# Send data
if self._dimensions == 2:
try:
self.o.data = self._reindex(
self._out, self._X_indices, self._X_columns
)
self.o.meta = self._X_meta
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
if self._dimensions == 3:
if len(self._X_indices) == len(self._out):
for i, (data, times) in enumerate(
zip(self._out, self._X_indices)
):
try:
getattr(self, "o_" + str(i)).data = self._reindex(
data, times, self._X_columns
)
getattr(self, "o_" + str(i)).meta = self._X_meta[i]
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
else:
self.logger.warning(
"Number of transforms inconsistent with number of epochs"
)
def _np_to_native(self, data):
"""Convert numpy scalars and objects to native types."""
return getattr(data, "tolist", lambda: data)()
def _reindex(self, data, times, columns):
if len(data) != len(times):
if self.resample:
# Resample at a specific frequency
kwargs = {"periods": len(data)}
if self.resample_rate is None:
kwargs["freq"] = pd.infer_freq(times)
kwargs["freq"] = pd.tseries.frequencies.to_offset(kwargs["freq"])
else:
kwargs["freq"] = pd.DateOffset(seconds=1 / self.resample_rate)
if self.resample_direction == "right":
kwargs["start"] = times[0]
elif self.resample_direction == "left":
kwargs["end"] = times[-1]
else:
def middle(a):
return int(np.ceil(len(a) / 2)) - 1
kwargs["start"] = times[middle(times)] - (
middle(data) * kwargs["freq"]
)
times = pd.date_range(**kwargs)
else:
# Linearly arange between first and last
times = pd.date_range(start=times[0], end=times[-1], periods=len(data))
return pd.DataFrame(data, times, columns)
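# Illustrative configuration (an assumption, not part of this module): the "steps" argument
# validated in _make_pipeline is a list of {"module", "class", "args"} objects, for example:
# steps = [
#     {"module": "sklearn.preprocessing", "class": "StandardScaler"},
#     {"module": "sklearn.svm", "class": "SVC", "args": {"kernel": "rbf"}},
# ]
# Each entry is imported with importlib, instantiated with its args, and chained with make_pipeline.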
|
[
"timeflux.core.exceptions.WorkerInterrupt",
"timeflux.helpers.clock.max_time",
"pandas.infer_freq",
"numpy.array",
"timeflux.helpers.clock.now",
"timeflux.core.exceptions.ValidationError",
"pandas.date_range",
"timeflux.helpers.background.Task",
"timeflux.helpers.port.make_event",
"numpy.asarray",
"numpy.vstack",
"pandas.DataFrame",
"importlib.import_module",
"sklearn.pipeline.make_pipeline",
"jsonschema.validate",
"pandas.DateOffset",
"timeflux.helpers.port.match_events",
"timeflux.helpers.port.get_meta",
"pandas.Timedelta",
"numpy.append",
"pandas.tseries.frequencies.to_offset"
] |
[((3357, 3382), 'pandas.Timedelta', 'pd.Timedelta', (['buffer_size'], {}), '(buffer_size)\n', (3369, 3382), True, 'import pandas as pd\n'), ((8422, 8455), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.datetime64'}), '([], dtype=np.datetime64)\n', (8430, 8455), True, 'import numpy as np\n'), ((10524, 10576), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['*pipeline'], {'memory': 'None', 'verbose': '(False)'}), '(*pipeline, memory=None, verbose=False)\n', (10537, 10576), False, 'from sklearn.pipeline import make_pipeline\n'), ((10749, 10782), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.datetime64'}), '([], dtype=np.datetime64)\n', (10757, 10782), True, 'import numpy as np\n'), ((19682, 19716), 'pandas.DataFrame', 'pd.DataFrame', (['data', 'times', 'columns'], {}), '(data, times, columns)\n', (19694, 19716), True, 'import pandas as pd\n'), ((3580, 3625), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_reset'], {}), '(self.i_events, self.event_reset)\n', (3592, 3625), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((4318, 4376), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_start_accumulation'], {}), '(self.i_events, self.event_start_accumulation)\n', (4330, 4376), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((4646, 4703), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_stop_accumulation'], {}), '(self.i_events, self.event_stop_accumulation)\n', (4658, 4703), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((5056, 5066), 'timeflux.helpers.clock.max_time', 'max_time', ([], {}), '()\n', (5064, 5066), False, 'from timeflux.helpers.clock import now, min_time, max_time\n'), ((9510, 9549), 'jsonschema.validate', 'validate', ([], {'instance': 'steps', 'schema': 'schema'}), '(instance=steps, schema=schema)\n', (9518, 9549), False, 'from jsonschema import validate\n'), ((13057, 13098), 'numpy.append', 'np.append', (['self._X_train_indices', 'indices'], {}), '(self._X_train_indices, indices)\n', (13066, 13098), True, 'import numpy as np\n'), ((5311, 5321), 'timeflux.helpers.clock.max_time', 'max_time', ([], {}), '()\n', (5319, 5321), False, 'from timeflux.helpers.clock import now, min_time, max_time\n'), ((5460, 5514), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_start_training'], {}), '(self.i_events, self.event_start_training)\n', (5472, 5514), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((9603, 9642), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'error.message'], {}), "('steps', error.message)\n", (9618, 9642), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((9791, 9830), 'importlib.import_module', 'importlib.import_module', (["step['module']"], {}), "(step['module'])\n", (9814, 9830), False, 'import importlib\n'), ((19478, 19501), 'pandas.date_range', 'pd.date_range', ([], {}), '(**kwargs)\n', (19491, 19501), True, 'import pandas as pd\n'), ((7065, 7084), 'timeflux.helpers.port.make_event', 'make_event', (['"""ready"""'], {}), "('ready')\n", (7075, 7084), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((7704, 7721), 'timeflux.core.exceptions.WorkerInterrupt', 'WorkerInterrupt', ([], {}), '()\n', (7719, 7721), False, 'from timeflux.core.exceptions import ValidationError, 
WorkerInterrupt\n'), ((10005, 10069), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'f"""could not import \'{step[\'module\']}\'"""'], {}), '(\'steps\', f"could not import \'{step[\'module\']}\'")\n', (10020, 10069), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((10136, 10203), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'f"""could not find class \'{step[\'class\']}\'"""'], {}), '(\'steps\', f"could not find class \'{step[\'class\']}\'")\n', (10151, 10203), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((10303, 10403), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'f"""could not instantiate class \'{step[\'class\']}\' with the given params"""'], {}), '(\'steps\',\n f"could not instantiate class \'{step[\'class\']}\' with the given params")\n', (10318, 10403), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((14330, 14361), 'timeflux.helpers.port.get_meta', 'get_meta', (['port', 'self.meta_label'], {}), '(port, self.meta_label)\n', (14338, 14361), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((18780, 18800), 'pandas.infer_freq', 'pd.infer_freq', (['times'], {}), '(times)\n', (18793, 18800), True, 'import pandas as pd\n'), ((18838, 18886), 'pandas.tseries.frequencies.to_offset', 'pd.tseries.frequencies.to_offset', (["kwargs['freq']"], {}), "(kwargs['freq'])\n", (18870, 18886), True, 'import pandas as pd\n'), ((18946, 18991), 'pandas.DateOffset', 'pd.DateOffset', ([], {'seconds': '(1 / self.resample_rate)'}), '(seconds=1 / self.resample_rate)\n', (18959, 18991), True, 'import pandas as pd\n'), ((4994, 4999), 'timeflux.helpers.clock.now', 'now', ([], {}), '()\n', (4997, 4999), False, 'from timeflux.helpers.clock import now, min_time, max_time\n'), ((11956, 11987), 'timeflux.helpers.port.get_meta', 'get_meta', (['port', 'self.meta_label'], {}), '(port, self.meta_label)\n', (11964, 11987), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((12658, 12683), 'numpy.append', 'np.append', (['indices', 'index'], {}), '(indices, index)\n', (12667, 12683), True, 'import numpy as np\n'), ((5686, 5743), 'timeflux.helpers.background.Task', 'Task', (['self._pipeline', '"""fit"""', 'self._X_train', 'self._y_train'], {}), "(self._pipeline, 'fit', self._X_train, self._y_train)\n", (5690, 5743), False, 'from timeflux.helpers.background import Task\n'), ((11413, 11452), 'numpy.vstack', 'np.vstack', (['(self._X_train, data.values)'], {}), '((self._X_train, data.values))\n', (11422, 11452), True, 'import numpy as np\n'), ((12432, 12448), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (12440, 12448), True, 'import numpy as np\n'), ((12589, 12623), 'numpy.vstack', 'np.vstack', (['(self._X_train, [data])'], {}), '((self._X_train, [data]))\n', (12598, 12623), True, 'import numpy as np\n'), ((16421, 16448), 'numpy.asarray', 'np.asarray', (['self._X_indices'], {}), '(self._X_indices)\n', (16431, 16448), True, 'import numpy as np\n'), ((12832, 12849), 'numpy.array', 'np.array', (['[label]'], {}), '([label])\n', (12840, 12849), True, 'import numpy as np\n'), ((12932, 12965), 'numpy.append', 'np.append', (['self._y_train', '[label]'], {}), '(self._y_train, [label])\n', (12941, 12965), True, 'import numpy as np\n')]
|
# Copyright 2020 by <NAME>, Solis-Lemus Lab, WID.
# All rights reserved.
# This file is part of the BioKlustering Website.
import pandas as pd
from Bio import SeqIO
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import numpy as np
import os
from .helpers import plotly_dash_show_plot
def parseFasta(data):
d = {fasta.id : str(fasta.seq) for fasta in SeqIO.parse(data, "fasta")}
pd.DataFrame([d])
s = pd.Series(d, name='Sequence')
s.index.name = 'ID'
s.reset_index()
return pd.DataFrame(s)
def kmerXTable(s, a, b):
tfid_vector = TfidfVectorizer(analyzer='char', ngram_range=(a,b))
s_hat = tfid_vector.fit_transform(s.Sequence)
kmerNames = tfid_vector.get_feature_names()
kmers = s_hat.toarray()
return pd.DataFrame(kmers,columns=kmerNames, index = s.index)
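# Minimal usage sketch (hypothetical file name): build 2- to 3-mer TF-IDF features for
# sequences parsed with parseFasta; rows are sequence IDs, columns are k-mers.
# seqs = parseFasta("example.fasta")
# features = kmerXTable(seqs, 2, 3)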
# credit to chunrong
def read_fasta_sequences(sequence_paths):
all_sequences = pd.DataFrame()
for path in sequence_paths:
path = os.path.join("media", path)
sequence = parseFasta(path)
all_sequences = pd.concat([all_sequences, sequence])
return all_sequences
def kmeans(userId, fasta, klength_min, klength_max, rNum, cNum, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
km = KMeans(random_state = rNum, n_clusters = cNum)
km.fit(kmerXTableInput)
y_hat = km.predict(kmerXTableInput)
plotly_kmertable = kmerXTableInput
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerXTableInput)
plot_div = plotly_dash_show_plot(userId, plotly_kmertable, y_hat, "Unsupervised Kmeans", method)
inputData.insert(0, "Labels", y_hat)
return [[inputData], [plot_div]]
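# Hedged usage sketch (all arguments are hypothetical): cluster uploaded sequences into two
# groups using 2- to 3-mers and plot with PCA; paths are resolved relative to the "media" folder.
# results, plots = kmeans("user1", ["sequences.fasta"], 2, 3, rNum=42, cNum=2, method="PCA")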
def kmeans_semiSupervised(userId, fasta, klength_min, klength_max, rNum, y_hat, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
PCAembedding = PCA(n_components=10)
NkmerXTableInput = preprocessing.normalize(kmerXTableInput)
PCAembedding_low = PCAembedding.fit_transform(NkmerXTableInput)
ms = MeanShift()
ms.fit(PCAembedding_low)
cluster_centers = ms.cluster_centers_
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
kmms = KMeans(init = cluster_centers, n_clusters = len(cluster_centers))
kmms_labels = kmms.fit_predict(PCAembedding_low)
# convert all clusters into two clusters
kmerXTableInput["pLabels"] = kmms_labels
kmerXTableInput["aLabels"] = y_hat.tolist()
newLabels_clusters_1 = kmerXTableInput[kmerXTableInput["aLabels"] == 1]["pLabels"].tolist()
newLabels_clusters_0 = kmerXTableInput[kmerXTableInput["aLabels"] == 0]["pLabels"].tolist()
newLabels = []
for label in kmms_labels:
if newLabels_clusters_1.count(label) > newLabels_clusters_0.count(label):
newLabels.append(1)
else:
newLabels.append(0)
kmerTable = kmerXTableInput.drop(columns=["pLabels", "aLabels"])
plotly_kmertable = kmerTable
plotly_labels = np.array(newLabels)
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerTable)
plotly_div = plotly_dash_show_plot(userId, plotly_kmertable, plotly_labels, "Semi-supervised Kmeans", method)
inputData.insert(0, "Labels", newLabels)
return [[inputData], [plotly_div]]
|
[
"pandas.Series",
"sklearn.cluster.KMeans",
"sklearn.decomposition.PCA",
"os.path.join",
"warnings.catch_warnings",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer",
"warnings.simplefilter",
"Bio.SeqIO.parse",
"pandas.DataFrame",
"sklearn.cluster.MeanShift",
"sklearn.preprocessing.normalize",
"pandas.concat"
] |
[((546, 563), 'pandas.DataFrame', 'pd.DataFrame', (['[d]'], {}), '([d])\n', (558, 563), True, 'import pandas as pd\n'), ((573, 602), 'pandas.Series', 'pd.Series', (['d'], {'name': '"""Sequence"""'}), "(d, name='Sequence')\n", (582, 602), True, 'import pandas as pd\n'), ((658, 673), 'pandas.DataFrame', 'pd.DataFrame', (['s'], {}), '(s)\n', (670, 673), True, 'import pandas as pd\n'), ((718, 770), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""char"""', 'ngram_range': '(a, b)'}), "(analyzer='char', ngram_range=(a, b))\n", (733, 770), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((907, 960), 'pandas.DataFrame', 'pd.DataFrame', (['kmers'], {'columns': 'kmerNames', 'index': 's.index'}), '(kmers, columns=kmerNames, index=s.index)\n', (919, 960), True, 'import pandas as pd\n'), ((1046, 1060), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1058, 1060), True, 'import pandas as pd\n'), ((1546, 1588), 'sklearn.cluster.KMeans', 'KMeans', ([], {'random_state': 'rNum', 'n_clusters': 'cNum'}), '(random_state=rNum, n_clusters=cNum)\n', (1552, 1588), False, 'from sklearn.cluster import KMeans\n'), ((2300, 2320), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(10)'}), '(n_components=10)\n', (2303, 2320), False, 'from sklearn.decomposition import PCA\n'), ((2344, 2384), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['kmerXTableInput'], {}), '(kmerXTableInput)\n', (2367, 2384), False, 'from sklearn import preprocessing\n'), ((2467, 2478), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {}), '()\n', (2476, 2478), False, 'from sklearn.cluster import MeanShift\n'), ((3450, 3469), 'numpy.array', 'np.array', (['newLabels'], {}), '(newLabels)\n', (3458, 3469), True, 'import numpy as np\n'), ((1108, 1135), 'os.path.join', 'os.path.join', (['"""media"""', 'path'], {}), "('media', path)\n", (1120, 1135), False, 'import os\n'), ((1196, 1232), 'pandas.concat', 'pd.concat', (['[all_sequences, sequence]'], {}), '([all_sequences, sequence])\n', (1205, 1232), True, 'import pandas as pd\n'), ((1753, 1793), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['kmerXTableInput'], {}), '(kmerXTableInput)\n', (1776, 1793), False, 'from sklearn import preprocessing\n'), ((2580, 2605), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2603, 2605), False, 'import warnings\n'), ((2615, 2646), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2636, 2646), False, 'import warnings\n'), ((3521, 3555), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['kmerTable'], {}), '(kmerTable)\n', (3544, 3555), False, 'from sklearn import preprocessing\n'), ((514, 540), 'Bio.SeqIO.parse', 'SeqIO.parse', (['data', '"""fasta"""'], {}), "(data, 'fasta')\n", (525, 540), False, 'from Bio import SeqIO\n')]
|
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
concat,
)
import pandas._testing as tm
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
result = getattr(frame.ewm(span=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=5), name)(B)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
# GH 7898
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
empty = Series([], dtype=np.float64)
result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
tm.assert_series_equal(result, empty)
# check series of length 1
result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
Series([1.0])
)
tm.assert_series_equal(result, Series([np.NaN]))
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
A = Series(np.random.randn(50), index=np.arange(50))
A[:10] = np.NaN
msg = "other must be a DataFrame or Series"
# exception raised is Exception
with pytest.raises(ValueError, match=msg):
getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
def create_mock_weights(obj, com, adjust, ignore_na):
if isinstance(obj, DataFrame):
if not len(obj.columns):
return DataFrame(index=obj.index, columns=obj.columns)
w = concat(
[
create_mock_series_weights(
obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
)
for i, _ in enumerate(obj.columns)
],
axis=1,
)
w.index = obj.index
w.columns = obj.columns
return w
else:
return create_mock_series_weights(obj, com, adjust, ignore_na)
def create_mock_series_weights(s, com, adjust, ignore_na):
w = Series(np.nan, index=s.index)
alpha = 1.0 / (1.0 + com)
if adjust:
count = 0
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1.0 / (1.0 - alpha), count)
count += 1
elif not ignore_na:
count += 1
else:
sum_wts = 0.0
prev_i = -1
count = 0
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.0
else:
w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, count - prev_i)
sum_wts += w.iat[i]
prev_i = count
count += 1
elif not ignore_na:
count += 1
return w
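# Worked example of the weighting scheme above (values derived from this helper, not from pandas):
# with com=1 (alpha=0.5), adjust=True and no NaNs, three observations get weights
# (1/(1-alpha))**count = [1.0, 2.0, 4.0]; with adjust=False the recursive branch applies instead.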
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
result = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
expected = (
x.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill")
)
expected[
x.expanding().count() < (max(min_periods, 1) if min_periods else 1)
] = np.nan
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding().count()
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_var_debiasing_factors(
consistency_data, adjust, ignore_na, min_periods
):
x, is_constant, no_nans = consistency_data
com = 3.0
# check variance debiasing factors
var_unbiased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=False)
var_biased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=True)
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method="ffill")
cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill")
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.0] = np.nan
var_debiasing_factors_x = numerator / denominator
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
if bias:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (
(x * x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.mean()
)
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var_constant(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if not bias:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
cov_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_series_cov_corr(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if isinstance(x, Series):
var_x_plus_y = (
(x + x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.var(bias=bias)
)
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
var_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
cov_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x, bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
std_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if bias:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_x_times_y = (
(x * x)
.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
)
.mean()
)
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
|
[
"pandas.Series",
"pandas._testing.assert_series_equal",
"pytest.mark.parametrize",
"pandas._testing.assert_equal",
"pytest.raises",
"numpy.isnan",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.arange"
] |
[((128, 176), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "['cov', 'corr']"], {}), "('func', ['cov', 'corr'])\n", (151, 176), False, 'import pytest\n'), ((520, 568), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['cov', 'corr']"], {}), "('name', ['cov', 'corr'])\n", (543, 568), False, 'import pytest\n'), ((893, 942), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2]'], {}), "('min_periods', [0, 1, 2])\n", (916, 942), False, 'import pytest\n'), ((944, 992), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['cov', 'corr']"], {}), "('name', ['cov', 'corr'])\n", (967, 992), False, 'import pytest\n'), ((1843, 1891), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['cov', 'corr']"], {}), "('name', ['cov', 'corr'])\n", (1866, 1891), False, 'import pytest\n'), ((3705, 3760), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (3728, 3760), False, 'import pytest\n'), ((4368, 4423), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (4391, 4423), False, 'import pytest\n'), ((5333, 5388), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (5356, 5388), False, 'import pytest\n'), ((6305, 6360), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (6328, 6360), False, 'import pytest\n'), ((6362, 6408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (6385, 6408), False, 'import pytest\n'), ((7127, 7182), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (7150, 7182), False, 'import pytest\n'), ((7184, 7230), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (7207, 7230), False, 'import pytest\n'), ((7912, 7967), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (7935, 7967), False, 'import pytest\n'), ((7969, 8015), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (7992, 8015), False, 'import pytest\n'), ((8557, 8612), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (8580, 8612), False, 'import pytest\n'), ((8614, 8660), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (8637, 8660), False, 'import pytest\n'), ((9205, 9260), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (9228, 9260), False, 'import pytest\n'), ((9262, 9308), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (9285, 9308), False, 'import pytest\n'), ((457, 516), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {'check_names': '(False)'}), '(result, expected, check_names=False)\n', (479, 516), True, 
'import pandas._testing as tm\n'), ((1498, 1526), 'pandas.Series', 'Series', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (1504, 1526), False, 'from pandas import DataFrame, Series, concat\n'), ((1609, 1646), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'empty'], {}), '(result, empty)\n', (1631, 1646), True, 'import pandas._testing as tm\n'), ((2918, 2947), 'pandas.Series', 'Series', (['np.nan'], {'index': 's.index'}), '(np.nan, index=s.index)\n', (2924, 2947), False, 'from pandas import DataFrame, Series, concat\n'), ((6230, 6301), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_unbiased_x', '(var_biased_x * var_debiasing_factors_x)'], {}), '(var_unbiased_x, var_biased_x * var_debiasing_factors_x)\n', (6245, 6301), True, 'import pandas._testing as tm\n'), ((8516, 8553), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', '(std_x * std_x)'], {}), '(var_x, std_x * std_x)\n', (8531, 8553), True, 'import pandas._testing as tm\n'), ((9170, 9201), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', 'cov_x_x'], {}), '(var_x, cov_x_x)\n', (9185, 9201), True, 'import pandas._testing as tm\n'), ((613, 632), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (628, 632), True, 'import numpy as np\n'), ((671, 690), 'numpy.random.randn', 'np.random.randn', (['(48)'], {}), '(48)\n', (686, 690), True, 'import numpy as np\n'), ((1076, 1095), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (1091, 1095), True, 'import numpy as np\n'), ((1134, 1153), 'numpy.random.randn', 'np.random.randn', (['(48)'], {}), '(48)\n', (1149, 1153), True, 'import numpy as np\n'), ((1767, 1780), 'pandas.Series', 'Series', (['[1.0]'], {}), '([1.0])\n', (1773, 1780), False, 'from pandas import DataFrame, Series, concat\n'), ((1822, 1838), 'pandas.Series', 'Series', (['[np.NaN]'], {}), '([np.NaN])\n', (1828, 1838), False, 'from pandas import DataFrame, Series, concat\n'), ((1961, 1980), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (1976, 1980), True, 'import numpy as np\n'), ((2117, 2153), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2130, 2153), False, 'import pytest\n'), ((5156, 5189), 'pandas._testing.assert_equal', 'tm.assert_equal', (['mean_x', 'expected'], {}), '(mean_x, expected)\n', (5171, 5189), True, 'import pandas._testing as tm\n'), ((5294, 5329), 'pandas._testing.assert_equal', 'tm.assert_equal', (['corr_x_x', 'expected'], {}), '(corr_x_x, expected)\n', (5309, 5329), True, 'import pandas._testing as tm\n'), ((7072, 7121), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', '(mean_x2 - mean_x * mean_x)'], {}), '(var_x, mean_x2 - mean_x * mean_x)\n', (7087, 7121), True, 'import pandas._testing as tm\n'), ((7876, 7908), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', 'expected'], {}), '(var_x, expected)\n', (7891, 7908), True, 'import pandas._testing as tm\n'), ((10152, 10214), 'pandas._testing.assert_equal', 'tm.assert_equal', (['cov_x_y', '(0.5 * (var_x_plus_y - var_x - var_y))'], {}), '(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))\n', (10167, 10214), True, 'import pandas._testing as tm\n'), ((10693, 10745), 'pandas._testing.assert_equal', 'tm.assert_equal', (['corr_x_y', '(cov_x_y / (std_x * std_y))'], {}), '(corr_x_y, cov_x_y / (std_x * std_y))\n', (10708, 10745), True, 'import pandas._testing as tm\n'), ((640, 653), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (649, 653), True, 'import numpy 
as np\n'), ((805, 833), 'numpy.isnan', 'np.isnan', (['result.values[:14]'], {}), '(result.values[:14])\n', (813, 833), True, 'import numpy as np\n'), ((1103, 1116), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (1112, 1116), True, 'import numpy as np\n'), ((1369, 1397), 'numpy.isnan', 'np.isnan', (['result.values[:11]'], {}), '(result.values[:11])\n', (1377, 1397), True, 'import numpy as np\n'), ((1988, 2001), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (1997, 2001), True, 'import numpy as np\n'), ((2207, 2226), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (2222, 2226), True, 'import numpy as np\n'), ((2371, 2418), 'pandas.DataFrame', 'DataFrame', ([], {'index': 'obj.index', 'columns': 'obj.columns'}), '(index=obj.index, columns=obj.columns)\n', (2380, 2418), False, 'from pandas import DataFrame, Series, concat\n'), ((11353, 11411), 'pandas._testing.assert_equal', 'tm.assert_equal', (['cov_x_y', '(mean_x_times_y - mean_x * mean_y)'], {}), '(cov_x_y, mean_x_times_y - mean_x * mean_y)\n', (11368, 11411), True, 'import pandas._testing as tm\n'), ((855, 883), 'numpy.isnan', 'np.isnan', (['result.values[14:]'], {}), '(result.values[14:])\n', (863, 883), True, 'import numpy as np\n'), ((1419, 1447), 'numpy.isnan', 'np.isnan', (['result.values[11:]'], {}), '(result.values[11:])\n', (1427, 1447), True, 'import numpy as np\n'), ((1700, 1713), 'pandas.Series', 'Series', (['[1.0]'], {}), '([1.0])\n', (1706, 1713), False, 'from pandas import DataFrame, Series, concat\n')]
|
import math
import warnings
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import scipy.stats
from scipy.stats import norm # edit
from scipy.special import log_ndtr
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
def split_left_right_censored(x, y, cens):
counts = cens.value_counts()
if -1 not in counts and 1 not in counts:
warnings.warn("No censored observations; use regression methods for uncensored data")
xs = []
ys = []
for value in [-1, 0, 1]:
if value in counts:
split = cens == value
y_split = np.squeeze(y[split].values)
x_split = x[split].values
else:
y_split, x_split = None, None
xs.append(x_split)
ys.append(y_split)
return xs, ys
def tobit_neg_log_likelihood(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1])
s = params[-1]
to_cat = []
cens = False
if y_left is not None:
cens = True
left = (y_left - np.dot(x_left, b))
to_cat.append(left)
if y_right is not None:
cens = True
right = (np.dot(x_right, b) - y_right)
to_cat.append(right)
if cens:
concat_stats = np.concatenate(to_cat, axis=0) / s
log_cum_norm = scipy.stats.norm.logcdf(concat_stats) # log_ndtr(concat_stats)
cens_sum = log_cum_norm.sum()
else:
cens_sum = 0
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
mid = scipy.stats.norm.logpdf(mid_stats) - math.log(max(np.finfo('float').resolution, s))
mid_sum = mid.sum()
else:
mid_sum = 0
loglik = cens_sum + mid_sum
return - loglik
def tobit_neg_log_likelihood_der(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1]) # in censReg, not using chain rule as below; they optimize in terms of log(s)
s = params[-1]
beta_jac = np.zeros(len(b))
sigma_jac = 0
if y_left is not None:
left_stats = (y_left - np.dot(x_left, b)) / s
l_pdf = scipy.stats.norm.logpdf(left_stats)
l_cdf = log_ndtr(left_stats)
left_frac = np.exp(l_pdf - l_cdf)
beta_left = np.dot(left_frac, x_left / s)
beta_jac -= beta_left
left_sigma = np.dot(left_frac, left_stats)
sigma_jac -= left_sigma
if y_right is not None:
right_stats = (np.dot(x_right, b) - y_right) / s
r_pdf = scipy.stats.norm.logpdf(right_stats)
r_cdf = log_ndtr(right_stats)
right_frac = np.exp(r_pdf - r_cdf)
beta_right = np.dot(right_frac, x_right / s)
beta_jac += beta_right
right_sigma = np.dot(right_frac, right_stats)
sigma_jac -= right_sigma
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
beta_mid = np.dot(mid_stats, x_mid / s)
beta_jac += beta_mid
mid_sigma = (np.square(mid_stats) - 1).sum()
sigma_jac += mid_sigma
combo_jac = np.append(beta_jac, sigma_jac / s) # by chain rule, since the expression above is dloglik/dlogsigma
return -combo_jac
class TobitModel:
def __init__(self, fit_intercept=True):
self.fit_intercept = fit_intercept
self.ols_coef_ = None
self.ols_intercept = None
self.coef_ = None
self.intercept_ = None
self.sigma_ = None
def fit(self, x, y, cens, verbose=False):
"""
Fit a maximum-likelihood Tobit regression
:param x: Pandas DataFrame (n_samples, n_features): Data
:param y: Pandas Series (n_samples,): Target
:param cens: Pandas Series (n_samples,): -1 indicates left-censored samples, 0 for uncensored, 1 for right-censored
:param verbose: boolean, show info from minimization
:return:
"""
x_copy = x.copy()
if self.fit_intercept:
x_copy.insert(0, 'intercept', 1.0)
else:
x_copy.scale(with_mean=True, with_std=False, copy=False)
init_reg = LinearRegression(fit_intercept=False).fit(x_copy, y)
b0 = init_reg.coef_
y_pred = init_reg.predict(x_copy)
resid = y - y_pred
resid_var = np.var(resid)
s0 = np.sqrt(resid_var)
params0 = np.append(b0, s0)
xs, ys = split_left_right_censored(x_copy, y, cens)
result = minimize(lambda params: tobit_neg_log_likelihood(xs, ys, params), params0, method='BFGS',
jac=lambda params: tobit_neg_log_likelihood_der(xs, ys, params), options={'disp': verbose})
if verbose:
print(result)
self.ols_coef_ = b0[1:]
self.ols_intercept = b0[0]
if self.fit_intercept:
            self.intercept_ = result.x[0]  # the 'intercept' column inserted above is the first parameter
self.coef_ = result.x[1:-1]
else:
self.coef_ = result.x[:-1]
self.intercept_ = 0
self.sigma_ = result.x[-1]
return self
def predict(self, x):
return self.intercept_ + np.dot(x, self.coef_)
def score(self, x, y, scoring_function=mean_absolute_error):
y_pred = np.dot(x, self.coef_)
return scoring_function(y, y_pred)
# EDIT - insert marginal effects function
def margins(self, x, k = 0):
"""
        Marginal effects on the dependent variable of a regressor, identified by its index k
:param x: array with all regressors (independent variables) to make a prediction
:param k: coefficient corresponding to the regressor with respect to which we want to take the marginal effects
:return: an array with the marginal effects estimated at each observation's level
        The marginal effect of regressor k on individual i's y is the product of coef[k] and the normal cdf
        evaluated at the linear prediction x_i . beta / sigma (i.e. self.predict(x) / self.sigma_)
"""
return self.coef_[k] * norm.cdf(self.predict(x) / self.sigma_)
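# A minimal usage sketch (not part of the original module; data and names below are
# illustrative): fit the model on synthetic data that is left-censored at zero.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x_demo = pd.DataFrame({"x1": rng.normal(size=200), "x2": rng.normal(size=200)})
    y_latent = 1.0 + 2.0 * x_demo["x1"] - 1.0 * x_demo["x2"] + rng.normal(scale=0.5, size=200)
    y_obs = y_latent.clip(lower=0.0)                      # observed response, left-censored at 0
    cens = pd.Series(np.where(y_latent < 0.0, -1, 0))   # -1 = left-censored, 0 = uncensored
    model = TobitModel().fit(x_demo, y_obs, cens, verbose=False)
    print("coefficients:", model.coef_, "sigma:", model.sigma_)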
|
[
"scipy.special.log_ndtr",
"numpy.sqrt",
"numpy.squeeze",
"numpy.append",
"numpy.exp",
"numpy.dot",
"numpy.square",
"numpy.concatenate",
"numpy.finfo",
"warnings.warn",
"sklearn.linear_model.LinearRegression",
"numpy.var"
] |
[((3180, 3214), 'numpy.append', 'np.append', (['beta_jac', '(sigma_jac / s)'], {}), '(beta_jac, sigma_jac / s)\n', (3189, 3214), True, 'import numpy as np\n'), ((443, 533), 'warnings.warn', 'warnings.warn', (['"""No censored observations; use regression methods for uncensored data"""'], {}), "(\n 'No censored observations; use regression methods for uncensored data')\n", (456, 533), False, 'import warnings\n'), ((2304, 2324), 'scipy.special.log_ndtr', 'log_ndtr', (['left_stats'], {}), '(left_stats)\n', (2312, 2324), False, 'from scipy.special import log_ndtr\n'), ((2345, 2366), 'numpy.exp', 'np.exp', (['(l_pdf - l_cdf)'], {}), '(l_pdf - l_cdf)\n', (2351, 2366), True, 'import numpy as np\n'), ((2387, 2416), 'numpy.dot', 'np.dot', (['left_frac', '(x_left / s)'], {}), '(left_frac, x_left / s)\n', (2393, 2416), True, 'import numpy as np\n'), ((2469, 2498), 'numpy.dot', 'np.dot', (['left_frac', 'left_stats'], {}), '(left_frac, left_stats)\n', (2475, 2498), True, 'import numpy as np\n'), ((2686, 2707), 'scipy.special.log_ndtr', 'log_ndtr', (['right_stats'], {}), '(right_stats)\n', (2694, 2707), False, 'from scipy.special import log_ndtr\n'), ((2729, 2750), 'numpy.exp', 'np.exp', (['(r_pdf - r_cdf)'], {}), '(r_pdf - r_cdf)\n', (2735, 2750), True, 'import numpy as np\n'), ((2772, 2803), 'numpy.dot', 'np.dot', (['right_frac', '(x_right / s)'], {}), '(right_frac, x_right / s)\n', (2778, 2803), True, 'import numpy as np\n'), ((2858, 2889), 'numpy.dot', 'np.dot', (['right_frac', 'right_stats'], {}), '(right_frac, right_stats)\n', (2864, 2889), True, 'import numpy as np\n'), ((3020, 3048), 'numpy.dot', 'np.dot', (['mid_stats', '(x_mid / s)'], {}), '(mid_stats, x_mid / s)\n', (3026, 3048), True, 'import numpy as np\n'), ((4376, 4389), 'numpy.var', 'np.var', (['resid'], {}), '(resid)\n', (4382, 4389), True, 'import numpy as np\n'), ((4403, 4421), 'numpy.sqrt', 'np.sqrt', (['resid_var'], {}), '(resid_var)\n', (4410, 4421), True, 'import numpy as np\n'), ((4440, 4457), 'numpy.append', 'np.append', (['b0', 's0'], {}), '(b0, s0)\n', (4449, 4457), True, 'import numpy as np\n'), ((5275, 5296), 'numpy.dot', 'np.dot', (['x', 'self.coef_'], {}), '(x, self.coef_)\n', (5281, 5296), True, 'import numpy as np\n'), ((667, 694), 'numpy.squeeze', 'np.squeeze', (['y[split].values'], {}), '(y[split].values)\n', (677, 694), True, 'import numpy as np\n'), ((1152, 1169), 'numpy.dot', 'np.dot', (['x_left', 'b'], {}), '(x_left, b)\n', (1158, 1169), True, 'import numpy as np\n'), ((1264, 1282), 'numpy.dot', 'np.dot', (['x_right', 'b'], {}), '(x_right, b)\n', (1270, 1282), True, 'import numpy as np\n'), ((1359, 1389), 'numpy.concatenate', 'np.concatenate', (['to_cat'], {'axis': '(0)'}), '(to_cat, axis=0)\n', (1373, 1389), True, 'import numpy as np\n'), ((5170, 5191), 'numpy.dot', 'np.dot', (['x', 'self.coef_'], {}), '(x, self.coef_)\n', (5176, 5191), True, 'import numpy as np\n'), ((1606, 1622), 'numpy.dot', 'np.dot', (['x_mid', 'b'], {}), '(x_mid, b)\n', (1612, 1622), True, 'import numpy as np\n'), ((2213, 2230), 'numpy.dot', 'np.dot', (['x_left', 'b'], {}), '(x_left, b)\n', (2219, 2230), True, 'import numpy as np\n'), ((2583, 2601), 'numpy.dot', 'np.dot', (['x_right', 'b'], {}), '(x_right, b)\n', (2589, 2601), True, 'import numpy as np\n'), ((2979, 2995), 'numpy.dot', 'np.dot', (['x_mid', 'b'], {}), '(x_mid, b)\n', (2985, 2995), True, 'import numpy as np\n'), ((4206, 4243), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (4222, 4243), False, 'from 
sklearn.linear_model import LinearRegression\n'), ((3100, 3120), 'numpy.square', 'np.square', (['mid_stats'], {}), '(mid_stats)\n', (3109, 3120), True, 'import numpy as np\n'), ((1692, 1709), 'numpy.finfo', 'np.finfo', (['"""float"""'], {}), "('float')\n", (1700, 1709), True, 'import numpy as np\n')]
|
#Writing MOOG parameter file for the parameter, abundance, and error calculations.
#The parameter file only needs to be written once, at the beginning of the routine, because the output
#files are overwritten with each iteration of the routine; only minimal output data are needed.
#
#The user can choose to have the parameter file written to screen by choosing verbose=True
#The user can choose to have more detailed MOOG output by choosing the appropriate values for the
#MOOG input parameters.
import numpy as np
def param_file(linelist,atmosphere=0,molecules=1,lines=0,flux=0,damp=0,plot=0,units=0,verbose=False):
if verbose:
print('abfind')
print('terminal \'x11\'')
print('standard_out \'moog_out.1\'')
print('summary_out \'moog_out.2\'')
print('model_in \'star.mod\'')
print('lines_in \'' + linelist + '\'')
print('atmosphere ' + str(atmosphere))
print('molecules ' + str(molecules))
print('lines ' + str(lines))
print('flux/int ' + str(flux))
print('damping ' + str(damp))
print('plot ' + str(plot))
print('units ' + str(units))
with open('batch.par', 'wt') as file:
file.write('abfind' + '\n')
file.write('terminal \'x11\'' + '\n')
file.write('standard_out \'moog_out.1\'' + '\n')
file.write('summary_out \'moog_out.2\'' + '\n')
file.write('model_in \'star.mod\'' + '\n')
file.write('lines_in \'' + linelist + '\'' + '\n')
file.write('atmosphere ' + str(atmosphere) + '\n')
file.write('molecules ' + str(molecules) + '\n')
file.write('lines ' + str(lines) + '\n')
file.write('flux/int ' + str(flux) + '\n')
file.write('damping ' + str(damp) + '\n')
file.write('plot ' + str(plot) + '\n')
file.write('units ' + str(units) + '\n')
#Function for creating the solar and stellar linelists
def linelist_create(star_in, sun_in, direc_path):
with open(direc_path + '/linelist_star.txt', 'w') as out_star:
with open(direc_path + '/linelist_sun.txt', 'w') as out_sun:
with open(star_in) as file_star:
with open(sun_in) as file_sun:
line_star = file_star.readline()
out_star.write(line_star) #accounts for comment line in linelist files
line_sun = file_sun.readline()
out_sun.write(line_sun) #accounts for comment line in linelist files
line = file_star.readlines()
line_s = file_sun.readlines()
for line_star in line:
line_star_split = line_star.split()
#if len(line_star_split) < 2: continue
for line_sun in line_s:
line_sun_split = line_sun.split()
#if len(line_sun_split) < 2: continue
if line_star_split[0] == line_sun_split[0] and line_star_split[1] == line_sun_split[1]:
out_star.write(line_star)
out_sun.write(line_sun)
continue
#Reads MOOG output files, parsing elements and columns
def read_file(filename):
count = 0
elements = ['Fe I ', 'Fe II ', 'C I ', 'N I ', 'O I ', 'S I', 'K I ', 'Na I ', 'Mg I ', 'Al I ', 'Si I ', 'Ca I ', 'Sc II ', 'Ti I ', 'Ti II ', 'V ', 'Cr I ',
'Mn I ', 'Co I ', 'Ni I ', 'Cu I ', 'Zn I ', 'Ba II ']
dtype = [('wavelength', 'f8'),
('ID', 'f8'),
('EP', 'f8'),
('logGF', 'f8'),
('EWin', 'f8'),
('logRWin', 'f8'),
('abund', 'f8'),
('delavg', 'f8')]
abundances = []
el_found = []
with open(filename) as file:
while True:
count += 1
# Get next line from file
line = file.readline()
# if line is empty end of file is reached
if not line: break
for j, el in enumerate(elements):
species = 'Abundance Results for Species ' + el
if species in line:
new_arr = []
el_found.append(el)
line = file.readline().split()
line = file.readline().split()
while len(line) == 8:
new_arr.append(line)
line = file.readline().rstrip().split()
new_arr = np.array(new_arr)
new_arr = np.core.records.fromarrays(new_arr.T,dtype=dtype)
abundances.append(new_arr)
return el_found, abundances
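#A minimal usage sketch (file names below are illustrative, not prescribed by this module):
#write the MOOG driver file once, then parse the summary output after MOOG has been run.
#
#   param_file('linelist_star.txt', atmosphere=1, molecules=2, verbose=True)
#   # ... run MOOG externally so that 'moog_out.2' is produced ...
#   elements_found, abundances = read_file('moog_out.2')
#   for el, abund in zip(elements_found, abundances):
#       print(el, abund['abund'].mean())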
|
[
"numpy.array",
"numpy.core.records.fromarrays"
] |
[((4833, 4850), 'numpy.array', 'np.array', (['new_arr'], {}), '(new_arr)\n', (4841, 4850), True, 'import numpy as np\n'), ((4881, 4931), 'numpy.core.records.fromarrays', 'np.core.records.fromarrays', (['new_arr.T'], {'dtype': 'dtype'}), '(new_arr.T, dtype=dtype)\n', (4907, 4931), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--plot", action="store_const", default=False, const=True)
args = parser.parse_args()
data = np.loadtxt("../data/data.csv", skiprows=1, usecols=list(range(1,8)), delimiter=",")[33:,:]
xdays = data[:,0] - np.mean(data[:,0])
deaths = data[:,-1]
print(xdays, deaths)
logdeaths = np.log(deaths)
slope, offset, rval, pval, stderr = linregress(xdays, logdeaths)
stderr = np.sqrt(np.sum((logdeaths-(slope*logdeaths+offset))**2) / (len(logdeaths)-2.)) / np.sqrt(np.sum((xdays - np.mean(xdays))**2))
if args.plot:
plt.plot(xdays, np.exp(offset + slope*xdays), 'C0-')
plt.plot(xdays, np.exp(offset + (slope+stderr)*xdays), 'C0--')
plt.plot(xdays, np.exp(offset + (slope-stderr)*xdays), 'C0--')
plt.plot(xdays, deaths, 'C0o')
plt.gca().set_yscale("log")
plt.show()
print("Slope: %.3e" % slope)
print("Doubling every: %.2f" % (np.log(2)/slope))
print("R-squared: %.3f" % (rval*rval))
print("Stderr: %.3e" % stderr)
|
[
"scipy.stats.linregress",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.gca",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.sum",
"matplotlib.pyplot.show"
] |
[((112, 137), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (135, 137), False, 'import argparse\n'), ((435, 449), 'numpy.log', 'np.log', (['deaths'], {}), '(deaths)\n', (441, 449), True, 'import numpy as np\n'), ((487, 515), 'scipy.stats.linregress', 'linregress', (['xdays', 'logdeaths'], {}), '(xdays, logdeaths)\n', (497, 515), False, 'from scipy.stats import linregress\n'), ((363, 382), 'numpy.mean', 'np.mean', (['data[:, 0]'], {}), '(data[:, 0])\n', (370, 382), True, 'import numpy as np\n'), ((861, 891), 'matplotlib.pyplot.plot', 'plt.plot', (['xdays', 'deaths', '"""C0o"""'], {}), "(xdays, deaths, 'C0o')\n", (869, 891), True, 'import matplotlib.pyplot as plt\n'), ((928, 938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (936, 938), True, 'import matplotlib.pyplot as plt\n'), ((686, 716), 'numpy.exp', 'np.exp', (['(offset + slope * xdays)'], {}), '(offset + slope * xdays)\n', (692, 716), True, 'import numpy as np\n'), ((743, 784), 'numpy.exp', 'np.exp', (['(offset + (slope + stderr) * xdays)'], {}), '(offset + (slope + stderr) * xdays)\n', (749, 784), True, 'import numpy as np\n'), ((810, 851), 'numpy.exp', 'np.exp', (['(offset + (slope - stderr) * xdays)'], {}), '(offset + (slope - stderr) * xdays)\n', (816, 851), True, 'import numpy as np\n'), ((533, 588), 'numpy.sum', 'np.sum', (['((logdeaths - (slope * logdeaths + offset)) ** 2)'], {}), '((logdeaths - (slope * logdeaths + offset)) ** 2)\n', (539, 588), True, 'import numpy as np\n'), ((896, 905), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (903, 905), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1010), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1007, 1010), True, 'import numpy as np\n'), ((630, 644), 'numpy.mean', 'np.mean', (['xdays'], {}), '(xdays)\n', (637, 644), True, 'import numpy as np\n')]
|
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
if len(sys.argv) != 3:
print('usage: python plot_performances.py <group_csv> <indiv_csv>')
exit()
group_file = sys.argv[1]
indiv_file = sys.argv[2]
# Load the data
df_group = pd.read_csv(group_file)
df_indiv = pd.read_csv(indiv_file)
df = pd.concat([df_group, df_indiv], sort=True)
# Prepare the data for plotting
plot_df = df.groupby(['model', 'id'], as_index=False)['hit'].agg('mean')
mfa_df = plot_df.loc[plot_df['model'] == 'MFA']
mfa_median = mfa_df['hit'].median()
plot_df = plot_df.loc[plot_df['model'] != 'MFA']
# Plot the data
sns.set(style='whitegrid', palette='colorblind')
plt.figure(figsize=(7, 3))
order = plot_df.groupby('model', as_index=False)['hit'].agg('median').sort_values('hit')['model']
colors = [('C0' if 'mReasoner' in x else 'C2') for x in order]
sns.boxplot(x='model', y='hit', data=plot_df, order=order, palette=colors)
plt.axhline(y=mfa_median, ls='--', color='C7', zorder=10)
plt.text(0.002, mfa_median + 0.015, 'MFA', color='C7', fontsize=10, transform=plt.gca().transAxes)
plt.xlabel('')
plt.yticks(np.arange(0, 1.1, 0.1))
plt.ylabel('Coverage Accuracy')
plt.tight_layout()
plt.savefig('visualizations/performances.pdf')
plt.show()
|
[
"seaborn.set",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"seaborn.boxplot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((292, 315), 'pandas.read_csv', 'pd.read_csv', (['group_file'], {}), '(group_file)\n', (303, 315), True, 'import pandas as pd\n'), ((327, 350), 'pandas.read_csv', 'pd.read_csv', (['indiv_file'], {}), '(indiv_file)\n', (338, 350), True, 'import pandas as pd\n'), ((356, 398), 'pandas.concat', 'pd.concat', (['[df_group, df_indiv]'], {'sort': '(True)'}), '([df_group, df_indiv], sort=True)\n', (365, 398), True, 'import pandas as pd\n'), ((655, 703), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'palette': '"""colorblind"""'}), "(style='whitegrid', palette='colorblind')\n", (662, 703), True, 'import seaborn as sns\n'), ((704, 730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 3)'}), '(figsize=(7, 3))\n', (714, 730), True, 'import matplotlib.pyplot as plt\n'), ((893, 967), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""model"""', 'y': '"""hit"""', 'data': 'plot_df', 'order': 'order', 'palette': 'colors'}), "(x='model', y='hit', data=plot_df, order=order, palette=colors)\n", (904, 967), True, 'import seaborn as sns\n'), ((969, 1026), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'mfa_median', 'ls': '"""--"""', 'color': '"""C7"""', 'zorder': '(10)'}), "(y=mfa_median, ls='--', color='C7', zorder=10)\n", (980, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1127, 1141), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (1137, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1208), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coverage Accuracy"""'], {}), "('Coverage Accuracy')\n", (1187, 1208), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1228), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1226, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualizations/performances.pdf"""'], {}), "('visualizations/performances.pdf')\n", (1240, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1276, 1286), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1284, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1175), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (1162, 1175), True, 'import numpy as np\n'), ((1105, 1114), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1112, 1114), True, 'import matplotlib.pyplot as plt\n')]
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy
def run():
workspace = os.path.join("lake")
# make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
fext = "png"
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == "--pdf":
fext = "pdf"
# save the starting path
cwdpth = os.getcwd()
# change to the working directory
os.chdir(workspace)
# We are creating a square model with a specified head equal to `h1` along all boundaries.
# The head at the cell in the center in the top layer is fixed to `h2`. First, set the name
# of the model and the parameters of the model: the number of layers `Nlay`, the number of rows
# and columns `N`, lengths of the sides of the model `L`, aquifer thickness `H`, hydraulic
# conductivity `Kh`
name = "lake_example"
h1 = 100
h2 = 90
Nlay = 10
N = 101
L = 400.0
H = 50.0
Kh = 1.0
# Create a MODFLOW model and store it (in this case in the variable `ml`, but you can call it
# whatever you want). The modelname will be the name given to all MODFLOW files (input and output).
# The exe_name should be the full path to your MODFLOW executable. The version is either 'mf2k'
# for MODFLOW2000 or 'mf2005'for MODFLOW2005.
ml = flopy.modflow.Modflow(
modelname=name, exe_name="mf2005", version="mf2005"
)
# Define the discretization of the model. All layers are given equal thickness. The `bot` array
# is build from the `Hlay` values to indicate top and bottom of each layer, and `delrow` and
# `delcol` are computed from model size `L` and number of cells `N`. Once these are all computed,
# the Discretization file is built.
bot = np.linspace(-H / Nlay, -H, Nlay)
delrow = delcol = L / (N - 1)
dis = flopy.modflow.ModflowDis(
ml,
nlay=Nlay,
nrow=N,
ncol=N,
delr=delrow,
delc=delcol,
top=0.0,
botm=bot,
laycbd=0,
)
# Next we specify the boundary conditions and starting heads with the Basic package. The `ibound`
# array will be `1` in all cells in all layers, except for along the boundary and in the cell at
# the center in the top layer where it is set to `-1` to indicate fixed heads. The starting heads
# are used to define the heads in the fixed head cells (this is a steady simulation, so none of
# the other starting values matter). So we set the starting heads to `h1` everywhere, except for
# the head at the center of the model in the top layer.
Nhalf = int((N - 1) / 2)
ibound = np.ones((Nlay, N, N), dtype=int)
ibound[:, 0, :] = -1
ibound[:, -1, :] = -1
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
ibound[0, Nhalf, Nhalf] = -1
start = h1 * np.ones((N, N))
start[Nhalf, Nhalf] = h2
# create external ibound array and starting head files
files = []
hfile = f"{name}_strt.ref"
np.savetxt(hfile, start)
hfiles = []
for kdx in range(Nlay):
file = f"{name}_ib{kdx + 1:02d}.ref"
files.append(file)
hfiles.append(hfile)
np.savetxt(file, ibound[kdx, :, :], fmt="%5d")
bas = flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)
# The aquifer properties (really only the hydraulic conductivity) are defined with the
# LPF package.
lpf = flopy.modflow.ModflowLpf(ml, hk=Kh)
# Finally, we need to specify the solver we want to use (PCG with default values), and the
# output control (using the default values). Then we are ready to write all MODFLOW input
# files and run MODFLOW.
pcg = flopy.modflow.ModflowPcg(ml)
oc = flopy.modflow.ModflowOc(ml)
ml.write_input()
ml.run_model()
# change back to the starting directory
os.chdir(cwdpth)
# Once the model has terminated normally, we can read the heads file. First, a link to the heads
# file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by
# specifying, in this case, the step number and period number for which we want to retrieve data.
# A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions
# are used to make contours of the layers or a cross-section.
hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
h = hds.get_data(kstpkper=(0, 0))
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%2.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake1.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%1.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake2.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake3.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
return 0
if __name__ == "__main__":
success = run()
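# Usage sketch: running this script writes the MODFLOW input files into ./lake, runs
# mf2005, and saves the lake1/lake2/lake3 contour figures as PNG; passing --pdf on the
# command line switches the figure format to PDF (see the argument loop in run()).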
|
[
"os.path.exists",
"numpy.ones",
"flopy.modflow.ModflowPcg",
"os.makedirs",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.gcf",
"os.path.join",
"flopy.modflow.ModflowDis",
"os.getcwd",
"os.chdir",
"numpy.linspace",
"flopy.modflow.ModflowBas",
"numpy.savetxt",
"flopy.modflow.ModflowLpf",
"flopy.modflow.ModflowOc",
"matplotlib.pyplot.axis",
"numpy.arange",
"flopy.modflow.Modflow"
] |
[((115, 135), 'os.path.join', 'os.path.join', (['"""lake"""'], {}), "('lake')\n", (127, 135), False, 'import os\n'), ((526, 537), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (535, 537), False, 'import os\n'), ((581, 600), 'os.chdir', 'os.chdir', (['workspace'], {}), '(workspace)\n', (589, 600), False, 'import os\n'), ((1491, 1565), 'flopy.modflow.Modflow', 'flopy.modflow.Modflow', ([], {'modelname': 'name', 'exe_name': '"""mf2005"""', 'version': '"""mf2005"""'}), "(modelname=name, exe_name='mf2005', version='mf2005')\n", (1512, 1565), False, 'import flopy\n'), ((1930, 1962), 'numpy.linspace', 'np.linspace', (['(-H / Nlay)', '(-H)', 'Nlay'], {}), '(-H / Nlay, -H, Nlay)\n', (1941, 1962), True, 'import numpy as np\n'), ((2007, 2122), 'flopy.modflow.ModflowDis', 'flopy.modflow.ModflowDis', (['ml'], {'nlay': 'Nlay', 'nrow': 'N', 'ncol': 'N', 'delr': 'delrow', 'delc': 'delcol', 'top': '(0.0)', 'botm': 'bot', 'laycbd': '(0)'}), '(ml, nlay=Nlay, nrow=N, ncol=N, delr=delrow, delc=\n delcol, top=0.0, botm=bot, laycbd=0)\n', (2031, 2122), False, 'import flopy\n'), ((2806, 2838), 'numpy.ones', 'np.ones', (['(Nlay, N, N)'], {'dtype': 'int'}), '((Nlay, N, N), dtype=int)\n', (2813, 2838), True, 'import numpy as np\n'), ((3147, 3171), 'numpy.savetxt', 'np.savetxt', (['hfile', 'start'], {}), '(hfile, start)\n', (3157, 3171), True, 'import numpy as np\n'), ((3383, 3438), 'flopy.modflow.ModflowBas', 'flopy.modflow.ModflowBas', (['ml'], {'ibound': 'files', 'strt': 'hfiles'}), '(ml, ibound=files, strt=hfiles)\n', (3407, 3438), False, 'import flopy\n'), ((3560, 3595), 'flopy.modflow.ModflowLpf', 'flopy.modflow.ModflowLpf', (['ml'], {'hk': 'Kh'}), '(ml, hk=Kh)\n', (3584, 3595), False, 'import flopy\n'), ((3825, 3853), 'flopy.modflow.ModflowPcg', 'flopy.modflow.ModflowPcg', (['ml'], {}), '(ml)\n', (3849, 3853), False, 'import flopy\n'), ((3863, 3890), 'flopy.modflow.ModflowOc', 'flopy.modflow.ModflowOc', (['ml'], {}), '(ml)\n', (3886, 3890), False, 'import flopy\n'), ((3980, 3996), 'os.chdir', 'os.chdir', (['cwdpth'], {}), '(cwdpth)\n', (3988, 3996), False, 'import os\n'), ((4594, 4614), 'numpy.linspace', 'np.linspace', (['(0)', 'L', 'N'], {}), '(0, L, N)\n', (4605, 4614), True, 'import numpy as np\n'), ((4678, 4704), 'matplotlib.pyplot.clabel', 'plt.clabel', (['c'], {'fmt': '"""%2.1f"""'}), "(c, fmt='%2.1f')\n", (4688, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4709, 4727), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (4717, 4727), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4782), 'os.path.join', 'os.path.join', (['workspace', 'f"""lake1.{fext}"""'], {}), "(workspace, f'lake1.{fext}')\n", (4754, 4782), False, 'import os\n'), ((4793, 4802), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4800, 4802), True, 'import matplotlib.pyplot as plt\n'), ((4881, 4901), 'numpy.linspace', 'np.linspace', (['(0)', 'L', 'N'], {}), '(0, L, N)\n', (4892, 4901), True, 'import numpy as np\n'), ((4966, 4992), 'matplotlib.pyplot.clabel', 'plt.clabel', (['c'], {'fmt': '"""%1.1f"""'}), "(c, fmt='%1.1f')\n", (4976, 4992), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5015), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (5005, 5015), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5070), 'os.path.join', 'os.path.join', (['workspace', 'f"""lake2.{fext}"""'], {}), "(workspace, f'lake2.{fext}')\n", (5042, 5070), False, 'import os\n'), ((5081, 5090), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5088, 5090), True, 'import 
matplotlib.pyplot as plt\n'), ((5165, 5216), 'numpy.linspace', 'np.linspace', (['(-H / Nlay / 2)', '(-H + H / Nlay / 2)', 'Nlay'], {}), '(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)\n', (5176, 5216), True, 'import numpy as np\n'), ((5287, 5305), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (5295, 5305), True, 'import matplotlib.pyplot as plt\n'), ((5320, 5360), 'os.path.join', 'os.path.join', (['workspace', 'f"""lake3.{fext}"""'], {}), "(workspace, f'lake3.{fext}')\n", (5332, 5360), False, 'import os\n'), ((5371, 5380), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5378, 5380), True, 'import matplotlib.pyplot as plt\n'), ((190, 215), 'os.path.exists', 'os.path.exists', (['workspace'], {}), '(workspace)\n', (204, 215), False, 'import os\n'), ((225, 247), 'os.makedirs', 'os.makedirs', (['workspace'], {}), '(workspace)\n', (236, 247), False, 'import os\n'), ((2992, 3007), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2999, 3007), True, 'import numpy as np\n'), ((3325, 3371), 'numpy.savetxt', 'np.savetxt', (['file', 'ibound[kdx, :, :]'], {'fmt': '"""%5d"""'}), "(file, ibound[kdx, :, :], fmt='%5d')\n", (3335, 3371), True, 'import numpy as np\n'), ((4504, 4542), 'os.path.join', 'os.path.join', (['workspace', 'f"""{name}.hds"""'], {}), "(workspace, f'{name}.hds')\n", (4516, 4542), False, 'import os\n'), ((4647, 4672), 'numpy.arange', 'np.arange', (['(90)', '(100.1)', '(0.2)'], {}), '(90, 100.1, 0.2)\n', (4656, 4672), True, 'import numpy as np\n'), ((4935, 4960), 'numpy.arange', 'np.arange', (['(90)', '(100.1)', '(0.2)'], {}), '(90, 100.1, 0.2)\n', (4944, 4960), True, 'import numpy as np\n'), ((5256, 5281), 'numpy.arange', 'np.arange', (['(90)', '(100.1)', '(0.2)'], {}), '(90, 100.1, 0.2)\n', (5265, 5281), True, 'import numpy as np\n')]
|
"""
Preprocessing of (single-lead) ECG signal:
band pass --> remove baseline --> find rpeaks --> denoise (mainly deal with motion artefact)
TODO:
1. motion artefact detection,
and slice the signal into continuous (no motion artefact within) segments
2. to add
References:
-----------
[1] https://github.com/PIA-Group/BioSPPy
[2] to add
"""
import os, time
import multiprocessing as mp
from copy import deepcopy
from numbers import Real
from typing import Union, Optional, Any, List, Dict
import numpy as np
from easydict import EasyDict as ED
from scipy.ndimage.filters import median_filter
from scipy.signal.signaltools import resample
from scipy.io import savemat
# from scipy.signal import medfilt
# https://github.com/scipy/scipy/issues/9680
try:
from biosppy.signals.tools import filter_signal
except:
from references.biosppy.biosppy.signals.tools import filter_signal
from cfg import PreprocCfg
from .ecg_rpeaks import (
xqrs_detect, gqrs_detect, pantompkins,
hamilton_detect, ssf_detect, christov_detect, engzee_detect, gamboa_detect,
)
from .ecg_rpeaks_dl import seq_lab_net_detect
__all__ = [
"preprocess_signal",
"parallel_preprocess_signal",
"denoise_signal",
]
QRS_DETECTORS = {
"xqrs": xqrs_detect,
"gqrs": gqrs_detect,
"pantompkins": pantompkins,
"hamilton": hamilton_detect,
"ssf": ssf_detect,
"christov": christov_detect,
"engzee": engzee_detect,
"gamboa": gamboa_detect,
"seq_lab": seq_lab_net_detect,
}
DL_QRS_DETECTORS = [
"seq_lab",
]
def preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will be updated by this `config`
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
    output (`retval`) is resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
filtered_ecg = raw_sig.copy()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
if fs != cfg.fs:
filtered_ecg = resample(filtered_ecg, int(round(len(filtered_ecg)*cfg.fs/fs)))
# remove baseline
if 'baseline' in cfg.preproc:
window1 = 2 * (cfg.baseline_window1 // 2) + 1 # window size must be odd
window2 = 2 * (cfg.baseline_window2 // 2) + 1
baseline = median_filter(filtered_ecg, size=window1, mode='nearest')
baseline = median_filter(baseline, size=window2, mode='nearest')
filtered_ecg = filtered_ecg - baseline
# filter signal
if 'bandpass' in cfg.preproc:
filtered_ecg = filter_signal(
signal=filtered_ecg,
ftype='FIR',
band='bandpass',
order=int(0.3 * fs),
sampling_rate=fs,
frequency=cfg.filter_band,
)['signal']
if cfg.rpeaks and cfg.rpeaks.lower() not in DL_QRS_DETECTORS:
# dl detectors not for parallel computing using `mp`
detector = QRS_DETECTORS[cfg.rpeaks.lower()]
rpeaks = detector(sig=filtered_ecg, fs=fs).astype(int)
else:
rpeaks = np.array([], dtype=int)
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
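# A minimal usage sketch (illustrative values; any field left out of `config` falls back
# to the corresponding entry of `PreprocCfg`):
#
#   cfg = ED(rpeaks="xqrs", preproc=["baseline", "bandpass"],
#            baseline_window1=81, baseline_window2=301, filter_band=[0.5, 45])
#   out = preprocess_signal(raw_sig, fs=400, config=cfg)
#   filtered, rpeaks = out["filtered_ecg"], out["rpeaks"]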
def parallel_preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None, save_dir:Optional[str]=None, save_fmt:str='npy', verbose:int=0) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
        `PreprocCfg` will be updated by this `config`
save_dir: str, optional,
directory for saving the outcome ('filtered_ecg' and 'rpeaks')
save_fmt: str, default 'npy',
format of the save files, 'npy' or 'mat'
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
    output (`retval`) is resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
start_time = time.time()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
epoch_len = int(cfg.parallel_epoch_len * fs)
epoch_overlap_half = int(cfg.parallel_epoch_overlap * fs) // 2
epoch_overlap = 2 * epoch_overlap_half
epoch_forward = epoch_len - epoch_overlap
if len(raw_sig) <= 3 * epoch_len: # too short, no need for parallel computing
retval = preprocess_signal(raw_sig, fs, cfg)
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
retval.rpeaks = rpeaks
return retval
l_epoch = [
raw_sig[idx*epoch_forward: idx*epoch_forward + epoch_len] \
for idx in range((len(raw_sig)-epoch_overlap)//epoch_forward)
]
if cfg.parallel_keep_tail:
tail_start_idx = epoch_forward * len(l_epoch) + epoch_overlap
if len(raw_sig) - tail_start_idx < 30 * fs: # less than 30s, make configurable?
# append to the last epoch
l_epoch[-1] = np.append(l_epoch[-1], raw_sig[tail_start_idx:])
else: # long enough
tail_epoch = raw_sig[tail_start_idx-epoch_overlap:]
l_epoch.append(tail_epoch)
cpu_num = max(1, mp.cpu_count()-3)
with mp.Pool(processes=cpu_num) as pool:
result = pool.starmap(
func=preprocess_signal,
iterable=[(e, fs, cfg) for e in l_epoch],
)
if cfg.parallel_keep_tail:
tail_result = result[-1]
result = result[:-1]
filtered_ecg = result[0]['filtered_ecg'][:epoch_len-epoch_overlap_half]
rpeaks = result[0]['rpeaks'][np.where(result[0]['rpeaks']<epoch_len-epoch_overlap_half)[0]]
for idx, e in enumerate(result[1:]):
filtered_ecg = np.append(
filtered_ecg, e['filtered_ecg'][epoch_overlap_half: -epoch_overlap_half]
)
epoch_rpeaks = e['rpeaks'][np.where( (e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len-epoch_overlap_half) )[0]]
rpeaks = np.append(rpeaks, (idx+1)*epoch_forward + epoch_rpeaks)
if cfg.parallel_keep_tail:
filtered_ecg = np.append(filtered_ecg, tail_result['filtered_ecg'][epoch_overlap_half:])
tail_rpeaks = tail_result['rpeaks'][np.where(tail_result['rpeaks'] >= epoch_overlap_half)[0]]
rpeaks = np.append(rpeaks, len(result)*epoch_forward + tail_rpeaks)
if verbose >= 1:
if cfg.rpeaks.lower() in DL_QRS_DETECTORS:
print(f"signal processing took {round(time.time()-start_time, 3)} seconds")
else:
print(f"signal processing and R peaks detection took {round(time.time()-start_time, 3)} seconds")
start_time = time.time()
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
if verbose >= 1:
print(f"R peaks detection using {cfg.rpeaks} took {round(time.time()-start_time, 3)} seconds")
if save_dir:
# NOTE: this part is not tested
os.makedirs(save_dir, exist_ok=True)
if save_fmt.lower() == 'npy':
np.save(os.path.join(save_dir, "filtered_ecg.npy"), filtered_ecg)
np.save(os.path.join(save_dir, "rpeaks.npy"), rpeaks)
elif save_fmt.lower() == 'mat':
# save into 2 files, keep in accordance
savemat(os.path.join(save_dir, "filtered_ecg.mat"), {"filtered_ecg": filtered_ecg}, format='5')
savemat(os.path.join(save_dir, "rpeaks.mat"), {"rpeaks": rpeaks}, format='5')
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
"""
to check correctness of the function `parallel_preprocess_signal`,
say for record A01, one can call
>>> raw_sig = loadmat("./data/A01.mat")['ecg'].flatten()
>>> processed = parallel_preprocess_signal(raw_sig, 400)
>>> print(len(processed['filtered_ecg']) - len(raw_sig))
>>> start_t = int(3600*24.7811)
>>> len_t = 10
>>> fig, ax = plt.subplots(figsize=(20,6))
>>> ax.plot(processed['filtered_ecg'][start_t*400:(start_t+len_t)*400])
>>> for r in [p for p in processed['rpeaks'] if start_t*400 <= p < (start_t+len_t)*400]:
>>> ax.axvline(r-start_t*400,c='red',linestyle='dashed')
>>> plt.show()
or one can use the 'dataset.py'
"""
|
[
"scipy.ndimage.filters.median_filter",
"os.makedirs",
"numpy.where",
"os.path.join",
"multiprocessing.cpu_count",
"numpy.append",
"easydict.EasyDict",
"numpy.array",
"multiprocessing.Pool",
"copy.deepcopy",
"time.time"
] |
[((2371, 2391), 'copy.deepcopy', 'deepcopy', (['PreprocCfg'], {}), '(PreprocCfg)\n', (2379, 2391), False, 'from copy import deepcopy\n'), ((3534, 3586), 'easydict.EasyDict', 'ED', (["{'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks}"], {}), "({'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks})\n", (3536, 3586), True, 'from easydict import EasyDict as ED\n'), ((4684, 4695), 'time.time', 'time.time', ([], {}), '()\n', (4693, 4695), False, 'import os, time\n'), ((4706, 4726), 'copy.deepcopy', 'deepcopy', (['PreprocCfg'], {}), '(PreprocCfg)\n', (4714, 4726), False, 'from copy import deepcopy\n'), ((8305, 8357), 'easydict.EasyDict', 'ED', (["{'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks}"], {}), "({'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks})\n", (8307, 8357), True, 'from easydict import EasyDict as ED\n'), ((2741, 2798), 'scipy.ndimage.filters.median_filter', 'median_filter', (['filtered_ecg'], {'size': 'window1', 'mode': '"""nearest"""'}), "(filtered_ecg, size=window1, mode='nearest')\n", (2754, 2798), False, 'from scipy.ndimage.filters import median_filter\n'), ((2818, 2871), 'scipy.ndimage.filters.median_filter', 'median_filter', (['baseline'], {'size': 'window2', 'mode': '"""nearest"""'}), "(baseline, size=window2, mode='nearest')\n", (2831, 2871), False, 'from scipy.ndimage.filters import median_filter\n'), ((3496, 3519), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (3504, 3519), True, 'import numpy as np\n'), ((5981, 6007), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'cpu_num'}), '(processes=cpu_num)\n', (5988, 6007), True, 'import multiprocessing as mp\n'), ((6483, 6570), 'numpy.append', 'np.append', (['filtered_ecg', "e['filtered_ecg'][epoch_overlap_half:-epoch_overlap_half]"], {}), "(filtered_ecg, e['filtered_ecg'][epoch_overlap_half:-\n epoch_overlap_half])\n", (6492, 6570), True, 'import numpy as np\n'), ((6740, 6799), 'numpy.append', 'np.append', (['rpeaks', '((idx + 1) * epoch_forward + epoch_rpeaks)'], {}), '(rpeaks, (idx + 1) * epoch_forward + epoch_rpeaks)\n', (6749, 6799), True, 'import numpy as np\n'), ((6851, 6924), 'numpy.append', 'np.append', (['filtered_ecg', "tail_result['filtered_ecg'][epoch_overlap_half:]"], {}), "(filtered_ecg, tail_result['filtered_ecg'][epoch_overlap_half:])\n", (6860, 6924), True, 'import numpy as np\n'), ((7409, 7420), 'time.time', 'time.time', ([], {}), '()\n', (7418, 7420), False, 'import os, time\n'), ((7782, 7818), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (7793, 7818), False, 'import os, time\n'), ((5751, 5799), 'numpy.append', 'np.append', (['l_epoch[-1]', 'raw_sig[tail_start_idx:]'], {}), '(l_epoch[-1], raw_sig[tail_start_idx:])\n', (5760, 5799), True, 'import numpy as np\n'), ((5954, 5968), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5966, 5968), True, 'import multiprocessing as mp\n'), ((6356, 6418), 'numpy.where', 'np.where', (["(result[0]['rpeaks'] < epoch_len - epoch_overlap_half)"], {}), "(result[0]['rpeaks'] < epoch_len - epoch_overlap_half)\n", (6364, 6418), True, 'import numpy as np\n'), ((6624, 6722), 'numpy.where', 'np.where', (["((e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len -\n epoch_overlap_half))"], {}), "((e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len -\n epoch_overlap_half))\n", (6632, 6722), True, 'import numpy as np\n'), ((6969, 7022), 'numpy.where', 'np.where', (["(tail_result['rpeaks'] >= epoch_overlap_half)"], {}), "(tail_result['rpeaks'] >= epoch_overlap_half)\n", 
(6977, 7022), True, 'import numpy as np\n'), ((7877, 7919), 'os.path.join', 'os.path.join', (['save_dir', '"""filtered_ecg.npy"""'], {}), "(save_dir, 'filtered_ecg.npy')\n", (7889, 7919), False, 'import os, time\n'), ((7955, 7991), 'os.path.join', 'os.path.join', (['save_dir', '"""rpeaks.npy"""'], {}), "(save_dir, 'rpeaks.npy')\n", (7967, 7991), False, 'import os, time\n'), ((8113, 8155), 'os.path.join', 'os.path.join', (['save_dir', '"""filtered_ecg.mat"""'], {}), "(save_dir, 'filtered_ecg.mat')\n", (8125, 8155), False, 'import os, time\n'), ((8221, 8257), 'os.path.join', 'os.path.join', (['save_dir', '"""rpeaks.mat"""'], {}), "(save_dir, 'rpeaks.mat')\n", (8233, 8257), False, 'import os, time\n'), ((7226, 7237), 'time.time', 'time.time', ([], {}), '()\n', (7235, 7237), False, 'import os, time\n'), ((7350, 7361), 'time.time', 'time.time', ([], {}), '()\n', (7359, 7361), False, 'import os, time\n'), ((7678, 7689), 'time.time', 'time.time', ([], {}), '()\n', (7687, 7689), False, 'import os, time\n')]
|
import numpy as np
class ProjectionMatrix():
"""This matrix provides projection distortion.
Projection distortion is when things that are far away
appear smaller and things that are close appear bigger.
This works flawlessly so far. Takes in screen-size and
provides near- and far clipping. fov is field-of-view
and smaller values will make view zoom in. A value of 1
will provide a panorama image."""
def __init__(self, screen_size, zNear, zFar, fov):
if fov >= 1: # Limit to 0.99 or we get infinity error at 1.0. >1.0 will give strange result.
fov = 0.99999;
tanHalfFOV = np.tan(fov * np.pi / 2.0)
zRange = zNear - zFar;
self.projectionMatrix = np.array([
[ # Row 0:
screen_size[1] / (tanHalfFOV * screen_size[0]),
0,
0,
0
],
[ # Row 1:
0,
1.0 / tanHalfFOV,
0,
0
],
[ # Row 2:
0,
0,
(-zNear - zFar)/zRange,
2.0 * zFar * zNear / zRange
],
[ # Row 3:
0,
0,
1,
0
],
], dtype=np.float32)
def get(self):
return self.projectionMatrix
class ViewMatrix():
"""This matrix transform a model as if it's percieved by a
camera with a target 'self.t' in global world coordinates
and a position 'self.p' in global world coordinates. Global
coordinates are x=right, y=forth and z=up."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
# target coordinates:
self.t = vec3(0, 0, 0)
# tolerance value:
self.tolerance = 0.5
"""The tolerance value is for testing when view lies within bounds.
In case of 'self.orbitTarget()', it's for testing when view gets too
close to target z-axis. In case of 'self.approachTarget()', it's for
testing when view gets too close to target coordinates."""
# Sensitivity value:
self.alpha = 0.01
"""The sensitivity value is for tuning how sensitive 'self.orbitTarget()'
and 'self.approachTarget()' are to user input."""
# Initialize the rotationMatrix as the identity matrix:
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def setPos(self, p):
self.p = vec3(p.x, p.y, p.z)
def lookAt(self, target=None, up=None):
"""This function focuses the view on a target.
Tested and seem to work as it should... ........finally........"""
if target != None:
self.t = vec3(target.x, target.y, target.z)
f = self.t.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, s.y, s.z, 0],
[ u.x, u.y, u.z, 0],
[ f.x, f.y, f.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def approachTarget(self, amount):
"""This function approaches the view towards the target
when amount is positive and moves away from the target when
amount is negative. It will stay outside the self.tolerance
distance. When completely close to the target, view cannot
look up or down too much."""
if amount == 0:
# If amount is zero, do nothing.
return
if self.t.sub(self.p).mag()*(1 - amount) > 2.0*self.tolerance:
# If 'self.approachTarget()' will not take the view within twice the
# tolerance distance, approach the target by given amount:
self.p = self.p.add(self.t.sub(self.p).scale(amount))
def orbitTarget(self, axis):
if axis == (0, 0):
return # Do nothing
# Get target2camera-vector:
p = self.p.sub(self.t)
# Assign passed values to variables we can change if we have to:
axis_x = -axis[0]
if axis[1] > 0.30/self.alpha:
"""If axis[1] is bigger than 0.40 / self.alpha, we get strange results
becouse view can 'tunnel' over the boundary set when getting view is
getting close to target z-axis. Changing tolerance doen't change it a
whole lot so I'm setting a boundary value for axis[1] to +-0.30 / self.alpha which is
really really large as it is."""
axis_y = 0.3 / self.alpha
elif axis[1] < -0.30/self.alpha:
axis_y = -0.3 / self.alpha
else:
axis_y = axis[1]
if axis_y > 0 and p.z > 0:
"""Tests if user is trying to orbit the view up
and if the view is above the 'equator'. The second
test is to make sure the view doesn't get stuck
if it gets inside the tolerance bounds and can get back
out as long as it's trying to move away."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
elif axis_y < 0 and p.z < 0:
"""Tests if user is trying to orbit the view down
and if the view is below the 'equator'. Same test
but for different case as the one above."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
if axis_y == 0: #If the other axis is zero:
# Amount of rotation for target-cam x-axis: (longitude, west2east)
v = vec3(0, 0, 1) # v is up vector
rate = axis_x
elif axis_x == 0: #If the other axis is zero:
# Amount of rotation for target-cam y-axis: (latitude, south2north)
v = p.cross(vec3(0, 0, 1)).norm() # v is side vector
rate = axis_y
else: #If neither is zero
# u is up vector:
u = vec3(0, 0, axis_x)
# s is side vector:
s = p.cross(vec3(0, 0, 1)).norm().scale(axis_y)
# v is combined vector:
v = u.add(s).norm()
rate = abs(axis_x) + abs(axis_y)
sin = np.sin(self.alpha * rate)
cos = np.cos(self.alpha * rate)
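        # The 4x4 matrix below is the standard axis-angle (Rodrigues) rotation matrix
        # about the unit axis v by the angle self.alpha * rate.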
rotateMatrix = np.matrix([
[ # Row 0:
( v.x*v.x*(1 - cos) + cos ),
( v.y*v.x*(1 - cos) - v.z*sin ),
( v.z*v.x*(1 - cos) + v.y*sin ),
0
],
[ # Row 1:
( v.x*v.y*(1 - cos) + v.z*sin ),
( v.y*v.y*(1 - cos) + cos ),
( v.z*v.y*(1 - cos) - v.x*sin ),
0
],
[ # Row 2:
( v.x*v.z*(1 - cos) - v.y*sin ),
( v.y*v.z*(1 - cos) + v.x*sin ),
( v.z*v.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32)
p = rotateMatrix.dot( np.array([p.x, p.y, p.z, 1.0]) ).getA()[0][0:3]
self.p = vec3(p[0], p[1], p[2]).add(self.t)
self.lookAt(self.t)
def get(self):
translationMatrix = np.matrix([
[1,0,0,-self.p.x],
[0,1,0,-self.p.y],
[0,0,1,-self.p.z],
[0,0,0,1]
], dtype=np.float32)
return (self.rotationMatrix*translationMatrix).getA()
class ModelMatrix():
"""This matrix transform a model into world coordinates.
Heavily tested and should work properly. Could probably
be optimized further or even translated into cython for
performance."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
self.s = vec3(1, 1, 1)
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def rotate(self, turns, unit):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
u = unit.norm()
sin = np.sin(turns * np.pi * 2)
cos = np.cos(turns * np.pi * 2)
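        # Axis-angle (Rodrigues) rotation about the unit axis u by 'turns' full
        # revolutions, accumulated into the current rotation matrix.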
self.rotationMatrix = self.rotationMatrix.dot(
np.matrix([
[ # Row 0:
( u.x*u.x*(1 - cos) + cos ),
( u.y*u.x*(1 - cos) - u.z*sin ),
( u.z*u.x*(1 - cos) + u.y*sin ),
0
],
[ # Row 1:
( u.x*u.y*(1 - cos) + u.z*sin ),
( u.y*u.y*(1 - cos) + cos ),
( u.z*u.y*(1 - cos) - u.x*sin ),
0
],
[ # Row 2:
( u.x*u.z*(1 - cos) - u.y*sin ),
( u.y*u.z*(1 - cos) + u.x*sin ),
( u.z*u.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32))
def scale(self, s):
self.s = vec3(s.x, s.y, s.z)
def lookAt(self, target, up=None):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
# Get normalized vector pointing from model to target
f = target.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
# s must be normalized! Consider when f and u are not perpendicular!
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, f.x, u.x, 0],
[ s.y, f.y, u.y, 0],
[ s.z, f.z, u.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def get(self):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
translationMatrix = np.matrix([
[1,0,0,self.p.x],
[0,1,0,self.p.y],
[0,0,1,self.p.z],
[0,0,0,1]
], dtype=np.float32)
scaleMatrix = np.matrix([
[self.s.x,0,0,0],
[0,self.s.y,0,0],
[0,0,self.s.z,0],
[0,0,0,1]
], dtype=np.float32)
return (translationMatrix*self.rotationMatrix*scaleMatrix).getA()
class quaternion():
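    # Minimal quaternion helper. A vector v can be rotated by a unit quaternion q via
    # q.xV(v).xQ(q.conjugate()), i.e. the standard q * v * q^-1 sandwich product.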
def __init__(self, x, y, z, w):
self.x = float(x)
self.y = float(y)
self.z = float(z)
self.w = float(w)
def mag(self): # Get length of quaternion
        return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z + self.w*self.w)
def norm(self): # Normalize quaternion
return quaternion(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag(),
w= self.w / self.mag())
def conjugate(self):
return quaternion(
x=-self.x,
y=-self.y,
z=-self.z,
w= self.w)
def xQ(self, q): # Multiply with quaternion
return quaternion(
x= self.x * q.w + self.w * q.x + self.y * q.z - self.z * q.y,
y= self.y * q.w + self.w * q.y + self.z * q.x - self.x * q.z,
z= self.z * q.w + self.w * q.z + self.x * q.y - self.y * q.x,
w= self.w * q.w - self.x * q.x - self.y * q.y - self.z * q.z)
def xV(self, v): # Multiply with vector
return quaternion(
x= self.w*v.x + self.y*v.z - self.z*v.y,
y= self.w*v.y + self.z*v.x - self.x*v.z,
z= self.w*v.z + self.x*v.y - self.y*v.x,
w=-self.x*v.x - self.y*v.y - self.z*v.z)
class vec2():
def __init__(self, x, y):
self.x = float(x)
self.y = float(y)
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y)
def norm(self):
return vec2(
x= self.x / self.mag(),
y= self.y / self.mag())
class vec3():
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def cross(self, vector):
return vec3(
x= self.y*vector.z - self.z*vector.y,
y= self.z*vector.x - self.x*vector.z,
z= self.x*vector.y - self.y*vector.x)
def dot(self, vector):
return float( self.x*vector.x + self.y*vector.y + self.z*vector.z )
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def norm(self):
return vec3(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag())
def add(self, vector):
return vec3(
x= self.x + vector.x,
y= self.y + vector.y,
z= self.z + vector.z)
def sub(self, vector):
return vec3(
x= self.x - vector.x,
y= self.y - vector.y,
z= self.z - vector.z)
def scale(self, scalar):
return vec3(
self.x*scalar,
self.y*scalar,
self.z*scalar)
    def rotate(self, angle, axis):
        # Rodrigues rotation of this vector about 'axis' by 'angle' (assumed to be in radians).
        u = axis.norm()
        c = np.cos(angle)
        s = np.sin(angle)
        return self.scale(c).add(u.cross(self).scale(s)).add(u.scale(u.dot(self)*(1 - c)))
|
[
"numpy.sqrt",
"numpy.tan",
"numpy.array",
"numpy.cos",
"numpy.sin",
"numpy.matrix"
] |
[((648, 673), 'numpy.tan', 'np.tan', (['(fov * np.pi / 2.0)'], {}), '(fov * np.pi / 2.0)\n', (654, 673), True, 'import numpy as np\n'), ((746, 948), 'numpy.array', 'np.array', (['[[screen_size[1] / (tanHalfFOV * screen_size[0]), 0, 0, 0], [0, 1.0 /\n tanHalfFOV, 0, 0], [0, 0, (-zNear - zFar) / zRange, 2.0 * zFar * zNear /\n zRange], [0, 0, 1, 0]]'], {'dtype': 'np.float32'}), '([[screen_size[1] / (tanHalfFOV * screen_size[0]), 0, 0, 0], [0, \n 1.0 / tanHalfFOV, 0, 0], [0, 0, (-zNear - zFar) / zRange, 2.0 * zFar *\n zNear / zRange], [0, 0, 1, 0]], dtype=np.float32)\n', (754, 948), True, 'import numpy as np\n'), ((2582, 2672), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=\n np.float32)\n', (2591, 2672), True, 'import numpy as np\n'), ((3428, 3536), 'numpy.matrix', 'np.matrix', (['[[s.x, s.y, s.z, 0], [u.x, u.y, u.z, 0], [f.x, f.y, f.z, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[s.x, s.y, s.z, 0], [u.x, u.y, u.z, 0], [f.x, f.y, f.z, 0], [0, \n 0, 0, 1]], dtype=np.float32)\n', (3437, 3536), True, 'import numpy as np\n'), ((6746, 6771), 'numpy.sin', 'np.sin', (['(self.alpha * rate)'], {}), '(self.alpha * rate)\n', (6752, 6771), True, 'import numpy as np\n'), ((6786, 6811), 'numpy.cos', 'np.cos', (['(self.alpha * rate)'], {}), '(self.alpha * rate)\n', (6792, 6811), True, 'import numpy as np\n'), ((6844, 7216), 'numpy.matrix', 'np.matrix', (['[[v.x * v.x * (1 - cos) + cos, v.y * v.x * (1 - cos) - v.z * sin, v.z * v.x *\n (1 - cos) + v.y * sin, 0], [v.x * v.y * (1 - cos) + v.z * sin, v.y * v.\n y * (1 - cos) + cos, v.z * v.y * (1 - cos) - v.x * sin, 0], [v.x * v.z *\n (1 - cos) - v.y * sin, v.y * v.z * (1 - cos) + v.x * sin, v.z * v.z * (\n 1 - cos) + cos, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[v.x * v.x * (1 - cos) + cos, v.y * v.x * (1 - cos) - v.z * sin,\n v.z * v.x * (1 - cos) + v.y * sin, 0], [v.x * v.y * (1 - cos) + v.z *\n sin, v.y * v.y * (1 - cos) + cos, v.z * v.y * (1 - cos) - v.x * sin, 0],\n [v.x * v.z * (1 - cos) - v.y * sin, v.y * v.z * (1 - cos) + v.x * sin, \n v.z * v.z * (1 - cos) + cos, 0], [0, 0, 0, 1]], dtype=np.float32)\n', (6853, 7216), True, 'import numpy as np\n'), ((7930, 8043), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, -self.p.x], [0, 1, 0, -self.p.y], [0, 0, 1, -self.p.z], [0, 0, 0, 1]\n ]'], {'dtype': 'np.float32'}), '([[1, 0, 0, -self.p.x], [0, 1, 0, -self.p.y], [0, 0, 1, -self.p.z],\n [0, 0, 0, 1]], dtype=np.float32)\n', (7939, 8043), True, 'import numpy as np\n'), ((8554, 8644), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=\n np.float32)\n', (8563, 8644), True, 'import numpy as np\n'), ((8983, 9008), 'numpy.sin', 'np.sin', (['(turns * np.pi * 2)'], {}), '(turns * np.pi * 2)\n', (8989, 9008), True, 'import numpy as np\n'), ((9023, 9048), 'numpy.cos', 'np.cos', (['(turns * np.pi * 2)'], {}), '(turns * np.pi * 2)\n', (9029, 9048), True, 'import numpy as np\n'), ((10669, 10777), 'numpy.matrix', 'np.matrix', (['[[s.x, f.x, u.x, 0], [s.y, f.y, u.y, 0], [s.z, f.z, u.z, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[s.x, f.x, u.x, 0], [s.y, f.y, u.y, 0], [s.z, f.z, u.z, 0], [0, \n 0, 0, 1]], dtype=np.float32)\n', (10678, 10777), True, 'import numpy as np\n'), ((11012, 11123), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, self.p.x], [0, 1, 0, self.p.y], [0, 0, 1, self.p.z], [0, 0, 
0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, self.p.x], [0, 1, 0, self.p.y], [0, 0, 1, self.p.z], [\n 0, 0, 0, 1]], dtype=np.float32)\n', (11021, 11123), True, 'import numpy as np\n'), ((11200, 11311), 'numpy.matrix', 'np.matrix', (['[[self.s.x, 0, 0, 0], [0, self.s.y, 0, 0], [0, 0, self.s.z, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[self.s.x, 0, 0, 0], [0, self.s.y, 0, 0], [0, 0, self.s.z, 0], [\n 0, 0, 0, 1]], dtype=np.float32)\n', (11209, 11311), True, 'import numpy as np\n'), ((11675, 11753), 'numpy.sqrt', 'np.sqrt', (['(self.x * self.x + self.y * self.y + self.y * self.y + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y + self.y * self.y + self.y * self.y)\n', (11682, 11753), True, 'import numpy as np\n'), ((12938, 12980), 'numpy.sqrt', 'np.sqrt', (['(self.x * self.x + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y)\n', (12945, 12980), True, 'import numpy as np\n'), ((13590, 13650), 'numpy.sqrt', 'np.sqrt', (['(self.x * self.x + self.y * self.y + self.z * self.z)'], {}), '(self.x * self.x + self.y * self.y + self.z * self.z)\n', (13597, 13650), True, 'import numpy as np\n'), ((9125, 9497), 'numpy.matrix', 'np.matrix', (['[[u.x * u.x * (1 - cos) + cos, u.y * u.x * (1 - cos) - u.z * sin, u.z * u.x *\n (1 - cos) + u.y * sin, 0], [u.x * u.y * (1 - cos) + u.z * sin, u.y * u.\n y * (1 - cos) + cos, u.z * u.y * (1 - cos) - u.x * sin, 0], [u.x * u.z *\n (1 - cos) - u.y * sin, u.y * u.z * (1 - cos) + u.x * sin, u.z * u.z * (\n 1 - cos) + cos, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[u.x * u.x * (1 - cos) + cos, u.y * u.x * (1 - cos) - u.z * sin,\n u.z * u.x * (1 - cos) + u.y * sin, 0], [u.x * u.y * (1 - cos) + u.z *\n sin, u.y * u.y * (1 - cos) + cos, u.z * u.y * (1 - cos) - u.x * sin, 0],\n [u.x * u.z * (1 - cos) - u.y * sin, u.y * u.z * (1 - cos) + u.x * sin, \n u.z * u.z * (1 - cos) + cos, 0], [0, 0, 0, 1]], dtype=np.float32)\n', (9134, 9497), True, 'import numpy as np\n'), ((7750, 7780), 'numpy.array', 'np.array', (['[p.x, p.y, p.z, 1.0]'], {}), '([p.x, p.y, p.z, 1.0])\n', (7758, 7780), True, 'import numpy as np\n')]
|
import os
from functools import partial
from io import BytesIO
import numpy as np
import PIL.Image
import scipy.misc
import tensorflow as tf
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(tf.float32, name="input")
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {"input": t_preprocessed})
def load_inception():
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
    # Define t_input as the image we feed into the network
t_input = tf.placeholder(np.float32, name='input')
imagenet_mean = 117.0
    # The input image must be preprocessed before it is fed into the network:
    # expand_dims adds one dimension, turning [height, width, channel] into [1, height, width, channel]
    # t_input - imagenet_mean subtracts the mean value
t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input': t_preprocessed})
    # Find all convolutional layers
layers = [op.name for op in graph.get_operations() if op.type ==
"Conv2D" and "import/" in op.name]
    # Print the number of convolutional layers
print('Number of layers', len(layers))
    # In particular, print the shape of mixed4d_3x3_bottleneck_pre_relu
name = 'mixed4d_3x3_bottleneck_pre_relu'
print('shape of %s: %s' %(name, str(graph.get_tensor_by_name('import/' + name + ':0').get_shape())))
def savearray(img_array, img_name):
scipy.misc.toimage(img_array).save(img_name)
print('img saved: %s' % img_name)
def visstd(a, s=0.1):
return (a-a.mean())/max(a.std(), 1e-4)*s+0.5
def resize_ratio(img, ratio):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, ratio))
img = img / 255 * (max - min) + min
return img
def resize(img, hw):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, hw))
img = img / 255 * (max - min) + min
return img
def calc_grad_tiled(img, t_grad, tile_size=512):
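    # Evaluate the gradient tile by tile so arbitrarily large images fit in memory;
    # the random roll below shifts the tile boundaries on every call to avoid visible seams.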
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)  # shift the whole image along the rows first, then along the columns
grad = np.zeros_like(img)
for y in range(0, max(h - sz // 2, sz), sz):
for x in range(0, max(w - sz // 2, sz), sz):
sub = img_shift[y:y + sz, x:x + sz]
g = sess.run(t_grad, {t_input: sub})
grad[y:y + sz, x:x + sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
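# 5x5 smoothing kernel: outer product of binomial coefficients, normalized and applied
# per colour channel (via the 3x3 identity); used by the Laplacian-pyramid helpers below.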
k = np.float32([1, 4, 6, 4, 1])
k = np.outer(k, k)
k5x5 = k[:, :, None, None] / k.sum() * np.eye(3, dtype=np.float32)
# Merge a Laplacian pyramid back into a single image
def lap_merge(levels):
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5 * 4, tf.shape(hi), [1, 2, 2, 1]) + hi
return img
# Normalize img by its standard deviation.
def normalize_std(img, eps=1e-10):
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img / tf.maximum(std, eps)
# Laplacian pyramid normalization
def lap_normalize(img, scale_n=4):
img = tf.expand_dims(img, 0)
tlevels = lap_split_n(img, scale_n)
    # apply normalize_std to every level
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0, :, :, :]
# This function splits an image into low- and high-frequency components
def lap_split(img):
with tf.name_scope('split'):
        # One convolution acts as a "smoothing" pass, so lo is the low-frequency component
lo = tf.nn.conv2d(img, k5x5, [1, 2, 2, 1], 'SAME')
        # Scale the low-frequency component back up to the original size to get lo2; subtracting lo2 from img gives the high-frequency component hi
lo2 = tf.nn.conv2d_transpose(lo, k5x5 * 4, tf.shape(img), [1, 2, 2, 1])
hi = img - lo2
return lo, hi
# This function splits the image img into an n-level Laplacian pyramid
def lap_split_n(img, n):
levels = []
for i in range(n):
        # Call lap_split to separate the image into low- and high-frequency parts
        # The high-frequency part is stored in levels
        # The low-frequency part is decomposed further
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def tffunc(*argtypes):
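    # Wrap a TF graph-building function into an ordinary numpy-in / numpy-out function
    # that is evaluated in the default (interactive) session when called.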
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
def render_deepdream(img0, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
name = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139
t_obj = graph.get_tensor_by_name("import/%s:0" % name)
t_score = tf.reduce_mean(t_obj)
t_grad = tf.gradients(t_score, t_input)[0]
lap_n=4
    # Turn lap_normalize into an ordinary function
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0
    # Decompose the image into a pyramid in the same way
    # Here extracting the high and low frequencies is simple: plain resizing is enough
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw) / octave_scale))
hi = img - resize(lo, hw)
img = lo
octaves.append(hi)
    # First generate the low-frequency image, then enlarge it step by step and add the high frequencies back
for octave in range(octave_n):
if octave > 0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2]) + hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g * (step / (np.abs(g).mean() + 1e-7))
            # The only difference is that we would use lap_norm_func to normalize g!
# g = lap_norm_func(g)
# img += g * step
print('.', end=' ')
img = img.clip(0, 255)
savearray(img, './predict_img/deepdream.jpg')
if __name__ == '__main__':
img0 = PIL.Image.open('./images/test.jpg')
img0 = np.float32(img0)
render_deepdream(img0)
|
[
"tensorflow.shape",
"tensorflow.gfile.FastGFile",
"tensorflow.gradients",
"tensorflow.reduce_mean",
"tensorflow.Graph",
"tensorflow.placeholder",
"tensorflow.GraphDef",
"tensorflow.maximum",
"tensorflow.square",
"tensorflow.nn.conv2d",
"numpy.abs",
"numpy.eye",
"tensorflow.InteractiveSession",
"numpy.outer",
"tensorflow.import_graph_def",
"tensorflow.expand_dims",
"numpy.roll",
"numpy.random.randint",
"tensorflow.name_scope",
"functools.partial",
"numpy.zeros_like",
"numpy.float32"
] |
[((151, 161), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (159, 161), True, 'import tensorflow as tf\n'), ((169, 203), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'graph'}), '(graph=graph)\n', (190, 203), True, 'import tensorflow as tf\n'), ((383, 423), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""input"""'}), "(tf.float32, name='input')\n", (397, 423), True, 'import tensorflow as tf\n'), ((463, 505), 'tensorflow.expand_dims', 'tf.expand_dims', (['(t_input - imagenet_mean)', '(0)'], {}), '(t_input - imagenet_mean, 0)\n', (477, 505), True, 'import tensorflow as tf\n'), ((504, 561), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def', "{'input': t_preprocessed}"], {}), "(graph_def, {'input': t_preprocessed})\n", (523, 561), True, 'import tensorflow as tf\n'), ((2762, 2789), 'numpy.float32', 'np.float32', (['[1, 4, 6, 4, 1]'], {}), '([1, 4, 6, 4, 1])\n', (2772, 2789), True, 'import numpy as np\n'), ((2794, 2808), 'numpy.outer', 'np.outer', (['k', 'k'], {}), '(k, k)\n', (2802, 2808), True, 'import numpy as np\n'), ((261, 295), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['model_fn', '"""rb"""'], {}), "(model_fn, 'rb')\n", (279, 295), True, 'import tensorflow as tf\n'), ((318, 331), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (329, 331), True, 'import tensorflow as tf\n'), ((598, 608), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (606, 608), True, 'import tensorflow as tf\n'), ((620, 654), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'graph'}), '(graph=graph)\n', (641, 654), True, 'import tensorflow as tf\n'), ((877, 917), 'tensorflow.placeholder', 'tf.placeholder', (['np.float32'], {'name': '"""input"""'}), "(np.float32, name='input')\n", (891, 917), True, 'import tensorflow as tf\n'), ((1103, 1145), 'tensorflow.expand_dims', 'tf.expand_dims', (['(t_input - imagenet_mean)', '(0)'], {}), '(t_input - imagenet_mean, 0)\n', (1117, 1145), True, 'import tensorflow as tf\n'), ((1150, 1207), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def', "{'input': t_preprocessed}"], {}), "(graph_def, {'input': t_preprocessed})\n", (1169, 1207), True, 'import tensorflow as tf\n'), ((2332, 2361), 'numpy.random.randint', 'np.random.randint', (['sz'], {'size': '(2)'}), '(sz, size=2)\n', (2349, 2361), True, 'import numpy as np\n'), ((2448, 2466), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (2461, 2466), True, 'import numpy as np\n'), ((2848, 2875), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (2854, 2875), True, 'import numpy as np\n'), ((3345, 3367), 'tensorflow.expand_dims', 'tf.expand_dims', (['img', '(0)'], {}), '(img, 0)\n', (3359, 3367), True, 'import tensorflow as tf\n'), ((4664, 4685), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['t_obj'], {}), '(t_obj)\n', (4678, 4685), True, 'import tensorflow as tf\n'), ((5751, 5767), 'numpy.float32', 'np.float32', (['img0'], {}), '(img0)\n', (5761, 5767), True, 'import numpy as np\n'), ((720, 754), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['model_fn', '"""rb"""'], {}), "(model_fn, 'rb')\n", (738, 754), True, 'import tensorflow as tf\n'), ((781, 794), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (792, 794), True, 'import tensorflow as tf\n'), ((2386, 2405), 'numpy.roll', 'np.roll', (['img', 'sx', '(1)'], {}), '(img, sx, 1)\n', (2393, 2405), True, 'import numpy as np\n'), ((2726, 2747), 'numpy.roll', 'np.roll', (['grad', '(-sx)', '(1)'], {}), 
'(grad, -sx, 1)\n', (2733, 2747), True, 'import numpy as np\n'), ((3162, 3188), 'tensorflow.name_scope', 'tf.name_scope', (['"""normalize"""'], {}), "('normalize')\n", (3175, 3188), True, 'import tensorflow as tf\n'), ((3589, 3611), 'tensorflow.name_scope', 'tf.name_scope', (['"""split"""'], {}), "('split')\n", (3602, 3611), True, 'import tensorflow as tf\n'), ((3662, 3707), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['img', 'k5x5', '[1, 2, 2, 1]', '"""SAME"""'], {}), "(img, k5x5, [1, 2, 2, 1], 'SAME')\n", (3674, 3707), True, 'import tensorflow as tf\n'), ((4699, 4729), 'tensorflow.gradients', 'tf.gradients', (['t_score', 't_input'], {}), '(t_score, t_input)\n', (4711, 4729), True, 'import tensorflow as tf\n'), ((4812, 4849), 'functools.partial', 'partial', (['lap_normalize'], {'scale_n': 'lap_n'}), '(lap_normalize, scale_n=lap_n)\n', (4819, 4849), False, 'from functools import partial\n'), ((2977, 2999), 'tensorflow.name_scope', 'tf.name_scope', (['"""merge"""'], {}), "('merge')\n", (2990, 2999), True, 'import tensorflow as tf\n'), ((3265, 3285), 'tensorflow.maximum', 'tf.maximum', (['std', 'eps'], {}), '(std, eps)\n', (3275, 3285), True, 'import tensorflow as tf\n'), ((3815, 3828), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (3823, 3828), True, 'import tensorflow as tf\n'), ((3227, 3241), 'tensorflow.square', 'tf.square', (['img'], {}), '(img)\n', (3236, 3241), True, 'import tensorflow as tf\n'), ((3057, 3069), 'tensorflow.shape', 'tf.shape', (['hi'], {}), '(hi)\n', (3065, 3069), True, 'import tensorflow as tf\n'), ((5027, 5041), 'numpy.float32', 'np.float32', (['hw'], {}), '(hw)\n', (5037, 5041), True, 'import numpy as np\n'), ((5414, 5423), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (5420, 5423), True, 'import numpy as np\n')]
|
from dataclasses import dataclass
from itertools import cycle
from typing import Dict, Union
import numpy as np
from ...layers.utils.color_transformations import (
transform_color,
transform_color_cycle,
)
@dataclass(eq=False)
class ColorCycle:
"""A dataclass to hold a color cycle for the fallback_colors
in the CategoricalColormap
Attributes
----------
values : np.ndarray
The (Nx4) color array of all colors contained in the color cycle.
cycle : cycle
The cycle object that gives fallback colors.
"""
values: np.ndarray
cycle: cycle
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
# turn a generic dict into object
if isinstance(val, dict):
return _coerce_colorcycle_from_dict(val)
elif isinstance(val, ColorCycle):
return val
else:
return _coerce_colorcycle_from_colors(val)
def _json_encode(self):
return {'values': self.values.tolist()}
def __eq__(self, other):
if isinstance(other, ColorCycle):
eq = np.array_equal(self.values, other.values)
else:
eq = False
return eq
def _coerce_colorcycle_from_dict(
val: Dict[str, Union[str, list, np.ndarray, cycle]]
) -> ColorCycle:
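    # Build a ColorCycle from a dict with a required 'values' key and an optional
    # pre-built 'cycle'; when 'cycle' is missing, it is regenerated from the values.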
# validate values
color_values = val.get('values')
if color_values is None:
raise ValueError('ColorCycle requires a values argument')
transformed_color_values = transform_color(color_values)
# validate cycle
color_cycle = val.get('cycle')
if color_cycle is None:
transformed_color_cycle = transform_color_cycle(
color_cycle=color_values,
elem_name='color_cycle',
default="white",
)[0]
else:
transformed_color_cycle = color_cycle
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def _coerce_colorcycle_from_colors(
val: Union[str, list, np.ndarray]
) -> ColorCycle:
if isinstance(val, str):
val = [val]
(
transformed_color_cycle,
transformed_color_values,
) = transform_color_cycle(
color_cycle=val,
elem_name='color_cycle',
default="white",
)
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def compare_colormap_dicts(cmap_1, cmap_2):
if len(cmap_1) != len(cmap_2):
return False
for k, v in cmap_1.items():
if k not in cmap_2:
return False
if not np.allclose(v, cmap_2[k]):
return False
return True
|
[
"numpy.array_equal",
"numpy.allclose",
"dataclasses.dataclass"
] |
[((219, 238), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (228, 238), False, 'from dataclasses import dataclass\n'), ((1165, 1206), 'numpy.array_equal', 'np.array_equal', (['self.values', 'other.values'], {}), '(self.values, other.values)\n', (1179, 1206), True, 'import numpy as np\n'), ((2640, 2665), 'numpy.allclose', 'np.allclose', (['v', 'cmap_2[k]'], {}), '(v, cmap_2[k])\n', (2651, 2665), True, 'import numpy as np\n')]
|
# run local models given a path, default to './mxnet_models/'
import os
import argparse
import time
import mxnet as mx
import numpy as np
file_path = os.path.realpath(__file__)
dir_name = os.path.dirname(file_path)
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
# Defined as plain functions so profiling only starts/stops when they are actually called.
def cuda_profiler_start():
    import numba.cuda as cuda
    cuda.profile_start()
def cuda_profiler_stop():
    import numba.cuda as cuda
    cuda.profile_stop()
def xprint(s):
pass
parser = argparse.ArgumentParser(
description='Predict ImageNet classes from a given image')
parser.add_argument('--model_name', type=str, required=False, default='resnet50_v1',
help='name of the model to use')
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size to use')
parser.add_argument('--input_dim', type=int, required=False, default=224,
help='input dimension')
parser.add_argument('--input_channels', type=int, required=False, default=3,
help='input channels')
parser.add_argument('--num_iterations', type=int, required=False, default=30,
help='number of iterations to run')
parser.add_argument('--num_warmup', type=int, required=False, default=5,
help='number of warmup iterations to run')
parser.add_argument('--model_idx', type=int, required=False, default=2,
help='model idx')
parser.add_argument('--profile', type=bool, required=False, default=False,
help='enable profiling')
opt = parser.parse_args()
model_name = opt.model_name
batch_size = opt.batch_size
input_dim = opt.input_dim
input_channels = opt.input_channels
num_iterations = opt.num_iterations
num_warmup = opt.num_warmup
model_idx = opt.model_idx
profile = opt.profile
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
sym, arg_params, aux_params = mx.model.load_checkpoint(
dir_name + '/mxnet_models/'+model_name, 0)
data_names = [
graph_input
for graph_input in sym.list_inputs()
if graph_input not in arg_params and graph_input not in aux_params
]
net = mx.mod.Module(
symbol=sym,
data_names=[data_names[0]],
context=ctx,
label_names=None,
)
input_shape = (batch_size, input_channels, input_dim, input_dim)
img = mx.random.uniform(
shape=input_shape, ctx=ctx)
net.bind(for_training=False, data_shapes=[
(data_names[0], input_shape)], label_shapes=net._label_shapes)
net.set_params(arg_params, aux_params, allow_missing=True)
def forward_once():
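    # One synchronous forward pass; mx.nd.waitall() before and after the prediction makes
    # sure the timer measures only this pass and not previously queued asynchronous work.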
mx.nd.waitall()
start = time.time()
prob = net.predict(img)
mx.nd.waitall()
end = time.time() # stop timer
return end - start
for i in range(num_warmup):
forward_once()
res = []
if profile:
cuda_profiler_start()
for i in range(num_iterations):
t = forward_once()
res.append(t)
if profile:
cuda_profiler_stop()
res = np.multiply(res, 1000)
print("{},{},{},{},{},{}".format(model_idx+1, model_name, batch_size, np.min(res),
np.average(res), np.max(res)))
|
[
"numpy.multiply",
"argparse.ArgumentParser",
"mxnet.nd.waitall",
"mxnet.random.uniform",
"mxnet.cpu",
"numpy.average",
"numba.cuda.profile_stop",
"numpy.min",
"numpy.max",
"os.path.realpath",
"os.path.dirname",
"numba.cuda.profile_start",
"mxnet.gpu",
"mxnet.mod.Module",
"mxnet.model.load_checkpoint",
"time.time",
"mxnet.test_utils.list_gpus"
] |
[((152, 178), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'import os\n'), ((190, 216), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (205, 216), False, 'import os\n'), ((474, 561), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Predict ImageNet classes from a given image"""'}), "(description=\n 'Predict ImageNet classes from a given image')\n", (497, 561), False, 'import argparse\n'), ((1909, 1978), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (["(dir_name + '/mxnet_models/' + model_name)", '(0)'], {}), "(dir_name + '/mxnet_models/' + model_name, 0)\n", (1933, 1978), True, 'import mxnet as mx\n'), ((2135, 2223), 'mxnet.mod.Module', 'mx.mod.Module', ([], {'symbol': 'sym', 'data_names': '[data_names[0]]', 'context': 'ctx', 'label_names': 'None'}), '(symbol=sym, data_names=[data_names[0]], context=ctx,\n label_names=None)\n', (2148, 2223), True, 'import mxnet as mx\n'), ((2312, 2357), 'mxnet.random.uniform', 'mx.random.uniform', ([], {'shape': 'input_shape', 'ctx': 'ctx'}), '(shape=input_shape, ctx=ctx)\n', (2329, 2357), True, 'import mxnet as mx\n'), ((2926, 2948), 'numpy.multiply', 'np.multiply', (['res', '(1000)'], {}), '(res, 1000)\n', (2937, 2948), True, 'import numpy as np\n'), ((332, 352), 'numba.cuda.profile_start', 'cuda.profile_start', ([], {}), '()\n', (350, 352), True, 'import numba.cuda as cuda\n'), ((417, 436), 'numba.cuda.profile_stop', 'cuda.profile_stop', ([], {}), '()\n', (434, 436), True, 'import numba.cuda as cuda\n'), ((1821, 1829), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (1827, 1829), True, 'import mxnet as mx\n'), ((1869, 1877), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (1875, 1877), True, 'import mxnet as mx\n'), ((2565, 2580), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (2578, 2580), True, 'import mxnet as mx\n'), ((2593, 2604), 'time.time', 'time.time', ([], {}), '()\n', (2602, 2604), False, 'import time\n'), ((2637, 2652), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (2650, 2652), True, 'import mxnet as mx\n'), ((2663, 2674), 'time.time', 'time.time', ([], {}), '()\n', (2672, 2674), False, 'import time\n'), ((1837, 1862), 'mxnet.test_utils.list_gpus', 'mx.test_utils.list_gpus', ([], {}), '()\n', (1860, 1862), True, 'import mxnet as mx\n'), ((3020, 3031), 'numpy.min', 'np.min', (['res'], {}), '(res)\n', (3026, 3031), True, 'import numpy as np\n'), ((3066, 3081), 'numpy.average', 'np.average', (['res'], {}), '(res)\n', (3076, 3081), True, 'import numpy as np\n'), ((3083, 3094), 'numpy.max', 'np.max', (['res'], {}), '(res)\n', (3089, 3094), True, 'import numpy as np\n')]
|
# Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# <NAME> and <NAME>, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import os
import cl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.ticker import FuncFormatter
from itertools import permutations
def plotZ(Z, exportFilename=None) :
(m,n) = Z.shape
cmap = colors.LinearSegmentedColormap.from_list("white_and_gray", [(1, 1, 1), (0.6, 0.6, 0.6)], N=2)
fig, ax = plt.subplots()
im = ax.imshow(Z.T,cmap=cmap)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
for i in range(1,m) :
plt.plot([-0.5+i,-0.5+i],[-0.5,-0.5+n],color='k',linewidth=0.7)
  for i in range(1,n) :
plt.plot([-0.5,-0.5+m],[-0.5+i,-0.5+i],color='k',linewidth=0.7)
for i in range(n) :
v = Z[:,i]
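    # c counts the transitions within column i of Z plus its two endpoints,
    # presumably the CNOT cost annotated next to the corresponding row of the plot.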
c = np.sum(v[:-1] != v[1:]) + v[0] + v[-1]
ax.text(m-0.25,i, str(c), fontsize=12, ha='left', va='center')
if (exportFilename) :
plt.gcf().tight_layout()
plt.savefig(exportFilename + "-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop %s-uncropped.pdf %s.pdf" % (exportFilename, exportFilename))
else :
plt.show()
# Make sure the figure directory exists
cl.ensureDirExists('fig')
# Create the test problem
M = cl.create_basic_problem(7,0)
C = cl.generate_full_rank_weights(20,7,seed=1)
M = np.dot(C,M) % 2
# Apply diagonalization and get the final Z matrix
T = cl.Tableau(M)
R = cl.RecordOperations(T.n)
T.addRecorder(R)
cl.zeroX_algorithm1_cz(T)
T = cl.Tableau(M)
R.apply(T)
Z = T.getZ()
# Plot the results
plotZ(Z,'fig/Figure_9a')
print("Original: %d" % cl.countCNot(Z))
idx = cl.orderZ(Z)
plotZ(Z[idx,:],'fig/Figure_9b')
print("Sorted : %d" % cl.countCNot(Z[idx,:]))
# Generate histogram of actual permutations
if (True) :
base = list(range(7))
count = []
for idx2 in permutations(base) :
idx1 = cl.orderZ(Z[:,idx2])
count.append(cl.countCNot(Z[idx1,:][:,idx2]))
def format_percentage(y, position):
return str(100 * y)
# Count is always even
plt.hist(count,bins=list(range(min(count)-1,max(count)+2,2)),rwidth=0.9,density=True)
plt.gca().set_xticklabels([str(x) for x in range(min(count),max(count)+1,2)],fontsize=16)
plt.gca().set_xticks(list(range(min(count),max(count)+1,2)))
plt.gca().yaxis.set_major_formatter(FuncFormatter(format_percentage))
plt.xlabel('Number of CNOT gates',fontsize=16)
plt.ylabel("Percentage",fontsize=16)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(16)
plt.gcf().tight_layout()
ratio = 0.5
xleft, xright = plt.gca().get_xlim()
ybottom, ytop = plt.gca().get_ylim()
plt.gca().set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
plt.savefig("fig/Figure_9c-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf")
|
[
"matplotlib.pyplot.ylabel",
"cl.orderZ",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.dot",
"itertools.permutations",
"cl.create_basic_problem",
"cl.Tableau",
"matplotlib.pyplot.savefig",
"cl.zeroX_algorithm1_cz",
"cl.ensureDirExists",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"cl.generate_full_rank_weights",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.sum",
"cl.RecordOperations",
"cl.countCNot",
"os.system",
"matplotlib.pyplot.subplots"
] |
[((2046, 2071), 'cl.ensureDirExists', 'cl.ensureDirExists', (['"""fig"""'], {}), "('fig')\n", (2064, 2071), False, 'import cl\n'), ((2103, 2132), 'cl.create_basic_problem', 'cl.create_basic_problem', (['(7)', '(0)'], {}), '(7, 0)\n', (2126, 2132), False, 'import cl\n'), ((2136, 2180), 'cl.generate_full_rank_weights', 'cl.generate_full_rank_weights', (['(20)', '(7)'], {'seed': '(1)'}), '(20, 7, seed=1)\n', (2165, 2180), False, 'import cl\n'), ((2255, 2268), 'cl.Tableau', 'cl.Tableau', (['M'], {}), '(M)\n', (2265, 2268), False, 'import cl\n'), ((2273, 2297), 'cl.RecordOperations', 'cl.RecordOperations', (['T.n'], {}), '(T.n)\n', (2292, 2297), False, 'import cl\n'), ((2315, 2340), 'cl.zeroX_algorithm1_cz', 'cl.zeroX_algorithm1_cz', (['T'], {}), '(T)\n', (2337, 2340), False, 'import cl\n'), ((2345, 2358), 'cl.Tableau', 'cl.Tableau', (['M'], {}), '(M)\n', (2355, 2358), False, 'import cl\n'), ((2475, 2487), 'cl.orderZ', 'cl.orderZ', (['Z'], {}), '(Z)\n', (2484, 2487), False, 'import cl\n'), ((1144, 1241), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""white_and_gray"""', '[(1, 1, 1), (0.6, 0.6, 0.6)]'], {'N': '(2)'}), "('white_and_gray', [(1, 1, 1), (0.6,\n 0.6, 0.6)], N=2)\n", (1184, 1241), True, 'import matplotlib.colors as colors\n'), ((1251, 1265), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1263, 1265), True, 'import matplotlib.pyplot as plt\n'), ((2183, 2195), 'numpy.dot', 'np.dot', (['C', 'M'], {}), '(C, M)\n', (2189, 2195), True, 'import numpy as np\n'), ((2679, 2697), 'itertools.permutations', 'permutations', (['base'], {}), '(base)\n', (2691, 2697), False, 'from itertools import permutations\n'), ((3204, 3251), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of CNOT gates"""'], {'fontsize': '(16)'}), "('Number of CNOT gates', fontsize=16)\n", (3214, 3251), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3291), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage"""'], {'fontsize': '(16)'}), "('Percentage', fontsize=16)\n", (3264, 3291), True, 'import matplotlib.pyplot as plt\n'), ((3571, 3631), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig/Figure_9c-uncropped.pdf"""'], {'transparent': '(True)'}), "('fig/Figure_9c-uncropped.pdf', transparent=True)\n", (3582, 3631), True, 'import matplotlib.pyplot as plt\n'), ((3635, 3646), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3644, 3646), True, 'import matplotlib.pyplot as plt\n'), ((3650, 3716), 'os.system', 'os.system', (['"""pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf"""'], {}), "('pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf')\n", (3659, 3716), False, 'import os\n'), ((1425, 1499), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.5 + i, -0.5 + i]', '[-0.5, -0.5 + n]'], {'color': '"""k"""', 'linewidth': '(0.7)'}), "([-0.5 + i, -0.5 + i], [-0.5, -0.5 + n], color='k', linewidth=0.7)\n", (1433, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1596), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.5, -0.5 + m]', '[-0.5 + i, -0.5 + i]'], {'color': '"""k"""', 'linewidth': '(0.7)'}), "([-0.5, -0.5 + m], [-0.5 + i, -0.5 + i], color='k', linewidth=0.7)\n", (1530, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1872), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(exportFilename + '-uncropped.pdf')"], {'transparent': '(True)'}), "(exportFilename + '-uncropped.pdf', transparent=True)\n", (1819, 1872), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1890), 'matplotlib.pyplot.close', 'plt.close', ([], 
{}), '()\n', (1888, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1976), 'os.system', 'os.system', (["('pdfcrop %s-uncropped.pdf %s.pdf' % (exportFilename, exportFilename))"], {}), "('pdfcrop %s-uncropped.pdf %s.pdf' % (exportFilename, exportFilename))\n", (1906, 1976), False, 'import os\n'), ((1993, 2003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2001, 2003), True, 'import matplotlib.pyplot as plt\n'), ((2451, 2466), 'cl.countCNot', 'cl.countCNot', (['Z'], {}), '(Z)\n', (2463, 2466), False, 'import cl\n'), ((2543, 2566), 'cl.countCNot', 'cl.countCNot', (['Z[idx, :]'], {}), '(Z[idx, :])\n', (2555, 2566), False, 'import cl\n'), ((2714, 2735), 'cl.orderZ', 'cl.orderZ', (['Z[:, idx2]'], {}), '(Z[:, idx2])\n', (2723, 2735), False, 'import cl\n'), ((3167, 3199), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['format_percentage'], {}), '(format_percentage)\n', (3180, 3199), False, 'from matplotlib.ticker import FuncFormatter\n'), ((2755, 2788), 'cl.countCNot', 'cl.countCNot', (['Z[idx1, :][:, idx2]'], {}), '(Z[idx1, :][:, idx2])\n', (2767, 2788), False, 'import cl\n'), ((2974, 2983), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2981, 2983), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3076), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3074, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3380, 3389), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3387, 3389), True, 'import matplotlib.pyplot as plt\n'), ((3440, 3449), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3447, 3449), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3489), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3487, 3489), True, 'import matplotlib.pyplot as plt\n'), ((3504, 3513), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3511, 3513), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1660), 'numpy.sum', 'np.sum', (['(v[:-1] != v[1:])'], {}), '(v[:-1] != v[1:])\n', (1643, 1660), True, 'import numpy as np\n'), ((1777, 1786), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1784, 1786), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3140), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3138, 3140), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3316), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3314, 3316), True, 'import matplotlib.pyplot as plt\n')]
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
RefIAD = sqrt(3) / 4
# === STANDARD CONSTRUCTOR
def __init__(self, IAD):
RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
RefUnitCellTiling = CubicTiling(RefUnitCellShape)
RefFracPositions = [np.array([0.0, 0.0, 0.0]),
np.array([0.5, 0.5, 0.0]),
np.array([0.0, 0.5, 0.5]),
np.array([0.5, 0.0, 0.5]),
np.array([0.25, 0.25, 0.25]),
np.array([0.25, 0.75, 0.75]),
np.array([0.75, 0.25, 0.75]),
np.array([0.75, 0.75, 0.25])]
RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
UnitCellLattice.__init__(self, RefUnitCell)
self._IAD = DiamondLattice.RefIAD # IAD is set correctly after calling applyTransF
self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
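        # _NthNeighbors[layer][site_type] holds the relative (reference-cell) offsets of
        # the (layer+1)-th neighbor shell for each of the two site types (0 and 3).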
self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
np.array([-0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, -0.25]),
np.array([0.25, -0.25, -0.25])],
[np.array([-0.25, -0.25, -0.25]),
np.array([0.25, 0.25, -0.25]),
np.array([0.25, -0.25, 0.25]),
np.array([-0.25, 0.25, 0.25])]],
[[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])],
[np.array([0.0, 0.5, 0.5]),
np.array([0.0, 0.5, -0.5]),
np.array([0.0, -0.5, 0.5]),
np.array([0.0, -0.5, -0.5]),
np.array([0.5, 0.5, 0.0]),
np.array([0.5, 0.0, 0.5]),
np.array([0.5, -0.5, 0.0]),
np.array([0.5, 0.0, -0.5]),
np.array([-0.5, 0.5, 0.0]),
np.array([-0.5, 0.0, 0.5]),
np.array([-0.5, -0.5, 0.0]),
np.array([-0.5, 0.0, -0.5])]]]
self._typeDict = {0: 0, 3: 1}
self._relativePositions = {0: np.array([0.0, 0.0, 0.0]), 3: np.array([0.25, 0.25, 0.25])}
# === CONSTRUCTOR - Aligned with {100}
@classmethod
def alignedWith100(cls, IAD):
return cls(IAD) # Default implementation
# === CONSTRUCTOR - Aligned with {110}
@classmethod
def aligndWith110(cls, IAD):
result = cls(IAD)
thetaX = 0
thetaY = np.pi * 0.25
thetaZ = 0
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {111}
@classmethod
def alignedWith111(cls, IAD, blnTrianglesAlignedWithX=True):
result = cls(IAD)
thetaX = -np.pi * 0.25
thetaY = -np.arctan2(-sqrt(2), 2)
thetaZ = (np.pi * 0.5 if blnTrianglesAlignedWithX else 0)
result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
return result
# === CONSTRUCTOR - Aligned with {xyz}
@classmethod
def alignedWith(cls, IAD, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return cls(IAD)
elif MI in ['110', '101', '011']:
return cls.aligndWith110(IAD)
elif MI == '111':
return cls.alignedWith111(IAD)
else:
result = cls(IAD)
a = np.array([0.0, 0.0, 1.0])
b = np.array([float(MI[0]), float(MI[1]), float(MI[2])])
axis = np.cross(a, b)
angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
result.applyTransF(RotateFunc.fromAxisAngle(axis, angle))
return result
return ValueError('DiamondLattice.alignedWith: Input direction is not correct.')
# === MANIPULATION METHODS
def applyTransF(self, TransF):
if isinstance(TransF, ScaleFunc):
if TransF.isIsometric:
self._IAD *= TransF.Scale[0]
else:
raise ValueError('DiamondLattice.applyTransF: Can only scale isometrically')
UnitCellLattice.applyTransF(self, TransF)
# === AUXILIARY METHODS
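    # A point's type is the sum of its quadrupled coordinates modulo 4; types 0 and 3
    # correspond to the two interpenetrating FCC sublattices of the diamond structure.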
def _getPointType(self, P):
return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4
# === PROPERTY EVALUATION METHODS
# NOTE: inherited from UnitCellLattice
# def isOnLattice(self,P):
def areNeighbors(self, P1, P2):
return np.linalg.norm(P2 - P1) <= self.IAD
def getNeighbors(self, P, layer=1):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
if PType not in self._typeDict.keys():
raise ValueError('DiamondLattice.getNeighbors Should never reach here!')
if layer > len(self._NthNeighbors):
self._calculateNeighbors(layer)
NBs = deepcopy(self._NthNeighbors[layer - 1][self._typeDict[PType]])
for NeighP in NBs:
NeighP += RefP
self._convertFromReference(NeighP)
return NBs
def _calculateNeighbors(self, layer):
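        # Grow the cached neighbor shells up to 'layer': each new shell is reached by
        # stepping from the previous shell's points to their first neighbors, keeping
        # only points that have not been recorded yet.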
NList = []
for k, v in self._typeDict.items():
tmp = [np.array([0, 0, 0], dtype=float)]
for nb in self._NthNeighbors:
tmp.extend(nb[v])
NList.append(tmp)
for _ in range(layer - len(self._NthNeighbors)):
tmp = [[] for _ in self._typeDict.keys()]
for k, v in self._typeDict.items():
for P in self._NthNeighbors[len(self._NthNeighbors) - 1][v]:
PType = self._getPointType(P + self._relativePositions[k])
for Q in self._NthNeighbors[0][self._typeDict[PType]]:
N = P + Q
if not ListHasPoint(NList[v], N, 0.001 * DiamondLattice.RefIAD):
tmp[v].append(N)
NList[v].append(N)
self._NthNeighbors.append(tmp)
def isASite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 0
def isBSite(self, P):
RefP = self._getConvertToReference(P)
PType = self._getPointType(RefP)
return PType == 3
def setDesign(self, D, AType, BType):
for i, P in enumerate(D.Canvas.Points):
if self.isASite(P):
D.setContent(i, AType)
elif self.isBSite(P):
D.setContent(i, BType)
else:
raise ValueError('setDesign can not set site not on lattice')
# === BASIC QUERY METHODS
@property
def IAD(self):
return self._IAD
@property
def Diamond100LayerSpacing(self):
return self.IAD / sqrt(3)
@property
def Diamond110LayerSpacing(self):
return self.IAD * sqrt(2) / sqrt(3)
@property
def Diamond111LayerSpacing(self):
return self.IAD * 4 / 3
@property
def Diamond112LayerSpacing(self):
return self.IAD * sqrt(2) / 3
def getLayerSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return self.Diamond100LayerSpacing
elif MI in ['110', '101', '011']:
return self.Diamond110LayerSpacing
elif MI == '111':
return self.Diamond111LayerSpacing
elif MI in ['112', '121', '211']:
return self.Diamond112LayerSpacing
else:
raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
return ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')
def getShellSpacing(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001', '110', '101', '011', '111']:
return self.IAD * sqrt(8) / sqrt(3)
elif MI in ['112', '121', '211']:
return self.IAD * sqrt(2) / sqrt(3)
else:
raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
return ValueError('The input direction is not correct.')
def getUniqueLayerCount(self, MI):
if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
if MI in ['100', '010', '001']:
return 4
elif MI in ['110', '101', '011']:
return 2
elif MI == '111':
return 3
elif MI in ['112', '121', '211']:
return 6
else:
raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
return ValueError('The input direction is not correct.')
|
[
"copy.deepcopy",
"numpy.cross",
"math.sqrt",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
] |
[((1104, 1111), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (1108, 1111), False, 'from math import sqrt\n'), ((6940, 7002), 'copy.deepcopy', 'deepcopy', (['self._NthNeighbors[layer - 1][self._typeDict[PType]]'], {}), '(self._NthNeighbors[layer - 1][self._typeDict[PType]])\n', (6948, 7002), False, 'from copy import deepcopy\n'), ((1350, 1375), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1358, 1375), True, 'import numpy as np\n'), ((1405, 1430), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (1413, 1430), True, 'import numpy as np\n'), ((1460, 1485), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (1468, 1485), True, 'import numpy as np\n'), ((1515, 1540), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (1523, 1540), True, 'import numpy as np\n'), ((1570, 1598), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (1578, 1598), True, 'import numpy as np\n'), ((1628, 1656), 'numpy.array', 'np.array', (['[0.25, 0.75, 0.75]'], {}), '([0.25, 0.75, 0.75])\n', (1636, 1656), True, 'import numpy as np\n'), ((1686, 1714), 'numpy.array', 'np.array', (['[0.75, 0.25, 0.75]'], {}), '([0.75, 0.25, 0.75])\n', (1694, 1714), True, 'import numpy as np\n'), ((1744, 1772), 'numpy.array', 'np.array', (['[0.75, 0.75, 0.25]'], {}), '([0.75, 0.75, 0.25])\n', (1752, 1772), True, 'import numpy as np\n'), ((4081, 4106), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4089, 4106), True, 'import numpy as np\n'), ((4111, 4139), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (4119, 4139), True, 'import numpy as np\n'), ((6542, 6565), 'numpy.linalg.norm', 'np.linalg.norm', (['(P2 - P1)'], {}), '(P2 - P1)\n', (6556, 6565), True, 'import numpy as np\n'), ((8815, 8822), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (8819, 8822), False, 'from math import sqrt\n'), ((8912, 8919), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (8916, 8919), False, 'from math import sqrt\n'), ((1230, 1262), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (1238, 1262), True, 'import numpy as np\n'), ((7248, 7280), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (7256, 7280), True, 'import numpy as np\n'), ((8902, 8909), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (8906, 8909), False, 'from math import sqrt\n'), ((9084, 9091), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (9088, 9091), False, 'from math import sqrt\n'), ((2083, 2111), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (2091, 2111), True, 'import numpy as np\n'), ((2145, 2175), 'numpy.array', 'np.array', (['[-0.25, -0.25, 0.25]'], {}), '([-0.25, -0.25, 0.25])\n', (2153, 2175), True, 'import numpy as np\n'), ((2209, 2239), 'numpy.array', 'np.array', (['[-0.25, 0.25, -0.25]'], {}), '([-0.25, 0.25, -0.25])\n', (2217, 2239), True, 'import numpy as np\n'), ((2273, 2303), 'numpy.array', 'np.array', (['[0.25, -0.25, -0.25]'], {}), '([0.25, -0.25, -0.25])\n', (2281, 2303), True, 'import numpy as np\n'), ((2338, 2369), 'numpy.array', 'np.array', (['[-0.25, -0.25, -0.25]'], {}), '([-0.25, -0.25, -0.25])\n', (2346, 2369), True, 'import numpy as np\n'), ((2403, 2432), 'numpy.array', 'np.array', (['[0.25, 0.25, -0.25]'], {}), '([0.25, 0.25, -0.25])\n', (2411, 2432), True, 'import numpy as np\n'), ((2466, 2495), 'numpy.array', 'np.array', (['[0.25, -0.25, 
0.25]'], {}), '([0.25, -0.25, 0.25])\n', (2474, 2495), True, 'import numpy as np\n'), ((2529, 2558), 'numpy.array', 'np.array', (['[-0.25, 0.25, 0.25]'], {}), '([-0.25, 0.25, 0.25])\n', (2537, 2558), True, 'import numpy as np\n'), ((2594, 2619), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (2602, 2619), True, 'import numpy as np\n'), ((2653, 2679), 'numpy.array', 'np.array', (['[0.0, 0.5, -0.5]'], {}), '([0.0, 0.5, -0.5])\n', (2661, 2679), True, 'import numpy as np\n'), ((2713, 2739), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.5]'], {}), '([0.0, -0.5, 0.5])\n', (2721, 2739), True, 'import numpy as np\n'), ((2773, 2800), 'numpy.array', 'np.array', (['[0.0, -0.5, -0.5]'], {}), '([0.0, -0.5, -0.5])\n', (2781, 2800), True, 'import numpy as np\n'), ((2834, 2859), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (2842, 2859), True, 'import numpy as np\n'), ((2893, 2918), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (2901, 2918), True, 'import numpy as np\n'), ((2952, 2978), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (2960, 2978), True, 'import numpy as np\n'), ((3012, 3038), 'numpy.array', 'np.array', (['[0.5, 0.0, -0.5]'], {}), '([0.5, 0.0, -0.5])\n', (3020, 3038), True, 'import numpy as np\n'), ((3072, 3098), 'numpy.array', 'np.array', (['[-0.5, 0.5, 0.0]'], {}), '([-0.5, 0.5, 0.0])\n', (3080, 3098), True, 'import numpy as np\n'), ((3132, 3158), 'numpy.array', 'np.array', (['[-0.5, 0.0, 0.5]'], {}), '([-0.5, 0.0, 0.5])\n', (3140, 3158), True, 'import numpy as np\n'), ((3192, 3219), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.0]'], {}), '([-0.5, -0.5, 0.0])\n', (3200, 3219), True, 'import numpy as np\n'), ((3253, 3280), 'numpy.array', 'np.array', (['[-0.5, 0.0, -0.5]'], {}), '([-0.5, 0.0, -0.5])\n', (3261, 3280), True, 'import numpy as np\n'), ((3315, 3340), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (3323, 3340), True, 'import numpy as np\n'), ((3374, 3400), 'numpy.array', 'np.array', (['[0.0, 0.5, -0.5]'], {}), '([0.0, 0.5, -0.5])\n', (3382, 3400), True, 'import numpy as np\n'), ((3434, 3460), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.5]'], {}), '([0.0, -0.5, 0.5])\n', (3442, 3460), True, 'import numpy as np\n'), ((3494, 3521), 'numpy.array', 'np.array', (['[0.0, -0.5, -0.5]'], {}), '([0.0, -0.5, -0.5])\n', (3502, 3521), True, 'import numpy as np\n'), ((3555, 3580), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (3563, 3580), True, 'import numpy as np\n'), ((3614, 3639), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (3622, 3639), True, 'import numpy as np\n'), ((3673, 3699), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (3681, 3699), True, 'import numpy as np\n'), ((3733, 3759), 'numpy.array', 'np.array', (['[0.5, 0.0, -0.5]'], {}), '([0.5, 0.0, -0.5])\n', (3741, 3759), True, 'import numpy as np\n'), ((3793, 3819), 'numpy.array', 'np.array', (['[-0.5, 0.5, 0.0]'], {}), '([-0.5, 0.5, 0.0])\n', (3801, 3819), True, 'import numpy as np\n'), ((3853, 3879), 'numpy.array', 'np.array', (['[-0.5, 0.0, 0.5]'], {}), '([-0.5, 0.0, 0.5])\n', (3861, 3879), True, 'import numpy as np\n'), ((3913, 3940), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.0]'], {}), '([-0.5, -0.5, 0.0])\n', (3921, 3940), True, 'import numpy as np\n'), ((3974, 4001), 'numpy.array', 'np.array', (['[-0.5, 0.0, -0.5]'], {}), '([-0.5, 0.0, -0.5])\n', (3982, 4001), True, 'import 
numpy as np\n'), ((4786, 4793), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (4790, 4793), False, 'from math import sqrt\n'), ((10040, 10047), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (10044, 10047), False, 'from math import sqrt\n'), ((5458, 5483), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (5466, 5483), True, 'import numpy as np\n'), ((5580, 5594), 'numpy.cross', 'np.cross', (['a', 'b'], {}), '(a, b)\n', (5588, 5594), True, 'import numpy as np\n'), ((10030, 10037), 'math.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (10034, 10037), False, 'from math import sqrt\n'), ((10138, 10145), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (10142, 10145), False, 'from math import sqrt\n'), ((10128, 10135), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (10132, 10135), False, 'from math import sqrt\n'), ((5629, 5641), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (5635, 5641), True, 'import numpy as np\n'), ((5645, 5662), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (5659, 5662), True, 'import numpy as np\n'), ((5665, 5682), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (5679, 5682), True, 'import numpy as np\n')]
|
import os, sys
import numpy as np
from sedflow import obs as Obs
from sedflow import train as Train
from provabgs import infer as Infer
from provabgs import models as Models
####################################################
# input
####################################################
sample = sys.argv[1]
itrain = int(sys.argv[2])
nhidden = int(sys.argv[3])
nblocks = int(sys.argv[4])
niter = int(sys.argv[5])
i0 = int(sys.argv[6])
i1 = int(sys.argv[7])
####################################################
# compile NSA failures
####################################################
# u, g, r, i, z, sigma_u, sigma_g, sigma_r, sigma_i, sigma_z, redshift
y_nsa = Obs.load_nsa_data(test_set=False)
igals = np.load('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')
# convert to flux
y_flux = Train.mag2flux(y_nsa[:,:5])
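# inverse variance of the fluxes: magnitude uncertainties converted to flux uncertainties, then ivar = sigma**-2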
y_ivar = Train.sigma_mag2flux(y_nsa[:,5:10], y_nsa[:,:5])**-2
y_zred = y_nsa[:,-1]
####################################################
# setup inference
####################################################
# SPS parameter priors
prior_sps = Infer.load_priors([
Infer.UniformPrior(7., 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index
])
# SPS model
m_sps = Models.NMF(burst=True, emulator=True)
def run_mcmc(i_obs):
# desi MCMC object
nsa_mcmc = Infer.nsaMCMC(model=m_sps, prior=prior_sps)
fmcmc = os.path.join('/scratch/network/chhahn/sedflow/nsa_fail',
'mcmc.nsa.%i.hdf5' % i_obs)
if not os.path.isfile(fmcmc):
print('%s running' % os.path.basename(fmcmc))
if not np.all(np.isfinite(y_flux[i_obs])):
print('NaN photometry', y_flux[i_obs])
return None
if not np.all(np.isfinite(y_ivar[i_obs])):
print('NaN ivar', y_ivar[i_obs])
return None
# run MCMC
zeus_chain = nsa_mcmc.run(
bands='sdss', # u, g, r, i, z
photo_obs=y_flux[i_obs],
photo_ivar_obs=y_ivar[i_obs],
zred=y_zred[i_obs],
vdisp=0.,
sampler='zeus',
nwalkers=30,
burnin=0,
opt_maxiter=2000,
niter=niter,
progress=True,
writeout=fmcmc)
else:
print('%s already exists' % os.path.basename(fmcmc))
return None
for i in range(i0, i1+1):
run_mcmc(igals[i])
|
[
"provabgs.models.NMF",
"sedflow.train.mag2flux",
"provabgs.infer.nsaMCMC",
"provabgs.infer.LogUniformPrior",
"provabgs.infer.UniformPrior",
"sedflow.obs.load_nsa_data",
"os.path.join",
"sedflow.train.sigma_mag2flux",
"os.path.isfile",
"provabgs.infer.FlatDirichletPrior",
"numpy.isfinite",
"os.path.basename",
"numpy.load"
] |
[((689, 722), 'sedflow.obs.load_nsa_data', 'Obs.load_nsa_data', ([], {'test_set': '(False)'}), '(test_set=False)\n', (706, 722), True, 'from sedflow import obs as Obs\n'), ((732, 798), 'numpy.load', 'np.load', (['"""/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy"""'], {}), "('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')\n", (739, 798), True, 'import numpy as np\n'), ((828, 856), 'sedflow.train.mag2flux', 'Train.mag2flux', (['y_nsa[:, :5]'], {}), '(y_nsa[:, :5])\n', (842, 856), True, 'from sedflow import train as Train\n'), ((1863, 1900), 'provabgs.models.NMF', 'Models.NMF', ([], {'burst': '(True)', 'emulator': '(True)'}), '(burst=True, emulator=True)\n', (1873, 1900), True, 'from provabgs import models as Models\n'), ((865, 915), 'sedflow.train.sigma_mag2flux', 'Train.sigma_mag2flux', (['y_nsa[:, 5:10]', 'y_nsa[:, :5]'], {}), '(y_nsa[:, 5:10], y_nsa[:, :5])\n', (885, 915), True, 'from sedflow import train as Train\n'), ((1962, 2005), 'provabgs.infer.nsaMCMC', 'Infer.nsaMCMC', ([], {'model': 'm_sps', 'prior': 'prior_sps'}), '(model=m_sps, prior=prior_sps)\n', (1975, 2005), True, 'from provabgs import infer as Infer\n'), ((2023, 2111), 'os.path.join', 'os.path.join', (['"""/scratch/network/chhahn/sedflow/nsa_fail"""', "('mcmc.nsa.%i.hdf5' % i_obs)"], {}), "('/scratch/network/chhahn/sedflow/nsa_fail', 'mcmc.nsa.%i.hdf5' %\n i_obs)\n", (2035, 2111), False, 'import os, sys\n'), ((1129, 1171), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(7.0)', '(12.5)'], {'label': '"""sed"""'}), "(7.0, 12.5, label='sed')\n", (1147, 1171), True, 'from provabgs import infer as Infer\n'), ((1180, 1220), 'provabgs.infer.FlatDirichletPrior', 'Infer.FlatDirichletPrior', (['(4)'], {'label': '"""sed"""'}), "(4, label='sed')\n", (1204, 1220), True, 'from provabgs import infer as Infer\n'), ((1265, 1306), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.0)', '(1.0)'], {'label': '"""sed"""'}), "(0.0, 1.0, label='sed')\n", (1283, 1306), True, 'from provabgs import infer as Infer\n'), ((1342, 1386), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.01)', '(13.27)'], {'label': '"""sed"""'}), "(0.01, 13.27, label='sed')\n", (1360, 1386), True, 'from provabgs import infer as Infer\n'), ((1408, 1458), 'provabgs.infer.LogUniformPrior', 'Infer.LogUniformPrior', (['(4.5e-05)', '(0.015)'], {'label': '"""sed"""'}), "(4.5e-05, 0.015, label='sed')\n", (1429, 1458), True, 'from provabgs import infer as Infer\n'), ((1501, 1551), 'provabgs.infer.LogUniformPrior', 'Infer.LogUniformPrior', (['(4.5e-05)', '(0.015)'], {'label': '"""sed"""'}), "(4.5e-05, 0.015, label='sed')\n", (1522, 1551), True, 'from provabgs import infer as Infer\n'), ((1594, 1635), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.0)', '(3.0)'], {'label': '"""sed"""'}), "(0.0, 3.0, label='sed')\n", (1612, 1635), True, 'from provabgs import infer as Infer\n'), ((1676, 1717), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(0.0)', '(3.0)'], {'label': '"""sed"""'}), "(0.0, 3.0, label='sed')\n", (1694, 1717), True, 'from provabgs import infer as Infer\n'), ((1758, 1800), 'provabgs.infer.UniformPrior', 'Infer.UniformPrior', (['(-2.0)', '(1.0)'], {'label': '"""sed"""'}), "(-2.0, 1.0, label='sed')\n", (1776, 1800), True, 'from provabgs import infer as Infer\n'), ((2133, 2154), 'os.path.isfile', 'os.path.isfile', (['fmcmc'], {}), '(fmcmc)\n', (2147, 2154), False, 'import os, sys\n'), ((2186, 2209), 'os.path.basename', 'os.path.basename', (['fmcmc'], {}), '(fmcmc)\n', (2202, 2209), False, 'import os, sys\n'), 
((2238, 2264), 'numpy.isfinite', 'np.isfinite', (['y_flux[i_obs]'], {}), '(y_flux[i_obs])\n', (2249, 2264), True, 'import numpy as np\n'), ((2374, 2400), 'numpy.isfinite', 'np.isfinite', (['y_ivar[i_obs]'], {}), '(y_ivar[i_obs])\n', (2385, 2400), True, 'import numpy as np\n'), ((2985, 3008), 'os.path.basename', 'os.path.basename', (['fmcmc'], {}), '(fmcmc)\n', (3001, 3008), False, 'import os, sys\n')]
|
import os
from tqdm import tqdm
import cv2
import numpy as np
#pre process test data:
path = "raw_test_data/"
list_width = []
list_height = []
list_image = []
def pre_process():
print("analyze images")
for Files in tqdm(os.listdir(path)):
if "jpg" in Files:
#print(Files)
img = cv2.imread(path + Files, 1)
height, width, chan = img.shape
#print(width)
#print(height)
list_width.append(width)
list_height.append(height)
max_width = np.max(list_width)
max_height = np.max(list_height)
if max_height == max_width :
print("max height == max width")
print("format images: ")
for image in tqdm(os.listdir(path)):
if "jpg" in image:
#print(image)
img = cv2.imread(path + image, 1)
height, width, chan = img.shape
new_height = (round(max_height/16)+1)*16 # image dimension needs to be a multiple of 16
new_width = new_height # image needs to be squared
delta_width = new_width - width
delta_height = new_height - height
#print("delta height",delta_height)
#print("delta width",delta_width)
pad_img = cv2.copyMakeBorder(img, 0, delta_height, 0, delta_width, cv2.BORDER_CONSTANT,None, value = 0)
#list_image.append(pad_img)
cv2.imwrite("test_data/"+image, pad_img)
pre_process()
for image in list_image:
print(image.shape)
|
[
"cv2.imwrite",
"os.listdir",
"cv2.copyMakeBorder",
"numpy.max",
"cv2.imread"
] |
[((539, 557), 'numpy.max', 'np.max', (['list_width'], {}), '(list_width)\n', (545, 557), True, 'import numpy as np\n'), ((575, 594), 'numpy.max', 'np.max', (['list_height'], {}), '(list_height)\n', (581, 594), True, 'import numpy as np\n'), ((230, 246), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (240, 246), False, 'import os\n'), ((720, 736), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (730, 736), False, 'import os\n'), ((320, 347), 'cv2.imread', 'cv2.imread', (['(path + Files)', '(1)'], {}), '(path + Files, 1)\n', (330, 347), False, 'import cv2\n'), ((810, 837), 'cv2.imread', 'cv2.imread', (['(path + image)', '(1)'], {}), '(path + image, 1)\n', (820, 837), False, 'import cv2\n'), ((1265, 1362), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', '(0)', 'delta_height', '(0)', 'delta_width', 'cv2.BORDER_CONSTANT', 'None'], {'value': '(0)'}), '(img, 0, delta_height, 0, delta_width, cv2.\n BORDER_CONSTANT, None, value=0)\n', (1283, 1362), False, 'import cv2\n'), ((1411, 1453), 'cv2.imwrite', 'cv2.imwrite', (["('test_data/' + image)", 'pad_img'], {}), "('test_data/' + image, pad_img)\n", (1422, 1453), False, 'import cv2\n')]
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return their servers."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
class CreateLocalClusterTest(tf.test.TestCase):
def testCreateLocalCluster(self):
workers, _ = create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
var0 = tf.Variable(0.0)
with tf.device("/job:ps/task:1"):
var1 = tf.Variable(1.0)
worker_sessions[0].run([var0.initializer, var1.initializer])
with tf.device("/job:ps/task:0"):
var2 = tf.Variable(2.0)
with tf.device("/job:ps/task:1"):
var3 = tf.Variable(3.0)
worker_sessions[1].run([var2.initializer, var3.initializer])
# Read values back in the opposite session
self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(tf.test.Benchmark):
def benchmarkCreateLocalCluster(self):
deltas = []
iters = 5
for _ in range(iters):
start_time = time.time()
create_local_cluster(num_workers=1, num_ps=10)
end_time = time.time()
deltas.append(end_time - start_time)
median_deltas = np.median(deltas)
print(
"\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
"iterations: %d, median wall time: %g\n\n" % (iters, median_deltas))
self.report_benchmark(
iters=iters,
wall_time=median_deltas,
name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(tf.test.Benchmark):
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
workers, _ = create_local_cluster(num_workers=1, num_ps=100)
worker_sessions = [tf.Session(w.target) for w in workers]
worker = worker_sessions[0]
partition_sizes = (1, 512, 1024*32, 1024*128)
partitioned = []
for partition_size in partition_sizes:
# max_shard_bytes is 4, shape is 1000*partition_size float32s which should
# partition into 1000 shards, each containing partition_size float32s.
print("Building partitioned variable with %d floats per partition"
% partition_size)
with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
partitioned_ix = tf.get_variable(
"partitioned_%d" % partition_size,
shape=[1000 * partition_size],
dtype=tf.float32,
# Each partition to have exactly N float32s
partitioner=tf.variable_axis_size_partitioner(
max_shard_bytes=4 * partition_size))
# Concatenates along axis 0
partitioned.append(tf.convert_to_tensor(partitioned_ix))
tf.global_variables_initializer().run(session=worker)
for ix, partition_size in enumerate(partition_sizes):
print("Running benchmark having partitions with %d floats"
% partition_size)
self.run_op_benchmark(
worker,
partitioned[ix],
name=("read_concat_1000_partitions_from_"
"100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
tf.test.main()
|
[
"tensorflow.train.ClusterSpec",
"tensorflow.device",
"numpy.median",
"tensorflow.train.Server",
"tensorflow.Variable",
"tensorflow.Session",
"tensorflow.test.main",
"tensorflow.global_variables_initializer",
"tensorflow.variable_axis_size_partitioner",
"tensorflow.train.replica_device_setter",
"tensorflow.convert_to_tensor",
"portpicker.pick_unused_port",
"time.time"
] |
[((1362, 1396), 'tensorflow.train.ClusterSpec', 'tf.train.ClusterSpec', (['cluster_dict'], {}), '(cluster_dict)\n', (1382, 1396), True, 'import tensorflow as tf\n'), ((4919, 4933), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (4931, 4933), True, 'import tensorflow as tf\n'), ((1085, 1114), 'portpicker.pick_unused_port', 'portpicker.pick_unused_port', ([], {}), '()\n', (1112, 1114), False, 'import portpicker\n'), ((1158, 1187), 'portpicker.pick_unused_port', 'portpicker.pick_unused_port', ([], {}), '()\n', (1185, 1187), False, 'import portpicker\n'), ((1418, 1506), 'tensorflow.train.Server', 'tf.train.Server', (['cs'], {'job_name': '"""worker"""', 'protocol': 'protocol', 'task_index': 'ix', 'start': '(True)'}), "(cs, job_name='worker', protocol=protocol, task_index=ix,\n start=True)\n", (1433, 1506), True, 'import tensorflow as tf\n'), ((1573, 1658), 'tensorflow.train.Server', 'tf.train.Server', (['cs'], {'job_name': '"""ps"""', 'protocol': 'protocol', 'task_index': 'ix', 'start': '(True)'}), "(cs, job_name='ps', protocol=protocol, task_index=ix, start=True\n )\n", (1588, 1658), True, 'import tensorflow as tf\n'), ((2992, 3009), 'numpy.median', 'np.median', (['deltas'], {}), '(deltas)\n', (3001, 3009), True, 'import numpy as np\n'), ((1899, 1919), 'tensorflow.Session', 'tf.Session', (['w.target'], {}), '(w.target)\n', (1909, 1919), True, 'import tensorflow as tf\n'), ((1947, 1974), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:0"""'], {}), "('/job:ps/task:0')\n", (1956, 1974), True, 'import tensorflow as tf\n'), ((1989, 2005), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), '(0.0)\n', (2000, 2005), True, 'import tensorflow as tf\n'), ((2015, 2042), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:1"""'], {}), "('/job:ps/task:1')\n", (2024, 2042), True, 'import tensorflow as tf\n'), ((2057, 2073), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2068, 2073), True, 'import tensorflow as tf\n'), ((2148, 2175), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:0"""'], {}), "('/job:ps/task:0')\n", (2157, 2175), True, 'import tensorflow as tf\n'), ((2190, 2206), 'tensorflow.Variable', 'tf.Variable', (['(2.0)'], {}), '(2.0)\n', (2201, 2206), True, 'import tensorflow as tf\n'), ((2216, 2243), 'tensorflow.device', 'tf.device', (['"""/job:ps/task:1"""'], {}), "('/job:ps/task:1')\n", (2225, 2243), True, 'import tensorflow as tf\n'), ((2258, 2274), 'tensorflow.Variable', 'tf.Variable', (['(3.0)'], {}), '(3.0)\n', (2269, 2274), True, 'import tensorflow as tf\n'), ((2834, 2845), 'time.time', 'time.time', ([], {}), '()\n', (2843, 2845), False, 'import time\n'), ((2916, 2927), 'time.time', 'time.time', ([], {}), '()\n', (2925, 2927), False, 'import time\n'), ((3524, 3544), 'tensorflow.Session', 'tf.Session', (['w.target'], {}), '(w.target)\n', (3534, 3544), True, 'import tensorflow as tf\n'), ((4476, 4509), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4507, 4509), True, 'import tensorflow as tf\n'), ((3993, 4037), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'ps_tasks': '(100)'}), '(ps_tasks=100)\n', (4023, 4037), True, 'import tensorflow as tf\n'), ((4433, 4469), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['partitioned_ix'], {}), '(partitioned_ix)\n', (4453, 4469), True, 'import tensorflow as tf\n'), ((4282, 4351), 'tensorflow.variable_axis_size_partitioner', 'tf.variable_axis_size_partitioner', ([], {'max_shard_bytes': '(4 * partition_size)'}), 
'(max_shard_bytes=4 * partition_size)\n', (4315, 4351), True, 'import tensorflow as tf\n')]
|
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
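    # keep at most 'shot' labelled examples per class for the training split (few-shot setting)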
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a dimension to make it multivariate with one dimension
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
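    # keep at most 'shot' labelled examples per class for the training split (few-shot setting)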
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
class Logger:
def __init__(self, f):
self.f = f
def log(self, content):
print(content)
self.f.write(content + '\n')
self.f.flush()
|
[
"sklearn.preprocessing.LabelEncoder",
"numpy.unique",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.concatenate",
"numpy.load",
"numpy.random.shuffle"
] |
[((185, 217), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (192, 217), True, 'import numpy as np\n'), ((423, 463), 'os.path.join', 'os.path.join', (['ucr_root_dir', 'dataset_name'], {}), '(ucr_root_dir, dataset_name)\n', (435, 463), False, 'import os\n'), ((783, 816), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (797, 816), True, 'import numpy as np\n'), ((826, 840), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (838, 840), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1154, 1187), 'numpy.concatenate', 'np.concatenate', (['(X_train, X_test)'], {}), '((X_train, X_test))\n', (1168, 1187), True, 'import numpy as np\n'), ((1240, 1262), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1257, 1262), True, 'import numpy as np\n'), ((2396, 2430), 'numpy.array', 'np.array', (['X_list'], {'dtype': 'np.float32'}), '(X_list, dtype=np.float32)\n', (2404, 2430), True, 'import numpy as np\n'), ((2441, 2455), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2453, 2455), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2547, 2569), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (2564, 2569), True, 'import numpy as np\n'), ((3093, 3133), 'os.path.join', 'os.path.join', (['ucr_root_dir', 'dataset_name'], {}), '(ucr_root_dir, dataset_name)\n', (3105, 3133), False, 'import os\n'), ((3615, 3656), 'numpy.concatenate', 'np.concatenate', (['(X_train, X_test)'], {'axis': '(0)'}), '((X_train, X_test), axis=0)\n', (3629, 3656), True, 'import numpy as np\n'), ((491, 545), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TRAIN.tsv')"], {}), "(dataset_dir, dataset_name + '_TRAIN.tsv')\n", (503, 545), False, 'import os\n'), ((594, 647), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TEST.tsv')"], {}), "(dataset_dir, dataset_name + '_TEST.tsv')\n", (606, 647), False, 'import os\n'), ((1537, 1548), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (1545, 1548), True, 'import numpy as np\n'), ((1943, 1988), 'os.path.join', 'os.path.join', (['root_dir', "(dataset_name + '.npy')"], {}), "(root_dir, dataset_name + '.npy')\n", (1955, 1988), False, 'import os\n'), ((2026, 2077), 'os.path.join', 'os.path.join', (['root_dir', "(dataset_name + '_label.txt')"], {}), "(root_dir, dataset_name + '_label.txt')\n", (2038, 2077), False, 'import os\n'), ((2297, 2324), 'numpy.zeros', 'np.zeros', (['(dim, max_length)'], {}), '((dim, max_length))\n', (2305, 2324), True, 'import numpy as np\n'), ((3161, 3215), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TRAIN.tsv')"], {}), "(dataset_dir, dataset_name + '_TRAIN.tsv')\n", (3173, 3215), False, 'import os\n'), ((3264, 3317), 'os.path.join', 'os.path.join', (['dataset_dir', "(dataset_name + '_TEST.tsv')"], {}), "(dataset_dir, dataset_name + '_TEST.tsv')\n", (3276, 3317), False, 'import os\n'), ((1372, 1384), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1381, 1384), True, 'import numpy as np\n'), ((2679, 2691), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2688, 2691), True, 'import numpy as np\n')]
|
import numpy as np
def denormalize(x, x_min, x_max):
if x_max is None:
_range = 1
else:
_range = (x_max - x_min)
return x * _range + x_min
def normalize(x, x_min=None, x_max=None, return_bounds=False, estimate_bounds_if_none=True):
# if the bounds should be estimated if none do it for both
if estimate_bounds_if_none and x_min is None:
x_min = np.min(x, axis=0)
if estimate_bounds_if_none and x_max is None:
x_max = np.max(x, axis=0)
    # if they are still none, fall back to default [0, 1] bounds to avoid an exception
    # (np.zeros/np.ones need a shape argument; assumes the last axis of x indexes the variables)
    if x_min is None:
        x_min = np.zeros(x.shape[-1])
    if x_max is None:
        x_max = np.ones(x.shape[-1])
# calculate the denominator
denom = x_max - x_min
# we can not divide by zero -> plus small epsilon
denom += 1e-30
# normalize the actual values
N = (x - x_min) / denom
# return with or without bounds
if not return_bounds:
return N
else:
return N, x_min, x_max
def standardize(x, return_bounds=False):
mean = np.mean(x, axis=0)
std = np.std(x, axis=0)
# standardize
val = (x - mean) / std
if not return_bounds:
return val
else:
return val, mean, std
def destandardize(x, mean, std):
return (x * std) + mean
|
[
"numpy.mean",
"numpy.ones",
"numpy.std",
"numpy.max",
"numpy.zeros",
"numpy.min"
] |
[((1035, 1053), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1042, 1053), True, 'import numpy as np\n'), ((1064, 1081), 'numpy.std', 'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1070, 1081), True, 'import numpy as np\n'), ((396, 413), 'numpy.min', 'np.min', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (402, 413), True, 'import numpy as np\n'), ((480, 497), 'numpy.max', 'np.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (486, 497), True, 'import numpy as np\n'), ((605, 615), 'numpy.zeros', 'np.zeros', ([], {}), '()\n', (613, 615), True, 'import numpy as np\n'), ((654, 663), 'numpy.ones', 'np.ones', ([], {}), '()\n', (661, 663), True, 'import numpy as np\n')]
|
import glob
import os
import sys
from tempfile import TemporaryDirectory
import netCDF4
import numpy as np
import numpy.ma as ma
from all_products_fun import Check
from lidar_fun import LidarFun
from cloudnetpy import concat_lib
from cloudnetpy.instruments import ceilo2nc
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_PATH)
FILES = glob.glob(f"{SCRIPT_PATH}/data/cl61d/*.nc")
FILES.sort()
SITE_META = {
"name": "Hyytiälä",
"altitude": 123,
"calibration_factor": 2.0,
"latitude": 45.0,
"longitude": 22.0,
}
class TestCl61d(Check):
site_meta = SITE_META
date = "2021-08-29"
temp_dir = TemporaryDirectory()
daily_file = temp_dir.name + "/daily.nc"
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
temp_path = temp_dir.name + "/test.nc"
uuid = ceilo2nc(daily_file, temp_path, site_meta, date=date)
def test_variable_names(self):
keys = {
"beta",
"beta_smooth",
"calibration_factor",
"range",
"height",
"zenith_angle",
"time",
"depolarisation",
"altitude",
"latitude",
"longitude",
"wavelength",
}
assert set(self.nc.variables.keys()) == keys
def test_common_lidar(self):
lidar_fun = LidarFun(self.nc, self.site_meta, self.date, self.uuid)
for name, method in LidarFun.__dict__.items():
if "test_" in name:
getattr(lidar_fun, name)()
def test_variable_values(self):
assert abs(self.nc.variables["wavelength"][:] - 910.55) < 0.001
assert self.nc.variables["zenith_angle"][:] == 3.0
assert ma.max(self.nc.variables["depolarisation"][:]) < 1
assert ma.min(self.nc.variables["depolarisation"][:]) > -0.1
def test_comments(self):
assert "SNR threshold applied: 5" in self.nc.variables["beta"].comment
def test_global_attributes(self):
assert self.nc.source == "Vaisala CL61d"
assert self.nc.title == f'CL61d ceilometer from {self.site_meta["name"]}'
def test_date_argument(tmp_path):
daily_file = str(tmp_path / "daily.nc")
test_file = str(tmp_path / "test.nc")
concat_lib.concatenate_files(FILES, daily_file, concat_dimension="profile")
ceilo2nc(daily_file, test_file, SITE_META, date="2021-08-30")
with netCDF4.Dataset(test_file) as nc:
assert len(nc.variables["time"]) == 12
assert np.all(np.diff(nc.variables["time"][:]) > 0)
assert nc.year == "2021"
assert nc.month == "08"
assert nc.day == "30"
|
[
"tempfile.TemporaryDirectory",
"numpy.ma.max",
"lidar_fun.LidarFun.__dict__.items",
"netCDF4.Dataset",
"numpy.ma.min",
"numpy.diff",
"os.path.realpath",
"cloudnetpy.instruments.ceilo2nc",
"lidar_fun.LidarFun",
"sys.path.append",
"cloudnetpy.concat_lib.concatenate_files",
"glob.glob"
] |
[((334, 362), 'sys.path.append', 'sys.path.append', (['SCRIPT_PATH'], {}), '(SCRIPT_PATH)\n', (349, 362), False, 'import sys\n'), ((372, 415), 'glob.glob', 'glob.glob', (['f"""{SCRIPT_PATH}/data/cl61d/*.nc"""'], {}), "(f'{SCRIPT_PATH}/data/cl61d/*.nc')\n", (381, 415), False, 'import glob\n'), ((306, 332), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (322, 332), False, 'import os\n'), ((658, 678), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (676, 678), False, 'from tempfile import TemporaryDirectory\n'), ((728, 803), 'cloudnetpy.concat_lib.concatenate_files', 'concat_lib.concatenate_files', (['FILES', 'daily_file'], {'concat_dimension': '"""profile"""'}), "(FILES, daily_file, concat_dimension='profile')\n", (756, 803), False, 'from cloudnetpy import concat_lib\n'), ((858, 911), 'cloudnetpy.instruments.ceilo2nc', 'ceilo2nc', (['daily_file', 'temp_path', 'site_meta'], {'date': 'date'}), '(daily_file, temp_path, site_meta, date=date)\n', (866, 911), False, 'from cloudnetpy.instruments import ceilo2nc\n'), ((2277, 2352), 'cloudnetpy.concat_lib.concatenate_files', 'concat_lib.concatenate_files', (['FILES', 'daily_file'], {'concat_dimension': '"""profile"""'}), "(FILES, daily_file, concat_dimension='profile')\n", (2305, 2352), False, 'from cloudnetpy import concat_lib\n'), ((2357, 2418), 'cloudnetpy.instruments.ceilo2nc', 'ceilo2nc', (['daily_file', 'test_file', 'SITE_META'], {'date': '"""2021-08-30"""'}), "(daily_file, test_file, SITE_META, date='2021-08-30')\n", (2365, 2418), False, 'from cloudnetpy.instruments import ceilo2nc\n'), ((1383, 1438), 'lidar_fun.LidarFun', 'LidarFun', (['self.nc', 'self.site_meta', 'self.date', 'self.uuid'], {}), '(self.nc, self.site_meta, self.date, self.uuid)\n', (1391, 1438), False, 'from lidar_fun import LidarFun\n'), ((1467, 1492), 'lidar_fun.LidarFun.__dict__.items', 'LidarFun.__dict__.items', ([], {}), '()\n', (1490, 1492), False, 'from lidar_fun import LidarFun\n'), ((2428, 2454), 'netCDF4.Dataset', 'netCDF4.Dataset', (['test_file'], {}), '(test_file)\n', (2443, 2454), False, 'import netCDF4\n'), ((1752, 1798), 'numpy.ma.max', 'ma.max', (["self.nc.variables['depolarisation'][:]"], {}), "(self.nc.variables['depolarisation'][:])\n", (1758, 1798), True, 'import numpy.ma as ma\n'), ((1818, 1864), 'numpy.ma.min', 'ma.min', (["self.nc.variables['depolarisation'][:]"], {}), "(self.nc.variables['depolarisation'][:])\n", (1824, 1864), True, 'import numpy.ma as ma\n'), ((2531, 2563), 'numpy.diff', 'np.diff', (["nc.variables['time'][:]"], {}), "(nc.variables['time'][:])\n", (2538, 2563), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from cell2cell.clustering import compute_linkage
from cell2cell.preprocessing.manipulate_dataframes import check_symmetry
from cell2cell.plotting.aesthetics import map_colors_to_metadata
def clustermap_cci(interaction_space, method='ward', optimal_leaf=True, metadata=None, sample_col='#SampleID',
group_col='Groups', meta_cmap='gist_rainbow', colors=None, excluded_cells=None, title='',
cbar_title='CCI score', cbar_fontsize=18, filename=None, **kwargs):
'''Generates a clustermap (heatmap + dendrograms from a hierarchical
clustering) based on CCI scores of cell-cell pairs.
Parameters
----------
interaction_space : cell2cell.core.interaction_space.InteractionSpace
Interaction space that contains all a distance matrix after running the
the method compute_pairwise_cci_scores. Alternatively, this object
can be a numpy-array or a pandas DataFrame. Also, a
SingleCellInteractions or a BulkInteractions object after running
the method compute_pairwise_cci_scores.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_leaf : boolean, default=True
Whether sorting the leaf of the dendrograms to have a minimal distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
metadata : pandas.Dataframe, default=None
Metadata associated with the cells, cell types or samples in the
matrix containing CCI scores. If None, cells will not be colored
by major groups.
sample_col : str, default='#SampleID'
Column in the metadata for the cells, cell types or samples
in the matrix containing CCI scores.
group_col : str, default='Groups'
Column in the metadata containing the major groups of cells, cell types
or samples in the matrix with CCI scores.
meta_cmap : str, default='gist_rainbow'
Name of the color palette for coloring the major groups of cells.
colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells. If colors is specified, meta_cmap will be
ignored.
excluded_cells : list, default=None
List containing cell names that are present in the interaction_space
object but that will be excluded from this plot.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=18
Font size for the colorbar title as well as labels for axes X and Y.
filename : str, default=None
Path to save the figure of the elbow analysis. If None, the figure is not
saved.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if hasattr(interaction_space, 'distance_matrix'):
print('Interaction space detected as an InteractionSpace class')
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
elif (type(interaction_space) is np.ndarray) or (type(interaction_space) is pd.core.frame.DataFrame):
print('Interaction space detected as a distance matrix')
distance_matrix = interaction_space
space_type = 'matrix'
elif hasattr(interaction_space, 'interaction_space'):
print('Interaction space detected as a Interactions class')
if not hasattr(interaction_space.interaction_space, 'distance_matrix'):
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
else:
interaction_space = interaction_space.interaction_space
distance_matrix = interaction_space.distance_matrix
space_type = 'class'
else:
raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
' object to generate a distance matrix.')
# Drop excluded cells
if excluded_cells is not None:
df = distance_matrix.loc[~distance_matrix.index.isin(excluded_cells),
~distance_matrix.columns.isin(excluded_cells)]
else:
df = distance_matrix
# Check symmetry to get linkage
symmetric = check_symmetry(df)
if (not symmetric) & (type(interaction_space) is pd.core.frame.DataFrame):
assert set(df.index) == set(df.columns), 'The distance matrix does not have the same elements in rows and columns'
# Obtain info for generating plot
linkage = _get_distance_matrix_linkages(df=df,
kwargs=kwargs,
method=method,
optimal_ordering=optimal_leaf,
symmetric=symmetric
)
kwargs_ = kwargs.copy()
# PLOT CCI MATRIX
if space_type == 'class':
df = interaction_space.interaction_elements['cci_matrix']
else:
df = distance_matrix
if excluded_cells is not None:
df = df.loc[~df.index.isin(excluded_cells),
~df.columns.isin(excluded_cells)]
# Colors
if metadata is not None:
col_colors = map_colors_to_metadata(metadata=metadata,
ref_df=df,
colors=colors,
sample_col=sample_col,
group_col=group_col,
cmap=meta_cmap)
if not symmetric:
row_colors = col_colors
else:
row_colors = None
else:
col_colors = None
row_colors = None
# Plot hierarchical clustering (triangular)
hier = _plot_triangular_clustermap(df=df,
symmetric=symmetric,
linkage=linkage,
col_colors=col_colors,
row_colors=row_colors,
title=title,
cbar_title=cbar_title,
cbar_fontsize=cbar_fontsize,
**kwargs_)
if ~symmetric:
hier.ax_heatmap.set_xlabel('Receiver cells', fontsize=cbar_fontsize)
hier.ax_heatmap.set_ylabel('Sender cells', fontsize=cbar_fontsize)
if filename is not None:
plt.savefig(filename, dpi=300,
bbox_inches='tight')
return hier
def _get_distance_matrix_linkages(df, kwargs, method='ward', optimal_ordering=True, symmetric=None):
'''Computes linkages for the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores in a form of distances (that is, smaller
values represent stronger interactions). Diagonal must be filled
by zeros.
kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_ordering : boolean, default=True
Whether sorting the leaf of the dendrograms to have a minimal distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
symmetric : boolean, default=None
Whether df is symmetric.
Returns
-------
linkage : ndarray
The hierarchical clustering of cells encoded as a linkage matrix.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if symmetric:
if 'col_cluster' in kwargs.keys():
kwargs['row_cluster'] = kwargs['col_cluster']
if kwargs['col_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
elif 'row_cluster' in kwargs.keys():
if kwargs['row_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
else:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
return linkage
def _triangularize_distance_matrix(df, linkage=None, symmetric=None, **kwargs):
'''Generates a mask to plot the upper triangle of the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
symmetric : boolean, default=None
Whether df is symmetric.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
mask : ndarray
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros).
'''
if symmetric is None:
symmetric = check_symmetry(df)
# Triangular matrix
if symmetric:
order_map = dict()
if linkage is None:
mask = np.ones((df.shape[0], df.shape[1]))
for i in range(mask.shape[0]):
for j in range(i, mask.shape[1]):
mask[i, j] = 0
else:
# Plot hierarchical clustering for getting indexes according to linkage
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
**kwargs
)
plt.close()
ind_order = hier.dendrogram_col.reordered_ind
mask = np.zeros((df.shape[0], df.shape[1]))
for i, ind in enumerate(ind_order):
order_map[i] = ind
filter_list = [order_map[j] for j in range(i)]
mask[ind, filter_list] = 1
else:
mask = None
return mask
def _plot_triangular_clustermap(df, symmetric=None, linkage=None, mask=None, col_colors=None, row_colors=None,
title='', cbar_title='CCI score', cbar_fontsize=12, **kwargs):
'''Plots a triangular clustermap based on a mask.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
mask : ndarray, default=None
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros). If None, a mask will be computed based on the CCI matrix
symmetry.
col_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the columns.
row_colors : dict, default=None
Dictionary containing tuples in the RGBA format for indicating colors
of major groups of cells in the rows.
title : str, default=''
Title of the clustermap.
cbar_title : str, default='CCI score'
Title for the colorbar, depending on the score employed.
cbar_fontsize : int, default=12
Font size for the colorbar title as well as labels for axes X and Y.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
hier : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if mask is None:
mask = _triangularize_distance_matrix(df=df,
linkage=linkage,
symmetric=symmetric,
**kwargs
)
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
mask=mask,
col_colors=col_colors,
row_colors=row_colors,
**kwargs
)
hier = _move_xticks_triangular_clustermap(clustermap=hier,
symmetric=symmetric
)
# Title
if len(title) > 0:
hier.ax_col_dendrogram.set_title(title, fontsize=16)
# Color bar label
cbar = hier.ax_heatmap.collections[0].colorbar
cbar.ax.set_ylabel(cbar_title, fontsize=cbar_fontsize)
cbar.ax.yaxis.set_label_position("left")
return hier
def _move_xticks_triangular_clustermap(clustermap, symmetric=True):
'''Moves xticks to the diagonal when plotting a symmetric matrix
    in the form of an upper triangle.
Parameters
---------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance.
symmetric : boolean, default=None
Whether the CCI matrix plotted in the clustermap is symmetric.
Returns
-------
clustermap : seaborn.matrix.ClusterGrid
A seaborn ClusterGrid instance, with the xticks moved to the
diagonal if the CCI matrix was symmetric. If not, the same
input clustermap is returned, but with rotated xtick labels.
'''
if symmetric:
# Apply offset transform to all xticklabels.
clustermap.ax_row_dendrogram.set_visible(False)
clustermap.ax_heatmap.tick_params(bottom=False) # Hide xtick line
x_labels = clustermap.ax_heatmap.xaxis.get_majorticklabels()
dpi_x = clustermap.fig.dpi_scale_trans.to_values()[0]
dpi_y = clustermap.fig.dpi_scale_trans.to_values()[3]
x0 = clustermap.ax_heatmap.transData.transform(x_labels[0].get_position())
x1 = clustermap.ax_heatmap.transData.transform(x_labels[1].get_position())
ylims = clustermap.ax_heatmap.get_ylim()
bottom_points = clustermap.ax_heatmap.transData.transform((1.0, ylims[0]))[1]
for i, xl in enumerate(x_labels):
# Move labels in dx and dy points.
swap_xy = (1.0, xl.get_position()[0] + 0.5)
new_y_points = clustermap.ax_heatmap.transData.transform(swap_xy)[1]
dx = -0.5 * abs(x1[0] - x0[0]) / dpi_x
dy = (new_y_points - bottom_points) / dpi_y
offset = mpl.transforms.ScaledTranslation(dx, dy, clustermap.fig.dpi_scale_trans)
xl.set_transform(xl.get_transform() + offset)
if symmetric:
rot = 45
else:
rot = 90
va = 'center'
clustermap.ax_heatmap.set_xticklabels(clustermap.ax_heatmap.xaxis.get_majorticklabels(),
rotation=rot,
rotation_mode='anchor',
va='bottom',
ha='right') # , fontsize=9.5)
clustermap.ax_heatmap.set_yticklabels(clustermap.ax_heatmap.yaxis.get_majorticklabels(),
rotation=0,
va=va,
ha='left') # , fontsize=9.5)
return clustermap
|
[
"matplotlib.pyplot.savefig",
"numpy.ones",
"cell2cell.preprocessing.manipulate_dataframes.check_symmetry",
"seaborn.clustermap",
"cell2cell.clustering.compute_linkage",
"matplotlib.pyplot.close",
"numpy.zeros",
"cell2cell.plotting.aesthetics.map_colors_to_metadata",
"matplotlib.transforms.ScaledTranslation"
] |
[((4765, 4783), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (4779, 4783), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((12729, 12860), 'seaborn.clustermap', 'sns.clustermap', (['df'], {'col_linkage': 'linkage', 'row_linkage': 'linkage', 'mask': 'mask', 'col_colors': 'col_colors', 'row_colors': 'row_colors'}), '(df, col_linkage=linkage, row_linkage=linkage, mask=mask,\n col_colors=col_colors, row_colors=row_colors, **kwargs)\n', (12743, 12860), True, 'import seaborn as sns\n'), ((5773, 5904), 'cell2cell.plotting.aesthetics.map_colors_to_metadata', 'map_colors_to_metadata', ([], {'metadata': 'metadata', 'ref_df': 'df', 'colors': 'colors', 'sample_col': 'sample_col', 'group_col': 'group_col', 'cmap': 'meta_cmap'}), '(metadata=metadata, ref_df=df, colors=colors,\n sample_col=sample_col, group_col=group_col, cmap=meta_cmap)\n', (5795, 5904), False, 'from cell2cell.plotting.aesthetics import map_colors_to_metadata\n'), ((7067, 7118), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(filename, dpi=300, bbox_inches='tight')\n", (7078, 7118), True, 'from matplotlib import pyplot as plt\n'), ((8237, 8255), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (8251, 8255), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((9761, 9779), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (9775, 9779), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((12390, 12408), 'cell2cell.preprocessing.manipulate_dataframes.check_symmetry', 'check_symmetry', (['df'], {}), '(df)\n', (12404, 12408), False, 'from cell2cell.preprocessing.manipulate_dataframes import check_symmetry\n'), ((9897, 9932), 'numpy.ones', 'np.ones', (['(df.shape[0], df.shape[1])'], {}), '((df.shape[0], df.shape[1]))\n', (9904, 9932), True, 'import numpy as np\n'), ((10178, 10248), 'seaborn.clustermap', 'sns.clustermap', (['df'], {'col_linkage': 'linkage', 'row_linkage': 'linkage'}), '(df, col_linkage=linkage, row_linkage=linkage, **kwargs)\n', (10192, 10248), True, 'import seaborn as sns\n'), ((10398, 10409), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10407, 10409), True, 'from matplotlib import pyplot as plt\n'), ((10487, 10523), 'numpy.zeros', 'np.zeros', (['(df.shape[0], df.shape[1])'], {}), '((df.shape[0], df.shape[1]))\n', (10495, 10523), True, 'import numpy as np\n'), ((15244, 15316), 'matplotlib.transforms.ScaledTranslation', 'mpl.transforms.ScaledTranslation', (['dx', 'dy', 'clustermap.fig.dpi_scale_trans'], {}), '(dx, dy, clustermap.fig.dpi_scale_trans)\n', (15276, 15316), True, 'import matplotlib as mpl\n'), ((8440, 8509), 'cell2cell.clustering.compute_linkage', 'compute_linkage', (['df'], {'method': 'method', 'optimal_ordering': 'optimal_ordering'}), '(df, method=method, optimal_ordering=optimal_ordering)\n', (8455, 8509), False, 'from cell2cell.clustering import compute_linkage\n'), ((8823, 8892), 'cell2cell.clustering.compute_linkage', 'compute_linkage', (['df'], {'method': 'method', 'optimal_ordering': 'optimal_ordering'}), '(df, method=method, optimal_ordering=optimal_ordering)\n', (8838, 8892), False, 'from cell2cell.clustering import compute_linkage\n'), ((8668, 8737), 'cell2cell.clustering.compute_linkage', 'compute_linkage', (['df'], {'method': 'method', 
'optimal_ordering': 'optimal_ordering'}), '(df, method=method, optimal_ordering=optimal_ordering)\n', (8683, 8737), False, 'from cell2cell.clustering import compute_linkage\n')]
|
#! /usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
def generate_csv(start_index, fname):
cols = [
str('A' + str(i)) for i in range(start_index, NUM_COLS + start_index)
]
data = []
for i in range(NUM_ROWS):
vals = (np.random.choice(NUM_DISTINCT_VALS) for j in range(NUM_COLS))
data.append(vals)
df = pd.DataFrame(data=data, columns=cols)
df.to_csv(fname, index=False, header=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate sample tables to test joins.')
parser.add_argument('--num-rows', '-r', type=int, default=100)
parser.add_argument('--num-cols', '-c', type=int, required=True)
parser.add_argument('--num-distinct-vals', '-d', type=int, required=True)
parser.add_argument('--num-cols-overlap', '-o', type=int, default=1)
args = parser.parse_args()
NUM_ROWS = args.num_rows
NUM_COLS = args.num_cols
NUM_DISTINCT_VALS = args.num_distinct_vals
num_overlap = args.num_cols_overlap
if num_overlap > NUM_COLS:
print('--num-cols-overlap cannot be greater than --num-cols')
import sys
sys.exit(1)
generate_csv(0, 'table_a.csv')
generate_csv(NUM_COLS - num_overlap, 'table_b.csv')
|
[
"pandas.DataFrame",
"numpy.random.choice",
"argparse.ArgumentParser",
"sys.exit"
] |
[((413, 450), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'cols'}), '(data=data, columns=cols)\n', (425, 450), True, 'import pandas as pd\n'), ((540, 616), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate sample tables to test joins."""'}), "(description='Generate sample tables to test joins.')\n", (563, 616), False, 'import argparse\n'), ((1219, 1230), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1227, 1230), False, 'import sys\n'), ((315, 350), 'numpy.random.choice', 'np.random.choice', (['NUM_DISTINCT_VALS'], {}), '(NUM_DISTINCT_VALS)\n', (331, 350), True, 'import numpy as np\n')]
|
"""
Neighborhood Components Analysis (NCA)
Ported to Python from https://github.com/vomjom/nca
"""
from __future__ import absolute_import
import numpy as np
from six.moves import xrange
from sklearn.utils.validation import check_X_y
from .base_metric import BaseMetricLearner
EPS = np.finfo(float).eps
class NCA(BaseMetricLearner):
def __init__(self, num_dims=None, max_iter=100, learning_rate=0.01):
self.num_dims = num_dims
self.max_iter = max_iter
self.learning_rate = learning_rate
def transformer(self):
return self.A_
def fit(self, X, y):
"""
X: data matrix, (n x d)
y: scalar labels, (n)
"""
X, labels = check_X_y(X, y)
n, d = X.shape
num_dims = self.num_dims
if num_dims is None:
num_dims = d
# Initialize A to a scaling matrix
A = np.zeros((num_dims, d))
np.fill_diagonal(A, 1./(np.maximum(X.max(axis=0)-X.min(axis=0), EPS)))
# Run NCA
dX = X[:,None] - X[None] # shape (n, n, d)
tmp = np.einsum('...i,...j->...ij', dX, dX) # shape (n, n, d, d)
masks = labels[:,None] == labels[None]
for it in xrange(self.max_iter):
for i, label in enumerate(labels):
mask = masks[i]
Ax = A.dot(X.T).T # shape (n, num_dims)
softmax = np.exp(-((Ax[i] - Ax)**2).sum(axis=1)) # shape (n)
softmax[i] = 0
softmax /= softmax.sum()
t = softmax[:, None, None] * tmp[i] # shape (n, d, d)
d = softmax[mask].sum() * t.sum(axis=0) - t[mask].sum(axis=0)
A += self.learning_rate * A.dot(d)
self.X_ = X
self.A_ = A
self.n_iter_ = it
return self
|
[
"numpy.zeros",
"six.moves.xrange",
"numpy.einsum",
"numpy.finfo",
"sklearn.utils.validation.check_X_y"
] |
[((285, 300), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (293, 300), True, 'import numpy as np\n'), ((660, 675), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (669, 675), False, 'from sklearn.utils.validation import check_X_y\n'), ((817, 840), 'numpy.zeros', 'np.zeros', (['(num_dims, d)'], {}), '((num_dims, d))\n', (825, 840), True, 'import numpy as np\n'), ((989, 1026), 'numpy.einsum', 'np.einsum', (['"""...i,...j->...ij"""', 'dX', 'dX'], {}), "('...i,...j->...ij', dX, dX)\n", (998, 1026), True, 'import numpy as np\n'), ((1106, 1127), 'six.moves.xrange', 'xrange', (['self.max_iter'], {}), '(self.max_iter)\n', (1112, 1127), False, 'from six.moves import xrange\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
def test_generalized_entropy_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.2
pred = data.copy()
pred[:, -1] = np.array([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.generalized_entropy_index() == 0.3
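# Worked check of the two expected values above: the benefit used by this metric
# is b_i = y_hat_i - y_i + 1, so the first prediction set gives
# b = [1, 1, 1, 0, 2, 2, 1, 1, 1, 0] and the second gives
# b = [0, 2, 2, 0, 1, 1, 0, 2, 1, 1]. With mean mu = 1 in both cases,
# GE_2 = 1/(2n) * sum((b_i/mu)**2 - 1) evaluates to 4/20 = 0.2 and 6/20 = 0.3.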
def test_theil_index():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
assert cm.theil_index() == 4*np.log(2)/10
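# The Theil index is the alpha = 1 case, 1/n * sum(b_i/mu * ln(b_i/mu)). With
# b = [1, 1, 1, 0, 2, 2, 1, 1, 1, 0] and mu = 1, only the two b_i = 2 entries
# contribute, giving 2 * 2*ln(2) / 10 = 4*ln(2)/10, as asserted above.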
def test_between_all_groups():
data = np.array([[0, 1],
[0, 0],
[1, 0],
[1, 1],
[1, 0],
[1, 0],
[2, 1],
[2, 0],
[2, 1],
[2, 1]])
pred = data.copy()
pred[[3, 9], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat'])
cm = ClassificationMetric(bld, bld2)
b = np.array([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])
assert cm.between_all_groups_generalized_entropy_index() == 1/20*np.sum(b**2 - 1)
def test_between_group():
data = np.array([[0, 0, 1],
[0, 1, 0],
[1, 1, 0],
[1, 1, 1],
[1, 0, 0],
[1, 0, 0]])
pred = data.copy()
pred[[0, 3], -1] = 0
pred[[4, 5], -1] = 1
df = pd.DataFrame(data, columns=['feat', 'feat2', 'label'])
df2 = pd.DataFrame(pred, columns=['feat', 'feat2', 'label'])
bld = BinaryLabelDataset(df=df, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
bld2 = BinaryLabelDataset(df=df2, label_names=['label'],
protected_attribute_names=['feat', 'feat2'])
cm = ClassificationMetric(bld, bld2, unprivileged_groups=[{'feat': 0}],
privileged_groups=[{'feat': 1}])
b = np.array([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])
assert cm.between_group_generalized_entropy_index() == 1/12*np.sum(b**2 - 1)
|
[
"numpy.log",
"numpy.array",
"numpy.sum",
"pandas.DataFrame",
"aif360.datasets.BinaryLabelDataset",
"aif360.metrics.ClassificationMetric"
] |
[((336, 431), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [2, 1], [2, 1]\n ]'], {}), '([[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [\n 2, 1], [2, 1]])\n', (344, 431), True, 'import numpy as np\n'), ((698, 743), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'label']"}), "(data, columns=['feat', 'label'])\n", (710, 743), True, 'import pandas as pd\n'), ((754, 799), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (766, 799), True, 'import pandas as pd\n'), ((810, 899), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat'])\n", (828, 899), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((914, 1004), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (932, 1004), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((1017, 1048), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (1037, 1048), False, 'from aif360.metrics import ClassificationMetric\n'), ((1141, 1181), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 0, 0, 0, 1, 1, 1]'], {}), '([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])\n', (1149, 1181), True, 'import numpy as np\n'), ((1192, 1237), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (1204, 1237), True, 'import pandas as pd\n'), ((1249, 1339), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (1267, 1339), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((1352, 1383), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (1372, 1383), False, 'from aif360.metrics import ClassificationMetric\n'), ((1470, 1565), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [2, 1], [2, 1]\n ]'], {}), '([[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [\n 2, 1], [2, 1]])\n', (1478, 1565), True, 'import numpy as np\n'), ((1832, 1877), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'label']"}), "(data, columns=['feat', 'label'])\n", (1844, 1877), True, 'import pandas as pd\n'), ((1888, 1933), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (1900, 1933), True, 'import pandas as pd\n'), ((1944, 2033), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat'])\n", (1962, 2033), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((2048, 2138), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (2066, 2138), False, 'from aif360.datasets import 
BinaryLabelDataset\n'), ((2151, 2182), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (2171, 2182), False, 'from aif360.metrics import ClassificationMetric\n'), ((2273, 2368), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [2, 1], [2, 1]\n ]'], {}), '([[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1], [2, 0], [\n 2, 1], [2, 1]])\n', (2281, 2368), True, 'import numpy as np\n'), ((2635, 2680), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'label']"}), "(data, columns=['feat', 'label'])\n", (2647, 2680), True, 'import pandas as pd\n'), ((2691, 2736), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'label']"}), "(pred, columns=['feat', 'label'])\n", (2703, 2736), True, 'import pandas as pd\n'), ((2747, 2836), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat'])\n", (2765, 2836), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((2851, 2941), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat'])\n", (2869, 2941), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((2954, 2985), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {}), '(bld, bld2)\n', (2974, 2985), False, 'from aif360.metrics import ClassificationMetric\n'), ((2995, 3059), 'numpy.array', 'np.array', (['[1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75]'], {}), '([1, 1, 1.25, 1.25, 1.25, 1.25, 0.75, 0.75, 0.75, 0.75])\n', (3003, 3059), True, 'import numpy as np\n'), ((3184, 3260), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 0], [1, 1, 0], [1, 1, 1], [1, 0, 0], [1, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [1, 1, 0], [1, 1, 1], [1, 0, 0], [1, 0, 0]])\n', (3192, 3260), True, 'import numpy as np\n'), ((3448, 3502), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['feat', 'feat2', 'label']"}), "(data, columns=['feat', 'feat2', 'label'])\n", (3460, 3502), True, 'import pandas as pd\n'), ((3513, 3567), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'columns': "['feat', 'feat2', 'label']"}), "(pred, columns=['feat', 'feat2', 'label'])\n", (3525, 3567), True, 'import pandas as pd\n'), ((3578, 3676), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df', 'label_names': "['label']", 'protected_attribute_names': "['feat', 'feat2']"}), "(df=df, label_names=['label'], protected_attribute_names=\n ['feat', 'feat2'])\n", (3596, 3676), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((3691, 3790), 'aif360.datasets.BinaryLabelDataset', 'BinaryLabelDataset', ([], {'df': 'df2', 'label_names': "['label']", 'protected_attribute_names': "['feat', 'feat2']"}), "(df=df2, label_names=['label'], protected_attribute_names\n =['feat', 'feat2'])\n", (3709, 3790), False, 'from aif360.datasets import BinaryLabelDataset\n'), ((3803, 3906), 'aif360.metrics.ClassificationMetric', 'ClassificationMetric', (['bld', 'bld2'], {'unprivileged_groups': "[{'feat': 0}]", 'privileged_groups': "[{'feat': 1}]"}), "(bld, bld2, unprivileged_groups=[{'feat': 0}],\n privileged_groups=[{'feat': 1}])\n", (3823, 3906), False, 'from aif360.metrics import ClassificationMetric\n'), ((3920, 3964), 
'numpy.array', 'np.array', (['[0.5, 0.5, 1.25, 1.25, 1.25, 1.25]'], {}), '([0.5, 0.5, 1.25, 1.25, 1.25, 1.25])\n', (3928, 3964), True, 'import numpy as np\n'), ((3129, 3147), 'numpy.sum', 'np.sum', (['(b ** 2 - 1)'], {}), '(b ** 2 - 1)\n', (3135, 3147), True, 'import numpy as np\n'), ((4029, 4047), 'numpy.sum', 'np.sum', (['(b ** 2 - 1)'], {}), '(b ** 2 - 1)\n', (4035, 4047), True, 'import numpy as np\n'), ((2217, 2226), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2223, 2226), True, 'import numpy as np\n')]
|
import sys
import numpy as np
import shutil
import time
import itertools as it
import collections
import ctypes as ct
import os
import copy
sys.path.append(os.path.dirname(__file__))
from ThreadStoppable import ThreadStoppable
class Idq801(object):
def __init__(
self,
deviceId=-1,
timestamp_buffer_size=int(1e6),
integration_time_ms=0.5 * 1e3,
coincidence_window_bins=1000,
max_retry=3,
delay_retry_sec=0.01,
clean_data_directory=False,
data_directory="Idq801Data",
processing="external",
):
self._max_retry = max_retry
self._set_check_delay = delay_retry_sec # Delay in seconds between setting and
# checking that a parameter was set.
self._data_directory = data_directory
self._wait_for_settings = 1
self._processing_dict = {"i": "internal", "e": "external"}
processing = processing.lower()
        assert processing in self._processing_dict.values()
self._processing = processing
if not os.path.isdir(data_directory):
os.mkdir(data_directory)
if clean_data_directory:
self.clean_data_directory()
module_path = os.path.dirname(__file__) + "/"
if sys.platform == "linux":
self.idq801Lib = ct.CDLL(module_path + "libtdcbase.so")
elif sys.platform == "win32":
self.idq801Lib = ct.CDLL(module_path + "./tdcbase.dll")
else:
raise OSError("Invalid operating system")
if self.idq801Lib.TDC_init(deviceId):
raise RuntimeError("Could not connect to the ID801 counter.")
# Initial parameters.
self.unset_channel(-1)
self.set_timestamp_buffer_size(timestamp_buffer_size)
self.integration_time_ms = integration_time_ms
if self._processing == self._processing_dict["i"]:
self.set_integration_time(integration_time_ms)
else:
self.set_integration_time(1.0e-3) # 1us integration time.
self.set_coincidence_window_bins(1000)
self._time_last_get_timestamps = time.time()
self.channel_delays = {
"1": 0,
"2": 0,
"3": 0,
"4": 0,
"5": 0,
"6": 0,
"7": 0,
"8": 0,
}
self.set_channel_delays_ns(self.channel_delays)
self.accidental_delay = 0
def __del__(self):
self.idq801Lib.TDC_deInit()
def _set_value(self, set_value, setter, getter):
"""Sets a value and makes sure it was set."""
attempt = 0
is_set = False
while not is_set and attempt < self._max_retry:
attempt += 1
setter(set_value)
time.sleep(self._set_check_delay)
try:
if list(set_value) == list(getter()):
is_set = True
except TypeError:
if set_value == getter():
is_set = True
if not is_set:
raise RuntimeError(
"Unable to set the value using %s to %s after %i attempts."
% (setter.__name__, str(set_value), self._max_retry)
)
def _get_device_params(self):
cm = ct.c_int32()
cw = ct.c_int32()
ew = ct.c_int32()
self.idq801Lib.TDC_getDeviceParams(ct.byref(cm), ct.byref(cw), ct.byref(ew))
return (cm, cw, ew)
def _set_processing(self, processing):
processing = processing.lower()
assert processing in self._processing_dict.values()
self._processing = processing
if processing == self._processing_dict["i"]:
self.set_integration_time(self.integration_time_ms)
return self._processing
def set_processing_internal(self):
return self._set_processing("internal")
def set_processing_external(self):
return self._set_processing("external")
def clean_data_directory(self):
"""
Deletes all data in the `Idq801Data` directory.
"""
shutil.rmtree(self._data_directory, ignore_errors=True)
os.mkdir(self._data_directory)
def get_timebase(self):
self.idq801Lib.TDC_getTimebase.restype = ct.c_double
tb = self.idq801Lib.TDC_getTimebase()
return tb
def get_mask_channels(self):
cm, _, _ = self._get_device_params()
return cm.value
def get_status_channels(self):
cm, cw, ew = self._get_device_params()
channels_enabled = [bool(int(c)) for c in bin(cm.value)[2:]][::-1]
padLength = 8 - len(channels_enabled)
channels_enabled.extend([False] * padLength)
return tuple(channels_enabled)
def get_enabled_channels(self):
channels_status = self.get_status_channels()
channels_enabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == True
)
return channels_enabled
def get_disabled_channels(self):
channels_status = self.get_status_channels()
channels_disabled = tuple(
i + 1 for i, v in enumerate(channels_status) if v == False
)
return channels_disabled
def is_channel_enabled(self, channel):
assert 1 <= channel <= 8, "Invalid choice channel range."
channel -= 1
channel_status = self.get_status_channels()[channel]
return channel_status
def _get_channel_mask(self, channel, set_unset):
def channel_mask_from_channel_list(channels_enabled):
channel_mask = 0
for b in channels_enabled[::-1]:
                channel_mask |= 1 << (b - 1)
return channel_mask
set_unset = set_unset.lower()
assert set_unset in ("set", "unset"), (
"Invalid `set_unset` choice %s." % set_unset
)
if isinstance(channel, str):
channel = channel.lower()
if channel == "all" or channel == -1:
channel_mask = 0xFF
elif channel in range(1, 9):
            channel_mask = 1 << (channel - 1)
elif isinstance(channel, collections.Iterable):
channel_mask = channel_mask_from_channel_list(channel)
else:
raise TypeError("Invalid `channel` choice.")
if set_unset == "unset":
channel_mask ^= 0xFF
return channel_mask
def _set_unset_channel(self, channel, set_unset):
self._channel_mask = self._get_channel_mask(channel, set_unset)
self._set_value(
self._channel_mask,
self.idq801Lib.TDC_enableChannels,
self.get_mask_channels,
)
return self._channel_mask
def set_channel(self, channel):
"""Choose which channels to enable.
Options include:
        * -1 or 'all' (for all channels).
        * A single number for the channel to be enabled.
        * An iterable containing the channels
          to be enabled, e.g. (1, 4, 5).
* Default is no channels are enabled.
"""
return self._set_unset_channel(channel, "set")
def unset_channel(self, channel):
"""Choose which channels to disable.
Options include:
        * -1 or 'all' (for all channels).
        * A single number for the channel to be disabled.
        * An iterable containing the channels
          to be disabled, e.g. (1, 4, 5).
* Default is no channels are disabled.
"""
return self._set_unset_channel(channel, "unset")
def get_coincidence_window_bins(self):
cm, cw, ew = self._get_device_params()
return cw.value
def get_coincidence_window_ns(self):
bin = self.get_timebase()
return bin * self.get_coincidence_window_bins() * 1e9
def set_coincidence_window_bins(self, coincidence_window_bins):
coincidence_window_bins = int(coincidence_window_bins)
if not 0 < coincidence_window_bins <= 65535:
raise ValueError(
"The chosen number of coincidence \
window bins is not in the range (0,65535]."
)
self._set_value(
coincidence_window_bins,
self.idq801Lib.TDC_setCoincidenceWindow,
self.get_coincidence_window_bins,
)
def set_coincidence_window_ns(self, coincidence_window_ns):
bin = self.get_timebase()
coincidence_window_bins = int(coincidence_window_ns * 1e-9 / bin)
return self.set_coincidence_window_bins(coincidence_window_bins)
def get_integration_time(self):
cm, cw, ew = self._get_device_params()
return ew.value
def freeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(True)
def unfreeze_buffers(self):
self.idq801Lib.TDC_freezeBuffers(False)
def set_integration_time(self, window_time_ms):
window_time_ms = round(window_time_ms)
if self._processing == self._processing_dict["i"]:
if not 0 < window_time_ms <= 65535:
raise ValueError(
"The chosen exposure window is not \
in the range (0,65535]. Can't do more than 65.5s \
integration time internally."
)
self._set_value(
            window_time_ms,
self.idq801Lib.TDC_setExposureTime,
self.get_integration_time,
)
def get_data_lost_status(self):
"""Returns true if data is being lost, and false
if data is not being lost.
"""
# Get the status of the lost latch.
lost = ct.c_int32()
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
latch = lost.value
# Calls the function again to clear the lost latch.
self.idq801Lib.TDC_getDataLost(ct.byref(lost))
return latch
def get_timestamp_buffer_size(self):
size = ct.c_int32()
self.idq801Lib.TDC_getTimestampBufferSize(ct.byref(size))
return size.value
def set_timestamp_buffer_size(self, size):
"""`size` is the amount of timestamps that the
the counter will store. Range is 1->1000000
"""
self._set_value(
size,
self.idq801Lib.TDC_setTimestampBufferSize,
self.get_timestamp_buffer_size,
)
def get_timestamps(self, clear_retrieved_timestamps=True, trim_time_s=None):
"""
Gets all the time stamps in the buffer and returns
a dictionary corresponding to the timestamps in each
channel.
args:
clear_retrieved_timestamps(bool): Clears the timestamp
buffer of the IDQ801 after reading.
            trim_time_s(float, None): The span of timestamps to keep, in
                seconds, measured from the first timestamp.
If `None`, all timestamps are returned. Multiple
channels are all trimmed starting from the lowest
timestamps of all the channels combined.
returns:
dict: A dictionary containing numpy arrays with the
timestamps of each channel. The time from the
last calling of this function is also returned
in the dictionary.
"""
if self.get_timestamp_buffer_size() == 0:
raise RuntimeError(
"The timestamp buffer size is 0. \
Can't get timestamps. Need to set the timestamp \
buffer."
)
r = ct.c_int32(clear_retrieved_timestamps)
ts = (ct.c_int64 * self.get_timestamp_buffer_size())()
c = (ct.c_int8 * self.get_timestamp_buffer_size())()
v = ct.c_int32()
self.idq801Lib.TDC_getLastTimestamps(r, ts, c, ct.byref(v))
time_read = time.time()
time_diff = time_read - self._time_last_get_timestamps
self._time_last_get_timestamps = time_read
channel = np.frombuffer(c, dtype=np.int8)
channel_masks = [
channel == i for i in range(4) if self._channel_mask & (1 << i)
]
timestamps = np.frombuffer(ts, dtype=np.int64)
timestamps_masked = {
str(c + 1): timestamps[c_m] for c, c_m in enumerate(channel_masks)
}
timestamps_masked.update((k, v[v > 0]) for k, v in timestamps_masked.items())
last_counts = []
if trim_time_s:
for timestamps in timestamps_masked.values():
if timestamps.size:
first_count = timestamps[0]
last_counts.append(
first_count + int(trim_time_s / self.get_timebase() + 0.5)
)
if len(last_counts):
last_count = np.min(last_counts)
for channel, timestamps in timestamps_masked.items():
if timestamps.size:
last_idx = np.searchsorted(timestamps, last_count, "right")
timestamps_masked[channel] = timestamps[: last_idx - 1]
timestamps_masked["time_diff"] = time_diff
return timestamps_masked
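    # Illustrative return value (hardware-dependent, so only a sketch): with
    # channels 1 and 2 enabled the dictionary looks roughly like
    #   {"1": array([102, 957, ...]), "2": array([541, ...]), "time_diff": 0.51}
    # where the timestamp values are integer multiples of get_timebase() seconds.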
def _get_coins(self, timestamps_1, timestamps_2, method="2"):
t2 = np.array(timestamps_2, dtype=np.int64)
assert method in ("1", "2"), "Invalid method chosen."
if method == "1":
t1 = np.empty(len(timestamps_1) + 2, dtype=np.int64)
t1[0] = 0
t1[-1] = np.iinfo(np.int64).max
t1[1:-1] = timestamps_1
t2_pos = np.searchsorted(t1, t2)
t1_pos_forw = t2_pos
t1_pos_back = t2_pos - 1
t1_pos_back[t1_pos_back == -1] = 0
dt_forw = np.abs(t1[t1_pos_forw] - t2) <= self.get_coincidence_window_bins()
dt_back = np.abs(t1[t1_pos_back] - t2) <= self.get_coincidence_window_bins()
coin_forw_args = dt_forw.nonzero()[0]
coin_back_args = dt_back.nonzero()[0]
coins_forw = np.c_[t1_pos_forw[coin_forw_args] - 1, coin_forw_args]
coins_back = np.c_[t1_pos_back[coin_back_args] - 1, coin_back_args]
coins = np.vstack((coins_back, coins_forw))
elif method == "2":
t1 = np.array(timestamps_1, dtype=np.int64)
l = np.searchsorted(t1, t2 - self.get_coincidence_window_bins() / 2)
r = np.searchsorted(t1, t2 + self.get_coincidence_window_bins() / 2)
args = np.where(l != r)[0]
coins = np.c_[r[args], args]
return coins
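    # Note on method "2" above: for each timestamp in t2, np.searchsorted finds
    # the insertion points of (t2 - w/2) and (t2 + w/2) in the sorted t1; where
    # the two insertion points differ, at least one t1 timestamp lies inside the
    # coincidence window, so that index pair is recorded as a coincidence.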
def get_coin_counts(
self, coin_channels, accidentals_delay_ns=None, trim_time_s=None
):
bin = self.get_timebase()
timestamps = self.get_timestamps(
clear_retrieved_timestamps=True, trim_time_s=trim_time_s
)
time_diff = timestamps["time_diff"]
timestamps.pop("time_diff", None)
coin_counts = {}
acc_counts = {}
# Get singles counts
for c in coin_channels:
if str(c) in timestamps:
coin_counts[str(c)] = len(timestamps[str(c)])
else:
coin_counts[str(c)] = 0
coin_combinations = list(it.combinations(coin_channels, 2))
for c in coin_combinations:
# Get coincidence counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
coin_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(timestamps[str(c[0])], timestamps[str(c[1])])
)
else:
coin_counts[str(c[0]) + "/" + str(c[1])] = 0
if accidentals_delay_ns != None:
accidentals_delay_bin = int(accidentals_delay_ns * 1e-9 / bin)
for c in coin_combinations:
# Get accidental counts
if str(c[0]) in timestamps and str(c[1]) in timestamps:
acc_counts[str(c[0]) + "/" + str(c[1])] = len(
self._get_coins(
timestamps[str(c[0])],
timestamps[str(c[1])] + accidentals_delay_bin,
)
)
else:
acc_counts[str(c[0]) + "/" + str(c[1])] = 0
return coin_counts, acc_counts, timestamps
def scan_channel_delay(
self, coin_channels, scan_channel, scan_range_ns, integration_time=1.0
):
"""
        Scans a channel delay electronically: integrates once, then applies
        delays to the recorded timestamps to find the coincidence maximum.
        Args:
            coin_channels: the pair of channels to count coincidences between
            scan_channel: the channel whose delay is scanned
            scan_range_ns: +/- range of the delay scan in ns
            integration_time: initial integration time in seconds
Returns: max coin reading, delay in ns of the max, all coin counts, delay range
"""
current_delays_bins = self.get_channel_delays_bins()
self.set_channel_delays_ns({str(coin_channels[0]): 0, str(coin_channels[1]): 0})
bin = self.get_timebase()
self.get_timestamps()
time.sleep(integration_time)
original_timestamps = self.get_timestamps()
delay_range = range(-scan_range_ns, scan_range_ns + 1)
coin_counts = np.zeros(len(delay_range))
timestamps = copy.deepcopy(original_timestamps)
for idd, d in enumerate(delay_range):
timestamps[str(scan_channel)] = copy.deepcopy(
original_timestamps[str(scan_channel)]
) + int(d * 1e-9 / bin)
coin_counts[idd] = len(
self._get_coins(
timestamps[str(coin_channels[0])], timestamps[str(coin_channels[1])]
)
)
print(
"delay channel = %s, delay = %s ns, coin counts = %s"
% (scan_channel, d, int(coin_counts[idd]))
)
max_coin = np.max(coin_counts)
max_coin_delay = delay_range[np.argmax(coin_counts)]
self.set_channel_delays_bins(current_delays_bins)
return max_coin, max_coin_delay, coin_counts, delay_range
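    # Illustrative call (requires connected hardware): scan the delay of channel 2
    # over +/-10 ns while counting 1/2 coincidences,
    #   max_c, max_d, counts, rng = idq801.scan_channel_delay((1, 2), 2, 10)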
def get_timestamps_continuous(self, seconds=-1):
"""Runs `gets_timestamps` continuously in a separate
thread for `seconds` amount of seconds in a loop.
If seconds == -1, it doesn't timeout. Returns a
thread object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
clear_retrieved_timestamps = True
t = ThreadStoppable(
self.get_timestamps, seconds, True, args=(clear_retrieved_timestamps,)
)
return t
def write_timestamps_to_file(self):
"""Writes the timestamps in the buffer to a
file.
"""
timestamp_dir = "Timestamps"
if not os.path.isdir(self._data_directory + "/" + timestamp_dir):
os.mkdir(self._data_directory + "/" + timestamp_dir)
filename_prefix = (
self._data_directory + "/" + timestamp_dir + "/" + "timestamp_channel_"
)
filenames = [filename_prefix + str(i) + ".dat" for i in range(1, 9)]
for fn in filenames:
if not os.path.exists(fn):
open(fn, "w").close()
ts = self.get_timestamps(clear_retrieved_timestamps=True)
for i, fn in enumerate(filenames):
with open(fn, "a") as fs:
try:
for t in ts[str(i + 1)]:
fs.write(str(t) + "\n")
except KeyError:
pass
def write_timestamps_to_file_continuous(self, seconds=-1):
"""Runs `write_timestamps_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread object
that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.write_timestamps_to_file, seconds)
return t
def get_counters(self):
"""Returns a list of the most recent value of
of the counters.
"""
counters = (ct.c_int32 * 19)()
self.idq801Lib.TDC_getCoincCounters(counters, None)
return list(counters)
def get_counters_continuous(self, seconds=-1):
"""Runs `get_counters` continuously in a separate thread for
`seconds` amount of seconds in a loop. If seconds == -1,
it doesn't timeout. Returns a thread object that can be
stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(self.get_counters, seconds, True)
return t
def write_counters_to_file(self, filename="counters.dat"):
"""Writes the most recent values of the internal
counters and coincidence counters to a file
named `filename`.
"""
fn = self._data_directory + "/" + filename
if not os.path.exists(fn):
with open(fn, "w") as fs:
header = (
"1,2,3,4,5,6,7,8,1/2,1/3,1/4,2/3,2/4,3/4,"
"1/2/3,1/2/4,1/3/4,2/3/4,1/2/3/4"
)
fs.write("#" + header + "\n")
counters = self.get_counters()
counters_str = ",".join([str(c) for c in counters])
with open(fn, "a") as fs:
fs.write(counters_str + "\n")
def write_counters_to_file_continuous(self, seconds=-1, filename="counters.dat"):
"""Runs `write_counters_to_file` continuously in a separate
thread for `seconds` amount of seconds in a loop. If
seconds == -1, it doesn't timeout. Returns a thread
object that can be stopped and started.
"""
time.sleep(self._wait_for_settings)
t = ThreadStoppable(
self.write_counters_to_file, seconds, False, args=(filename,)
)
return t
def _get_channel_delays(self):
channels = range(8)
channels = (ct.c_int32 * len(channels))(*channels)
self.idq801Lib.TDC_getChannelDelays(channels)
return channels
def get_channel_delays_bins(self):
return list(self._get_channel_delays())
def get_channel_delays_ns(self):
bin = self.get_timebase()
delays_bins = list(self._get_channel_delays())
return [d * 1e9 * bin for d in delays_bins]
def set_channel_delays_bins(self, delays_bins):
delays = (ct.c_int * len(delays_bins))(*delays_bins)
return self._set_value(
delays, self.idq801Lib.TDC_setChannelDelays, self._get_channel_delays
)
def set_channel_delays_ns(self, delays_ns_dict):
"""
Set channel delays in ns. The delays are in a dictionary.
Args:
delays_ns_dict:
Returns:
"""
delays_ns = self.get_channel_delays_ns()
for channel in delays_ns_dict.keys():
self.channel_delays[str(channel)] = delays_ns[int(channel) - 1]
delays_ns[int(channel) - 1] = delays_ns_dict[str(channel)]
bin = self.get_timebase()
delays_bins = [int(d * 1e-9 / bin) for d in delays_ns]
return self.set_channel_delays_bins(delays_bins)
def main():
idq801 = Idq801()
idq801.clean_data_directory()
idq801.set_channel((1, 2))
# t1 = idq801.write_counters_to_file_continuous(2)
# t2 = idq801.write_timestamps_to_file_continuous(2)
#
if __name__ == "__main__":
main()
|
[
"ctypes.c_int32",
"numpy.iinfo",
"time.sleep",
"numpy.array",
"copy.deepcopy",
"ctypes.CDLL",
"ThreadStoppable.ThreadStoppable",
"os.path.exists",
"numpy.searchsorted",
"numpy.where",
"numpy.max",
"os.path.isdir",
"os.mkdir",
"numpy.vstack",
"numpy.min",
"numpy.frombuffer",
"numpy.abs",
"numpy.argmax",
"os.path.dirname",
"time.time",
"ctypes.byref",
"itertools.combinations",
"shutil.rmtree"
] |
[((157, 182), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (172, 182), False, 'import os\n'), ((2118, 2129), 'time.time', 'time.time', ([], {}), '()\n', (2127, 2129), False, 'import time\n'), ((3264, 3276), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (3274, 3276), True, 'import ctypes as ct\n'), ((3290, 3302), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (3300, 3302), True, 'import ctypes as ct\n'), ((3316, 3328), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (3326, 3328), True, 'import ctypes as ct\n'), ((4074, 4129), 'shutil.rmtree', 'shutil.rmtree', (['self._data_directory'], {'ignore_errors': '(True)'}), '(self._data_directory, ignore_errors=True)\n', (4087, 4129), False, 'import shutil\n'), ((4138, 4168), 'os.mkdir', 'os.mkdir', (['self._data_directory'], {}), '(self._data_directory)\n', (4146, 4168), False, 'import os\n'), ((9637, 9649), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (9647, 9649), True, 'import ctypes as ct\n'), ((9927, 9939), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (9937, 9939), True, 'import ctypes as ct\n'), ((11556, 11594), 'ctypes.c_int32', 'ct.c_int32', (['clear_retrieved_timestamps'], {}), '(clear_retrieved_timestamps)\n', (11566, 11594), True, 'import ctypes as ct\n'), ((11731, 11743), 'ctypes.c_int32', 'ct.c_int32', ([], {}), '()\n', (11741, 11743), True, 'import ctypes as ct\n'), ((11832, 11843), 'time.time', 'time.time', ([], {}), '()\n', (11841, 11843), False, 'import time\n'), ((11977, 12008), 'numpy.frombuffer', 'np.frombuffer', (['c'], {'dtype': 'np.int8'}), '(c, dtype=np.int8)\n', (11990, 12008), True, 'import numpy as np\n'), ((12142, 12175), 'numpy.frombuffer', 'np.frombuffer', (['ts'], {'dtype': 'np.int64'}), '(ts, dtype=np.int64)\n', (12155, 12175), True, 'import numpy as np\n'), ((13241, 13279), 'numpy.array', 'np.array', (['timestamps_2'], {'dtype': 'np.int64'}), '(timestamps_2, dtype=np.int64)\n', (13249, 13279), True, 'import numpy as np\n'), ((17086, 17114), 'time.sleep', 'time.sleep', (['integration_time'], {}), '(integration_time)\n', (17096, 17114), False, 'import time\n'), ((17301, 17335), 'copy.deepcopy', 'copy.deepcopy', (['original_timestamps'], {}), '(original_timestamps)\n', (17314, 17335), False, 'import copy\n'), ((17906, 17925), 'numpy.max', 'np.max', (['coin_counts'], {}), '(coin_counts)\n', (17912, 17925), True, 'import numpy as np\n'), ((18418, 18453), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (18428, 18453), False, 'import time\n'), ((18508, 18600), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.get_timestamps', 'seconds', '(True)'], {'args': '(clear_retrieved_timestamps,)'}), '(self.get_timestamps, seconds, True, args=(\n clear_retrieved_timestamps,))\n', (18523, 18600), False, 'from ThreadStoppable import ThreadStoppable\n'), ((19883, 19918), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (19893, 19918), False, 'import time\n'), ((19931, 19986), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.write_timestamps_to_file', 'seconds'], {}), '(self.write_timestamps_to_file, seconds)\n', (19946, 19986), False, 'from ThreadStoppable import ThreadStoppable\n'), ((20554, 20589), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (20564, 20589), False, 'import time\n'), ((20602, 20651), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.get_counters', 'seconds', '(True)'], {}), '(self.get_counters, 
seconds, True)\n', (20617, 20651), False, 'from ThreadStoppable import ThreadStoppable\n'), ((21735, 21770), 'time.sleep', 'time.sleep', (['self._wait_for_settings'], {}), '(self._wait_for_settings)\n', (21745, 21770), False, 'import time\n'), ((21783, 21861), 'ThreadStoppable.ThreadStoppable', 'ThreadStoppable', (['self.write_counters_to_file', 'seconds', '(False)'], {'args': '(filename,)'}), '(self.write_counters_to_file, seconds, False, args=(filename,))\n', (21798, 21861), False, 'from ThreadStoppable import ThreadStoppable\n'), ((1052, 1081), 'os.path.isdir', 'os.path.isdir', (['data_directory'], {}), '(data_directory)\n', (1065, 1081), False, 'import os\n'), ((1095, 1119), 'os.mkdir', 'os.mkdir', (['data_directory'], {}), '(data_directory)\n', (1103, 1119), False, 'import os\n'), ((1217, 1242), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1232, 1242), False, 'import os\n'), ((1314, 1352), 'ctypes.CDLL', 'ct.CDLL', (["(module_path + 'libtdcbase.so')"], {}), "(module_path + 'libtdcbase.so')\n", (1321, 1352), True, 'import ctypes as ct\n'), ((2756, 2789), 'time.sleep', 'time.sleep', (['self._set_check_delay'], {}), '(self._set_check_delay)\n', (2766, 2789), False, 'import time\n'), ((3372, 3384), 'ctypes.byref', 'ct.byref', (['cm'], {}), '(cm)\n', (3380, 3384), True, 'import ctypes as ct\n'), ((3386, 3398), 'ctypes.byref', 'ct.byref', (['cw'], {}), '(cw)\n', (3394, 3398), True, 'import ctypes as ct\n'), ((3400, 3412), 'ctypes.byref', 'ct.byref', (['ew'], {}), '(ew)\n', (3408, 3412), True, 'import ctypes as ct\n'), ((9689, 9703), 'ctypes.byref', 'ct.byref', (['lost'], {}), '(lost)\n', (9697, 9703), True, 'import ctypes as ct\n'), ((9832, 9846), 'ctypes.byref', 'ct.byref', (['lost'], {}), '(lost)\n', (9840, 9846), True, 'import ctypes as ct\n'), ((9990, 10004), 'ctypes.byref', 'ct.byref', (['size'], {}), '(size)\n', (9998, 10004), True, 'import ctypes as ct\n'), ((11799, 11810), 'ctypes.byref', 'ct.byref', (['v'], {}), '(v)\n', (11807, 11810), True, 'import ctypes as ct\n'), ((13558, 13581), 'numpy.searchsorted', 'np.searchsorted', (['t1', 't2'], {}), '(t1, t2)\n', (13573, 13581), True, 'import numpy as np\n'), ((14160, 14195), 'numpy.vstack', 'np.vstack', (['(coins_back, coins_forw)'], {}), '((coins_back, coins_forw))\n', (14169, 14195), True, 'import numpy as np\n'), ((15195, 15228), 'itertools.combinations', 'it.combinations', (['coin_channels', '(2)'], {}), '(coin_channels, 2)\n', (15210, 15228), True, 'import itertools as it\n'), ((17963, 17985), 'numpy.argmax', 'np.argmax', (['coin_counts'], {}), '(coin_counts)\n', (17972, 17985), True, 'import numpy as np\n'), ((18806, 18863), 'os.path.isdir', 'os.path.isdir', (["(self._data_directory + '/' + timestamp_dir)"], {}), "(self._data_directory + '/' + timestamp_dir)\n", (18819, 18863), False, 'import os\n'), ((18877, 18929), 'os.mkdir', 'os.mkdir', (["(self._data_directory + '/' + timestamp_dir)"], {}), "(self._data_directory + '/' + timestamp_dir)\n", (18885, 18929), False, 'import os\n'), ((20946, 20964), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (20960, 20964), False, 'import os\n'), ((1420, 1458), 'ctypes.CDLL', 'ct.CDLL', (["(module_path + './tdcbase.dll')"], {}), "(module_path + './tdcbase.dll')\n", (1427, 1458), True, 'import ctypes as ct\n'), ((12780, 12799), 'numpy.min', 'np.min', (['last_counts'], {}), '(last_counts)\n', (12786, 12799), True, 'import numpy as np\n'), ((13477, 13495), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (13485, 13495), True, 'import numpy 
as np\n'), ((13722, 13750), 'numpy.abs', 'np.abs', (['(t1[t1_pos_forw] - t2)'], {}), '(t1[t1_pos_forw] - t2)\n', (13728, 13750), True, 'import numpy as np\n'), ((13811, 13839), 'numpy.abs', 'np.abs', (['(t1[t1_pos_back] - t2)'], {}), '(t1[t1_pos_back] - t2)\n', (13817, 13839), True, 'import numpy as np\n'), ((14241, 14279), 'numpy.array', 'np.array', (['timestamps_1'], {'dtype': 'np.int64'}), '(timestamps_1, dtype=np.int64)\n', (14249, 14279), True, 'import numpy as np\n'), ((19179, 19197), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (19193, 19197), False, 'import os\n'), ((14462, 14478), 'numpy.where', 'np.where', (['(l != r)'], {}), '(l != r)\n', (14470, 14478), True, 'import numpy as np\n'), ((12946, 12994), 'numpy.searchsorted', 'np.searchsorted', (['timestamps', 'last_count', '"""right"""'], {}), "(timestamps, last_count, 'right')\n", (12961, 12994), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
TOTAL = 200
STEP = 0.25
EPS = 0.1
INITIAL_THETA = [9, 14]
def func(x):
return 0.2 * x + 3
def generate_sample(total=TOTAL):
x = 0
while x < total * STEP:
yield func(x) + np.random.uniform(-1, 1) * np.random.uniform(2, 8)
x += STEP
def cost_function(A, Y, theta):
return (Y - A@theta).T@(Y - A@theta)
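# For J(theta) = (Y - A@theta).T @ (Y - A@theta) the gradient components are
# dJ/dtheta_j = -2 * sum_i (Y[i] - A[i]@theta) * A[i][j], so the `derivatives`
# accumulated in the descent loops below are proportional to the negative
# gradient; that is why theta is incremented by speed * derivatives to descend.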
def batch_descent(A, Y, speed=0.001):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
theta.reshape((len(theta), 1))
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
derivatives = [0] * len(theta)
# ---------------------------------------------
for j in range(len(theta)):
summ = 0
for i in range(len(Y)):
summ += (Y[i] - A[i]@theta) * A[i][j]
derivatives[j] = summ
        # Enforce the simultaneous-update requirement (both components updated together)
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# ---------------------------------------------
current_cost = cost_function(A, Y, theta)
print("Batch cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
def stochastic_descent(A, Y, speed=0.1):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
# --------------------------------------
# for i in range(len(Y)):
i = np.random.randint(0, len(Y))
derivatives = [0] * len(theta)
for j in range(len(theta)):
derivatives[j] = (Y[i] - A[i]@theta) * A[i][j]
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# --------------------------------------
current_cost = cost_function(A, Y, theta)
print("Stochastic cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
X = np.arange(0, TOTAL * STEP, STEP)
Y = np.array([y for y in generate_sample(TOTAL)])
# Normalization added so that the paraboloid looks nice
X = (X - X.min()) / (X.max() - X.min())
A = np.empty((TOTAL, 2))
A[:, 0] = 1
A[:, 1] = X
theta = np.linalg.pinv(A).dot(Y)
print(theta, cost_function(A, Y, theta))
import time
start = time.clock()
theta_stochastic = stochastic_descent(A, Y, 0.1)
print("St:", time.clock() - start, theta_stochastic)
start = time.clock()
theta_batch = batch_descent(A, Y, 0.001)
print("Btch:", time.clock() - start, theta_batch)
|
[
"numpy.abs",
"numpy.linalg.pinv",
"time.clock",
"matplotlib.pyplot.plot",
"numpy.empty",
"numpy.random.uniform",
"numpy.arange"
] |
[((2136, 2168), 'numpy.arange', 'np.arange', (['(0)', '(TOTAL * STEP)', 'STEP'], {}), '(0, TOTAL * STEP, STEP)\n', (2145, 2168), True, 'import numpy as np\n'), ((2319, 2339), 'numpy.empty', 'np.empty', (['(TOTAL, 2)'], {}), '((TOTAL, 2))\n', (2327, 2339), True, 'import numpy as np\n'), ((2460, 2472), 'time.clock', 'time.clock', ([], {}), '()\n', (2470, 2472), False, 'import time\n'), ((2584, 2596), 'time.clock', 'time.clock', ([], {}), '()\n', (2594, 2596), False, 'import time\n'), ((610, 646), 'numpy.abs', 'np.abs', (['(previous_cost - current_cost)'], {}), '(previous_cost - current_cost)\n', (616, 646), True, 'import numpy as np\n'), ((1259, 1293), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[0]', 'theta[1]', '"""ro"""'], {}), "(theta[0], theta[1], 'ro')\n", (1267, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1499, 1535), 'numpy.abs', 'np.abs', (['(previous_cost - current_cost)'], {}), '(previous_cost - current_cost)\n', (1505, 1535), True, 'import numpy as np\n'), ((2079, 2113), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[0]', 'theta[1]', '"""ro"""'], {}), "(theta[0], theta[1], 'ro')\n", (2087, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2373, 2390), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (2387, 2390), True, 'import numpy as np\n'), ((2535, 2547), 'time.clock', 'time.clock', ([], {}), '()\n', (2545, 2547), False, 'import time\n'), ((2653, 2665), 'time.clock', 'time.clock', ([], {}), '()\n', (2663, 2665), False, 'import time\n'), ((246, 270), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (263, 270), True, 'import numpy as np\n'), ((273, 296), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(8)'], {}), '(2, 8)\n', (290, 296), True, 'import numpy as np\n')]
|
import random
import numpy as np
import itertools
import re
from collections import defaultdict
import os
def get_tags(s, open_delim='<', close_delim='/>'):
"""Iterator to spit out the xml style disfluency tags in a given string.
Keyword arguments:
s -- input string
"""
while True:
# Search for the next two delimiters in the source text
start = s.find(open_delim)
end = s.find(close_delim)
# We found a non-empty match
if -1 < start < end:
# Skip the length of the open delimiter
start += len(open_delim)
# Spit out the tag
yield open_delim + s[start:end].strip() + close_delim
# Truncate string to start from last match
s = s[end+len(close_delim):]
else:
return
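# Doctest-style example of the generator above (illustrative):
# >>> list(get_tags('<rms id="3"/><f/> trailing text'))
# ['<rms id="3"/>', '<f/>']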
def remove_uttseg_tag(tag):
tags = get_tags(tag)
final_tag = ""
for t in tags:
m = re.search(r'<[ct]*/>', t)
if m:
continue
final_tag += t
return final_tag
def convert_to_simple_label(tag, rep="disf1_uttseg"):
"""Takes the complex tag set and gives back the simple,
smaller version with ten tags:
"""
disftag = "<f/>"
if "<rm-" in tag:
disftag = "<rm-0/>"
elif "<e" in tag:
disftag = "<e/>"
if "uttseg" in rep: # if combined task with TTO
m = re.search(r'<[ct]*/>', tag)
if m:
return disftag + m.group(0)
else:
print("WARNING NO TAG", +tag)
return ""
return disftag # if not TT0
def convert_to_simple_idx(tag, rep='1_trp'):
tag = convert_to_simple_label(tag, rep)
simple_tags = """<e/><cc/>
<e/><ct/>
<e/><tc/>
<e/><tt/>
<f/><cc/>
<f/><ct/>
<f/><tc/>
<f/><tt/>
<rm-0/><cc/>
<rm-0/><ct/>""".split("\n")
simple_tag_dict = {}
for s in range(0, len(simple_tags)):
simple_tag_dict[simple_tags[s].strip()] = s
return simple_tag_dict[tag]
def convert_from_full_tag_set_to_idx(tag, rep, idx_to_label):
"""Maps from the full tag set of trp repairs to the new dictionary"""
if "simple" in rep:
tag = convert_to_simple_label(tag)
for k, v in idx_to_label.items():
if v in tag: # a substring relation
return k
def add_word_continuation_tags(tags):
"""In place, add a continutation tag to each word:
<cc/> -word continues current dialogue act and the next word will also
continue it
<ct/> -word continues current dialogue act and is the last word of it
<tc/> -word starts this dialogue act tag and the next word continues it
<tt/> -word starts and ends dialogue act (single word dialogue act)
"""
tags = list(tags)
for i in range(0, len(tags)):
if i == 0:
tags[i] = tags[i] + "<t"
else:
tags[i] = tags[i] + "<c"
if i == len(tags)-1:
tags[i] = tags[i] + "t/>"
else:
tags[i] = tags[i] + "c/>"
return tags
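# Doctest-style example of the continuation tagging (illustrative):
# >>> add_word_continuation_tags(["<f/>", "<f/>", "<f/>"])
# ['<f/><tc/>', '<f/><cc/>', '<f/><ct/>']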
def verify_disfluency_tags(tags, normalize_ID=False):
"""Check that the repair tags sequence is valid.
Keyword arguments:
normalize_ID -- boolean, whether to convert the repair ID
numbers to be derivable from their unique RPS position in the utterance.
"""
id_map = dict() # map between old ID and new ID
# in first pass get old and new IDs
for i in range(0, len(tags)):
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[i])
if rps:
id_map[rps[0][rps[0].find("=")+2:-3]] = str(i)
# key: old repair ID, value, list [reparandum,interregnum,repair]
# all True when repair is all there
repairs = defaultdict(list)
for r in id_map.keys():
repairs[r] = [None, None, None] # three valued None<False<True
# print(repairs)
# second pass verify the validity of the tags
# and (optionally) modify the IDs
for i in range(0, len(tags)): # iterate over all tag strings
new_tags = []
if tags[i] == "":
assert(all([repairs[ID][2] or
repairs[ID] == [None, None, None]
for ID in repairs.keys()])),\
"Unresolved repairs at fluent tag\n\t" + str(repairs)
for tag in get_tags(tags[i]): # iterate over all tags
# print(i)
# print(tag)
if tag == "<e/>":
new_tags.append(tag)
continue
ID = tag[tag.find("=")+2:-3]
if "<rms" in tag:
assert repairs[ID][0] == None,\
"reparandum started parsed more than once " + ID
assert repairs[ID][1] == None,\
"reparandum start again during interregnum phase " + ID
assert repairs[ID][2] == None,\
"reparandum start again during repair phase " + ID
repairs[ID][0] = False # set in progress
elif "<rm " in tag:
assert repairs[ID][0] != None,\
"mid reparandum tag before reparandum start " + ID
assert repairs[ID][2] == None,\
"mid reparandum tag in a interregnum phase or beyond " + ID
assert repairs[ID][2] == None,\
"mid reparandum tag in a repair phase or beyond " + ID
elif "<i" in tag:
assert repairs[ID][0] != None,\
"interregnum start before reparandum start " + ID
assert repairs[ID][2] == None,\
"interregnum in a repair phase " + ID
if repairs[ID][1] == None: # interregnum not reached yet
repairs[ID][0] = True # reparandum completed
repairs[ID][1] = False # interregnum in progress
elif "<rps" in tag:
assert repairs[ID][0] != None,\
"repair start before reparandum start " + ID
assert repairs[ID][1] != True,\
"interregnum over before repair start " + ID
assert repairs[ID][2] == None,\
"repair start parsed twice " + ID
repairs[ID][0] = True # reparanudm complete
repairs[ID][1] = True # interregnum complete
repairs[ID][2] = False # repair in progress
elif "<rp " in tag:
assert repairs[ID][0] == True,\
"mid repair word start before reparandum end " + ID
assert repairs[ID][1] == True,\
"mid repair word start before interregnum end " + ID
assert repairs[ID][2] == False,\
"mid repair tag before repair start tag " + ID
elif "<rpn" in tag:
# make sure the rps is order in tag string is before
assert repairs[ID][0] == True,\
"repair end before reparandum end " + ID
assert repairs[ID][1] == True,\
"repair end before interregnum end " + ID
assert repairs[ID][2] == False,\
"repair end before repair start " + ID
repairs[ID][2] = True
# do the replacement of the tag's ID after checking
new_tags.append(tag.replace(ID, id_map[ID]))
if normalize_ID:
tags[i] = "".join(new_tags)
assert all([repairs[ID][2] for ID in repairs.keys()]),\
"Unresolved repairs:\n\t" + str(repairs)
def shuffle(lol, seed):
"""Shuffle inplace each list in the same order.
lol :: list of list as input
seed :: seed the shuffling
"""
for l in lol:
random.seed(seed)
random.shuffle(l)
def minibatch(l, bs):
"""Returns a list of minibatches of indexes
which size is equal to bs
border cases are treated as follow:
eg: [0,1,2,3] and bs = 3
will output:
[[0],[0,1],[0,1,2],[1,2,3]]
l :: list of word idxs
"""
out = [l[:i] for i in xrange(1, min(bs, len(l)+1))]
out += [l[i-bs:i] for i in xrange(bs, len(l)+1)]
assert len(l) == len(out)
return out
def indices_from_length(sentence_length, bs, start_index=0):
"""Return a list of indexes pairs (start/stop) for each word
max difference between start and stop equal to bs
border cases are treated as follow:
eg: sentenceLength=4 and bs = 3
will output:
[[0,0],[0,1],[0,2],[1,3]]
"""
l = map(lambda x: start_index+x, xrange(sentence_length))
out = []
for i in xrange(0, min(bs, len(l))):
out.append([l[0], l[i]])
for i in xrange(bs+1, len(l)+1):
out.append([l[i-bs], l[i-1]])
assert len(l) == sentence_length
return out
def context_win(l, win):
"""Return a list of list of indexes corresponding
to context windows surrounding each word in the sentence
given a list of indexes composing a sentence.
win :: int corresponding to the size of the window
"""
assert (win % 2) == 1
assert win >= 1
l = list(l)
    lpadded = win // 2 * [-1] + l + win // 2 * [-1]
out = [lpadded[i:i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
def context_win_backwards(l, win):
'''Same as contextwin except only backwards context
(i.e. like an n-gram model)
'''
assert win >= 1
l = list(l)
lpadded = (win-1) * [-1] + l
out = [lpadded[i: i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
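# Doctest-style example of the backwards context window (illustrative):
# >>> context_win_backwards([4, 5, 6], 2)
# [[-1, 4], [4, 5], [5, 6]]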
def corpus_to_indexed_matrix(my_array_list, win, bs, sentence=False):
"""Returns a matrix of contextwins for a list of utterances of
dimensions win * n_words_in_corpus
(i.e. total length of all arrays in my_array_list)
and corresponding matrix of indexes (of just start/stop for each one)
so 2 * n_words_in_corpus
of where to access these, using bs (backprop distance)
as the limiting history size
"""
sentences = [] # a list (of arrays, or lists?), returned as matrix
indices = [] # a list of index pairs (arrays?), returned as matrix
totalSize = 0
if sentence:
for sent in my_array_list:
mysent = np.asarray([-1] * (bs-1) + list(sent)) # padding with eos
# get list of context windows
mywords = context_win_backwards(mysent, win)
# just one per utterance for now..
cindices = [[totalSize, totalSize+len(mywords)-1]]
cwords = []
for i in range(bs, len(mywords)+1):
words = list(itertools.chain(*mywords[(i-bs):i]))
cwords.append(words) # always (bs * n) words long
# print cwords
sentences.extend(cwords)
indices.extend(cindices)
totalSize += len(cwords)
else:
for sentence in my_array_list:
# get list of context windows
cwords = context_win_backwards(sentence, win)
cindices = indices_from_length(len(cwords), bs, totalSize)
indices.extend(cindices)
sentences.extend(cwords)
totalSize += len(cwords)
for s in sentences:
if any([x is None for x in s]):
print(s)
return np.matrix(sentences, dtype='int32'), indices
def convert_from_eval_tags_to_inc_disfluency_tags(tags, words,
representation="disf1",
limit=8):
"""Conversion from disfluency tagged corpus with xml-style tags
as from STIR (https://bitbucket.org/julianhough/stir)
to the strictly left-to-right schemas as
described by Hough and Schlangen 2015 Interspeech paper,
which are used by RNN architectures at runtime.
Keyword arguments:
tags -- the STIR eval style disfluency tags
words -- the words in the utterance
representation -- the number corresponding to the type of tagging system
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
limit -- the limit on the distance back from the repair start
"""
repair_dict = defaultdict(list)
new_tags = []
# print("tags")
# print(tags)
# print('words')
# print(words)
for t in range(0, len(tags)):
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
tags[t] = tags[t].replace(TTO_tag, "")
if "dact" in representation:
m = re.search(r'<diact type="[^\s]*"/>', tags[t])
if m:
dact_tag = m.group(0)
tags[t] = tags[t].replace(dact_tag, "")
if "laugh" in representation:
m = re.search(r'<speechLaugh/>|<laughter/>', tags[t])
if m:
laughter_tag = m.group(0)
else:
laughter_tag = "<nolaughter/>"
tags[t] = tags[t].replace(laughter_tag, "")
current_tag = ""
if "<e/>" in tags[t] or "<i" in tags[t]:
current_tag = "<e/>" # TODO may make this an interregnum
if "<rms" in tags[t]:
rms = re.findall("<rms id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rms:
repairID = r[r.find("=")+2:-3]
repair_dict[repairID] = [t, 0]
if "<rps" in tags[t]:
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rps:
repairID = r[r.find("=")+2:-3]
# print('repairID')
# print(repairID)
# print(repair_dict.get(repairID))
# print(str(repairID)+str(tags)+str(words))
assert repair_dict.get(repairID), str(repairID)+str(tags)+str(words)
repair_dict[repairID][1] = t
dist = min(t-repair_dict[repairID][0], limit)
# adjust in case the reparandum is shortened due to the limit
repair_dict[repairID][0] = t-dist
current_tag += "<rm-{}/>".format(dist) + "<rpMid/>"
if "<rpn" in tags[t]:
rpns = re.findall("<rpnrep id\=\"[0-9]+\"\/>", tags[t], re.S) +\
re.findall("<rpnsub id\=\"[0-9]+\"\/>", tags[t], re.S)
rpns_del = re.findall("<rpndel id\=\"[0-9]+\"\/>", tags[t], re.S)
# slight simplifying assumption is to take the repair with
# the longest reparandum as the end category
repair_type = ""
longestlength = 0
for r in rpns:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Sub"
for r in rpns_del:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Del"
if repair_type == "":
raise Exception("Repair not passed \
correctly."+str(words)+str(tags))
current_tag += "<rpEnd"+repair_type+"/>"
current_tag = current_tag.replace("<rpMid/>", "")
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
if "dact" in representation:
current_tag += dact_tag
if "laugh" in representation:
current_tag += laughter_tag
new_tags.append(current_tag)
return new_tags
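# Doctest-style example of the conversion (illustrative): a substitution repair
# where "like" is replaced by "love" in "i like love it".
# >>> convert_from_eval_tags_to_inc_disfluency_tags(
# ...     ["<f/>", '<rms id="1"/>', '<rps id="1"/><rpnsub id="1"/>', "<f/>"],
# ...     ["i", "like", "love", "it"], representation="disf1")
# ['<f/>', '<f/>', '<rm-1/><rpEndSub/>', '<f/>']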
def convert_from_inc_disfluency_tags_to_eval_tags(
tags, words,
start=0,
representation="disf1_uttseg"):
"""Converts the incremental style output tags of the RNN to the standard
STIR eval output tags.
The exact inverse of convertFromEvalTagsToIncrementalDisfluencyTags.
Keyword arguments:
tags -- the RNN style disfluency tags
words -- the words in the utterance
start -- position from where to begin changing the tags from
representation -- the number corresponding to the type of tagging system,
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
"""
# maps from the repair ID to a list of
# [reparandumStart,repairStart,repairOver]
repair_dict = defaultdict(list)
new_tags = []
if start > 0:
# assuming the tags up to this point are already converted
new_tags = tags[:start]
if "mid" not in representation:
rps_s = re.findall("<rps id\=\"[0-9]+\"\/>", tags[start-1])
rpmid = re.findall("<rp id\=\"[0-9]+\"\/>", tags[start-1])
if rps_s:
for r in rps_s:
repairID = r[r.find("=")+2:-3]
resolved_repair = re.findall(
"<rpn[repsubdl]+ id\=\"{}\"\/>"
.format(repairID), tags[start-1])
if not resolved_repair:
if not rpmid:
rpmid = []
rpmid.append(r.replace("rps ", "rp "))
if rpmid:
newstart = start-1
for rp in rpmid:
rps = rp.replace("rp ", "rps ")
repairID = rp[rp.find("=")+2:-3]
# go back and find the repair
for b in range(newstart, -1, -1):
if rps in tags[b]:
repair_dict[repairID] = [b, b, False]
break
for t in range(start, len(tags)):
current_tag = ""
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
if "<e/>" in tags[t] or "<i/>" in tags[t]:
current_tag = "<e/>"
if "<rm-" in tags[t]:
rps = re.findall("<rm-[0-9]+\/>", tags[t], re.S)
for r in rps: # should only be one
current_tag += '<rps id="{}"/>'.format(t)
# print t-dist
if "simple" in representation:
# simply tagging the rps
pass
else:
dist = int(r[r.find("-")+1:-2])
repair_dict[str(t)] = [max([0, t-dist]), t, False]
# backwards looking search if full set
# print new_tags, t, dist, t-dist, max([0, t-dist])
# print tags[:t+1]
rms_start_idx = max([0, t-dist])
new_tags[rms_start_idx] = '<rms id="{}"/>'\
.format(t) + new_tags[rms_start_idx]\
.replace("<f/>", "")
reparandum = False # interregnum if edit term
for b in range(t-1, max([0, t-dist]), -1):
if "<e" not in new_tags[b]:
reparandum = True
new_tags[b] = '<rm id="{}"/>'.format(t) +\
new_tags[b].replace("<f/>", "")
if reparandum is False and "<e" in new_tags[b]:
new_tags[b] = '<i id="{}"/>'.\
format(t) + new_tags[b]
# repair ends
if "<rpEnd" in tags[t]:
rpns = re.findall("<rpEndSub/>", tags[t], re.S)
rpns_del = re.findall("<rpEndDel/>", tags[t], re.S)
rpnAll = rpns + rpns_del
if rpnAll:
for k, v in repair_dict.items():
if t >= int(k) and v[2] is False:
repair_dict[k][2] = True
# classify the repair
if rpns_del: # a delete
current_tag += '<rpndel id="{}"/>'.format(k)
rpns_del.pop(0)
continue
reparandum = [words[i] for i in range(0, len(new_tags))
if '<rms id="{}"/>'.
format(k) in new_tags[i] or
'<rm id="{}"/>'.
format(k) in new_tags[i]]
repair = [words[i] for i in range(0, len(new_tags))
if '<rps id="{}"/>'.format(k)
in new_tags[i] or '<rp id="{}"/>'.format(k)
in new_tags[i]] + [words[t]]
if reparandum == repair:
current_tag += '<rpnrep id="{}"/>'.format(k)
else:
current_tag += '<rpnsub id="{}"/>'.format(k)
# mid repair phases still in progress
for k, v in repair_dict.items():
if t > int(k) and v[2] is False:
current_tag += '<rp id="{}"/>'.format(k)
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
new_tags.append(current_tag)
return new_tags
def verify_dialogue_data_matrix(dialogue_data_matrix, word_dict=None,
pos_dict=None, tag_dict=None, n_lm=0,
n_acoustic=0):
"""Boolean check of whether dialogue data consistent
with args. Checks all idxs are valid and number of features is correct.
Standard form of each row of the matrix should be:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
"""
l = 3 + n_acoustic + n_lm + 1 # row length
try:
for i, row in enumerate(dialogue_data_matrix):
assert len(row) == l,\
"row {} wrong length {}, should be {}".format(i, len(row), l)
assert word_dict[row[1]] is not None,\
"row[1][{}] {} not in word dict".format(i, row[1])
assert pos_dict[row[2]] is not None,\
"row[2][{}] {} not in POS dict".format(i, row[2])
assert tag_dict[row[-1]] is not None,\
"row[-1][{}] {} not in tag dict".format(i, row[-1])
except AssertionError as a:
print(a)
return False
return True
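# Hedged illustration (not part of the original module): assuming word_duration
# is counted among the n_acoustic features, a well-formed row for a matrix with
# n_acoustic=1 and n_lm=0 could look like
#     [utt_index, word_idx, pos_idx, word_duration, label_idx]
# e.g. [0, 12, 4, 0.31, 2], which has the expected row length 3 + 1 + 0 + 1 = 5
# and whose idx entries must resolve in the word/POS/tag dicts passed above.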
def verify_dialogue_data_matrices_from_folder(matrices_folder_filepath,
word_dict=None,
pos_dict=None,
tag_dict=None,
n_lm=0,
n_acoustic=0):
"""A boolean check that the dialogue matrices make sense for the
particular configuration in args and tag2idx dicts.
"""
for dialogue_file in os.listdir(matrices_folder_filepath):
v = np.load(matrices_folder_filepath + "/" + dialogue_file,allow_pickle=True)
if not verify_dialogue_data_matrix(v,
word_dict=word_dict,
pos_dict=pos_dict,
tag_dict=tag_dict,
n_lm=n_lm,
n_acoustic=n_acoustic):
# print"{} failed test".format(dialogue_file)
return False
return True
def dialogue_data_and_indices_from_matrix(d_matrix,
n_extra,
pre_seg=False,
window_size=2,
bs=9,
tag_rep="disf1_uttseg",
tag_to_idx_map=None,
in_utterances=False):
"""Transforming from input format of row:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
to 5-tuple of:
word_idx, pos_idx, extra, labels, indices
where :word_idx: and :pos_idx: have the correct window context
according to @window_size
and :indices: is the start and stop points for consumption by the
net in training for each label in :labels:. :extra: is the matrix
of extra features.
"""
    if len(d_matrix) == 0:
return
utt_indices = d_matrix[:, 0]
words = d_matrix[:, 1]
pos = d_matrix[:, 2]
extra = None if n_extra == 0 else d_matrix[:, 3: -1]
labels = d_matrix[:, -1]
word_idx = []
pos_idx = []
current = []
indices = []
previous_idx = -1
for i, a_tuple in enumerate(zip(utt_indices, words, pos, labels)):
utt_idx, w, p, l = a_tuple
# print(w)
current.append((w, p, l))
if pre_seg:
if previous_idx != utt_idx or i == len(labels)-1:
if in_utterances:
start = 0 if indices == [] else indices[-1][1]+1
indices.append([start, start + (len(current)-1)])
else:
indices.extend(indices_from_length(len(current), bs,
start_index=len(indices)))
word_idx.extend(context_win_backwards([x[0] for x in current],
window_size))
pos_idx.extend(context_win_backwards([x[1] for x in current],
window_size))
current = []
# print('final')
# print(w)
# print(word_idx)
elif i == len(labels)-1:
# indices = indices_from_length(len(current), bs)
# currently a simple window of same size
indices = [[j, j + bs] for j in range(0, len(current))]
padding = [[-1, -1]] * (bs - window_size)
word_idx = padding + context_win_backwards([x[0] for x in current],
window_size)
pos_idx = padding + context_win_backwards([x[1] for x in current],
window_size)
previous_idx = utt_idx
# print(pos_idx)
# print(word_idx)
# print(extra)
# print(labels)
# print(indices)
# return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
# dtype=np.int32),\
# labels,\
# np.asarray(indices, dtype=np.int32)
return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
dtype=np.int32),\
extra,\
labels,\
np.asarray(indices, dtype=np.int32)
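# Hedged usage sketch (illustrative only; the file names and argument values
# are hypothetical, not taken from the original code):
#     d = np.load(matrices_folder + "/" + dialogue_file, allow_pickle=True)
#     word_idx, pos_idx, extra, labels, indices = \
#         dialogue_data_and_indices_from_matrix(d,
#                                               n_extra=0,
#                                               pre_seg=True,
#                                               window_size=2,
#                                               bs=9,
#                                               in_utterances=True)
# word_idx and pos_idx then hold the backwards context windows, and indices the
# start/stop points consumed by the network for each label.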
if __name__ == '__main__':
tags = '<f/>,<rms id="3"/>,<i id="3"/><e/>,<rps id="3"/>' +\
'<rpnsub id="3"/>,<f/>,<e/>,<f/>,' + \
'<f/>'
tags = tags.split(",")
words = "i,like,uh,love,to,uh,love,alot".split(",")
# print(tags)
# print(len(tags))
# print(len(words))
new_tags = convert_from_eval_tags_to_inc_disfluency_tags(
tags,
words,
representation="disf1")
# print(new_tags)
old_tags = convert_from_inc_disfluency_tags_to_eval_tags(
new_tags,
words,
representation="disf1")
assert old_tags == tags, "\n " + str(old_tags) + "\n" + str(tags)
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# print(context_win_backwards(x, 2))
# print "indices", indices_from_length(11, 9)
|
[
"itertools.chain",
"os.listdir",
"random.shuffle",
"numpy.asarray",
"random.seed",
"numpy.load",
"collections.defaultdict",
"re.findall",
"numpy.matrix",
"re.search"
] |
[((3680, 3697), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3691, 3697), False, 'from collections import defaultdict\n'), ((12142, 12159), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12153, 12159), False, 'from collections import defaultdict\n'), ((16551, 16568), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (16562, 16568), False, 'from collections import defaultdict\n'), ((23086, 23122), 'os.listdir', 'os.listdir', (['matrices_folder_filepath'], {}), '(matrices_folder_filepath)\n', (23096, 23122), False, 'import os\n'), ((925, 949), 're.search', 're.search', (['"""<[ct]*/>"""', 't'], {}), "('<[ct]*/>', t)\n", (934, 949), False, 'import re\n'), ((1372, 1398), 're.search', 're.search', (['"""<[ct]*/>"""', 'tag'], {}), "('<[ct]*/>', tag)\n", (1381, 1398), False, 'import re\n'), ((3435, 3480), 're.findall', 're.findall', (['"""<rps id\\\\="[0-9]+"\\\\/>"""', 'tags[i]'], {}), '(\'<rps id\\\\="[0-9]+"\\\\/>\', tags[i])\n', (3445, 3480), False, 'import re\n'), ((7678, 7695), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7689, 7695), False, 'import random\n'), ((7704, 7721), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (7718, 7721), False, 'import random\n'), ((11193, 11228), 'numpy.matrix', 'np.matrix', (['sentences'], {'dtype': '"""int32"""'}), "(sentences, dtype='int32')\n", (11202, 11228), True, 'import numpy as np\n'), ((23136, 23210), 'numpy.load', 'np.load', (["(matrices_folder_filepath + '/' + dialogue_file)"], {'allow_pickle': '(True)'}), "(matrices_folder_filepath + '/' + dialogue_file, allow_pickle=True)\n", (23143, 23210), True, 'import numpy as np\n'), ((26927, 26963), 'numpy.asarray', 'np.asarray', (['word_idx'], {'dtype': 'np.int32'}), '(word_idx, dtype=np.int32)\n', (26937, 26963), True, 'import numpy as np\n'), ((26965, 27000), 'numpy.asarray', 'np.asarray', (['pos_idx'], {'dtype': 'np.int32'}), '(pos_idx, dtype=np.int32)\n', (26975, 27000), True, 'import numpy as np\n'), ((27240, 27275), 'numpy.asarray', 'np.asarray', (['indices'], {'dtype': 'np.int32'}), '(indices, dtype=np.int32)\n', (27250, 27275), True, 'import numpy as np\n'), ((12345, 12375), 're.search', 're.search', (['"""<[ct]*/>"""', 'tags[t]'], {}), "('<[ct]*/>', tags[t])\n", (12354, 12375), False, 'import re\n'), ((12536, 12581), 're.search', 're.search', (['"""<diact type="[^\\\\s]*"/>"""', 'tags[t]'], {}), '(\'<diact type="[^\\\\s]*"/>\', tags[t])\n', (12545, 12581), False, 'import re\n'), ((12748, 12796), 're.search', 're.search', (['"""<speechLaugh/>|<laughter/>"""', 'tags[t]'], {}), "('<speechLaugh/>|<laughter/>', tags[t])\n", (12757, 12796), False, 'import re\n'), ((13171, 13222), 're.findall', 're.findall', (['"""<rms id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rms id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (13181, 13222), False, 'import re\n'), ((13391, 13442), 're.findall', 're.findall', (['"""<rps id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rps id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (13401, 13442), False, 'import re\n'), ((14283, 14337), 're.findall', 're.findall', (['"""<rpndel id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rpndel id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (14293, 14337), False, 'import re\n'), ((16764, 16817), 're.findall', 're.findall', (['"""<rps id\\\\="[0-9]+"\\\\/>"""', 'tags[start - 1]'], {}), '(\'<rps id\\\\="[0-9]+"\\\\/>\', tags[start - 1])\n', (16774, 16817), False, 'import re\n'), ((16836, 16888), 're.findall', 're.findall', 
(['"""<rp id\\\\="[0-9]+"\\\\/>"""', 'tags[start - 1]'], {}), '(\'<rp id\\\\="[0-9]+"\\\\/>\', tags[start - 1])\n', (16846, 16888), False, 'import re\n'), ((17940, 17970), 're.search', 're.search', (['"""<[ct]*/>"""', 'tags[t]'], {}), "('<[ct]*/>', tags[t])\n", (17949, 17970), False, 'import re\n'), ((18159, 18202), 're.findall', 're.findall', (['"""<rm-[0-9]+\\\\/>"""', 'tags[t]', 're.S'], {}), "('<rm-[0-9]+\\\\/>', tags[t], re.S)\n", (18169, 18202), False, 'import re\n'), ((19630, 19670), 're.findall', 're.findall', (['"""<rpEndSub/>"""', 'tags[t]', 're.S'], {}), "('<rpEndSub/>', tags[t], re.S)\n", (19640, 19670), False, 'import re\n'), ((19694, 19734), 're.findall', 're.findall', (['"""<rpEndDel/>"""', 'tags[t]', 're.S'], {}), "('<rpEndDel/>', tags[t], re.S)\n", (19704, 19734), False, 'import re\n'), ((14134, 14188), 're.findall', 're.findall', (['"""<rpnrep id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rpnrep id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (14144, 14188), False, 'import re\n'), ((14205, 14259), 're.findall', 're.findall', (['"""<rpnsub id\\\\="[0-9]+"\\\\/>"""', 'tags[t]', 're.S'], {}), '(\'<rpnsub id\\\\="[0-9]+"\\\\/>\', tags[t], re.S)\n', (14215, 14259), False, 'import re\n'), ((10524, 10559), 'itertools.chain', 'itertools.chain', (['*mywords[i - bs:i]'], {}), '(*mywords[i - bs:i])\n', (10539, 10559), False, 'import itertools\n')]
|
from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import skimage.draw
import skimage.measure
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right
corners. Both are given as x and y-coordinates. The corners are intended
to lie inside the bounding box area. As a result, a bounding box that lies
completely inside the image but has maximum extensions would have
coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that
coordinates are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
if y1 > y2:
y2, y1 = y1, y2
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def coords(self):
"""Get the top-left and bottom-right coordinates as one array.
Returns
-------
ndarray
A ``(N, 2)`` numpy array with ``N=2`` containing the top-left
and bottom-right coordinates.
"""
arr = np.empty((2, 2), dtype=np.float32)
arr[0, :] = (self.x1, self.y1)
arr[1, :] = (self.x2, self.y2)
return arr
@property
def x1_int(self):
"""Get the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x1))
@property
def y1_int(self):
"""Get the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y1))
@property
def x2_int(self):
"""Get the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
X-coordinate of the bottom left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x2))
@property
def y2_int(self):
"""Get the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
Y-coordinate of the bottom left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y2))
@property
def height(self):
"""Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. ``height * width``.
"""
return self.height * self.width
# TODO add test for tuple of number
def contains(self, other):
"""Estimate whether the bounding box contains a given point.
Parameters
----------
other : tuple of number or imgaug.augmentables.kps.Keypoint
Point to check for.
Returns
-------
bool
``True`` if the point is contained in the bounding box,
``False`` otherwise.
"""
if isinstance(other, tuple):
x, y = other
else:
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
# TODO add tests for ndarray inputs
def project(self, from_shape, to_shape):
"""Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
``x1=(10 of 100 pixels)`` and ``y1=(20 of 100 pixels)`` and is
projected onto a new image with size ``(width=200, height=200)``,
its new position will be ``(x1=20, y1=40)``.
(Analogous for ``x2``/``y2``.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
imgaug.augmentables.bbs.BoundingBox
``BoundingBox`` instance with new coordinates.
"""
coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],
from_shape, to_shape)
return self.copy(
x1=coords_proj[0][0],
y1=coords_proj[0][1],
x2=coords_proj[1][0],
y2=coords_proj[1][1],
label=self.label)
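    # Illustrative example (not from the original source): projecting from a
    # (100, 100) image onto a (200, 200) image doubles both coordinates, e.g.
    #     BoundingBox(x1=10, y1=20, x2=30, y2=40).project((100, 100), (200, 200))
    # gives BoundingBox(x1=20, y1=40, x2=60, y2=80).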
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all
sides.
top : number, optional
Value by which to extend the bounding box size along its top
side.
right : number, optional
Value by which to extend the bounding box size along its right
side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom
side.
left : number, optional
Value by which to extend the bounding box size along its left
side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""Compute the intersection BB between this BB and another BB.
Note that in extreme cases, the intersection can be a single point.
In that case the intersection bounding box exists and it will be
returned, but it will have a height and width of zero.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.augmentables.bbs.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is
an intersection.
If there is no intersection, the default value will be returned,
which can by anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""Compute the union BB between this BB and another BB.
This is equivalent to drawing a bounding box around all corner points
of both bounding boxes.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""Compute the IoU between this bounding box and another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B))
/ (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
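    # Worked example (illustrative, not from the original source): for
    #     a = BoundingBox(x1=0, y1=0, x2=2, y2=2)   # area 4
    #     b = BoundingBox(x1=1, y1=1, x2=3, y2=3)   # area 4
    # the intersection is BoundingBox(1, 1, 2, 2) with area 1, so
    #     a.iou(b) == 1.0 / (4 + 4 - 1) ~= 0.143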
def is_fully_within_image(self, image):
"""Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is fully inside the image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return (
self.x1 >= 0
and self.x2 < width
and self.y1 >= 0
and self.y2 < height)
def is_partly_within_image(self, image):
"""Estimate whether the BB is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is at least partially inside the
image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""Estimate whether the BB is partially/fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
fully : bool, optional
Whether to return ``True`` if the bounding box is fully outside
of the image area.
partly : bool, optional
Whether to return ``True`` if the bounding box is at least
            partially outside of the image area.
Returns
-------
bool
``True`` if the bounding box is partially/fully outside of the
image area, depending on defined parameters.
``False`` otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
return fully
@ia.deprecated(alt_func="BoundingBox.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self, *args, **kwargs):
return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
"""Clip off all parts of the BB box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
        assert height > 0, (
            "Expected image with height>0, got shape %s." % (shape,))
        assert width > 0, (
            "Expected image with width>0, got shape %s." % (shape,))
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
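    # Illustrative example (not from the original source): clipping
    #     BoundingBox(x1=-10, y1=-10, x2=50, y2=50)
    # against an image of shape (32, 32, 3) moves the top-left corner to (0, 0)
    # and the bottom-right corner to (32 - eps, 32 - eps).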
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move this bounding box along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift this object *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift this object *from* the
right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift this object *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift this object *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray
The image onto which to draw the bounding box.
Currently expected to be ``uint8``.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the
image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where ``1.0`` denotes
no transparency and ``0.0`` is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is
larger than ``1``, then additional pixels will be added around
the bounding box (i.e. extension towards the outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of
the image. If set to ``False``, no error will be raised and only
the parts inside the image will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
"""
if thickness is not None:
ia.warn_deprecated(
"Usage of argument 'thickness' in BoundingBox.draw_on_image() "
"is deprecated. The argument was renamed to 'size'.")
size = thickness
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception(
"Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f "
"on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(size):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case
# of drawing means that the border lies just barely outside of
# the image, making the border disappear, even though the BB is
# fully inside the image. Here we correct for that because of
# beauty reasons. Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if ia.is_float_array(result):
# TODO use blend_alpha here
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255).astype(input_dtype)
return result
# TODO add tests for pad and pad_max
def extract_from_image(self, image, pad=True, pad_max=None,
prevent_zero_size=True):
"""Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is
partially/fully outside of the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
The maximum number of pixels that may be zero-paded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent the height or width of the extracted image from
becoming zero.
If this is set to ``True`` and the height or width of the bounding
box is below ``1``, the height/width will be increased to ``1``.
This can be useful to prevent problems, e.g. with image saving or
plotting.
If it is set to ``False``, images will be returned as ``(H', W')``
or ``(H', W', 3)`` with ``H`` or ``W`` potentially being 0.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box
is partially/fully outside of the image.
            If `prevent_zero_size` is activated, it is guaranteed that
``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case of
# extraction leads to a black border, which is both ugly and
# unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons. Same is the case for x coordinates.
fully_within = self.is_fully_within_image(image)
if fully_within:
y1, y2 = np.clip([y1, y2], 0, height-1)
x1, x2 = np.clip([x1, x2], 0, width-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
if pad:
# if the bb is outside of the image area, the following pads the
# image first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that
# are natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + pad_left
width = width + pad_left
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + pad_top
height = height + pad_top
y1 = 0
if x2 >= width:
pad_right = x2 - width
if y2 >= height:
pad_bottom = y2 - height
paddings = [pad_top, pad_right, pad_bottom, pad_left]
any_padded = any([val > 0 for val in paddings])
if any_padded:
if pad_max is None:
pad_max = max(paddings)
image = ia.pad(
image,
top=min(pad_top, pad_max),
right=min(pad_right, pad_max),
bottom=min(pad_bottom, pad_max),
left=min(pad_left, pad_max)
)
return image[y1:y2, x1:x2]
else:
            # all four corner coordinates must lie inside the image area
            # for a direct crop without padding
            within_image = (
                0 <= x1 < width
                and 0 <= y1 < height
                and 0 <= x2 < width
                and 0 <= y2 < height
            )
out_height, out_width = (y2 - y1), (x2 - x1)
nonzero_height = (out_height > 0)
nonzero_width = (out_width > 0)
if within_image and nonzero_height and nonzero_width:
return image[y1:y2, x1:x2]
if prevent_zero_size:
out_height = 1
out_width = 1
else:
out_height = 0
out_width = 0
if image.ndim == 2:
return np.zeros((out_height, out_width), dtype=image.dtype)
return np.zeros((out_height, out_width, image.shape[-1]),
dtype=image.dtype)
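    # Illustrative example (not from the original source): with pad=True,
    #     BoundingBox(x1=-4, y1=-4, x2=4, y2=4).extract_from_image(image)
    # on a (32, 32, 3) image returns an (8, 8, 3) array in which the region
    # lying outside of the image is zero-padded.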
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""Convert the BB's corners to keypoints (clockwise, from top left).
Returns
-------
list of imgaug.augmentables.kps.Keypoint
Corners of the bounding box as keypoints.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def coords_almost_equals(self, other, max_distance=1e-4):
"""Estimate if this and another BB have almost identical coordinates.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other bounding box with which to compare this one.
If this is an ``iterable``, it is assumed to represent the top-left
and bottom-right coordinates of that bounding box, given as e.g.
an ``(2,2)`` ndarray or an ``(4,)`` ndarray or as a similar list.
max_distance : number, optional
The maximum euclidean distance between a corner on one bounding
box and the closest corner on the other bounding box. If the
distance is exceeded for any such pair, the two BBs are not
viewed as equal.
Returns
-------
bool
Whether the two bounding boxes have almost identical corner
coordinates.
"""
if ia.is_np_array(other):
# we use flat here in case other is (N,2) instead of (4,)
coords_b = other.flat
elif ia.is_iterable(other):
coords_b = list(ia.flatten(other))
else:
assert isinstance(other, BoundingBox), (
"Expected 'other' to be an iterable containing two "
"(x,y)-coordinate pairs or a BoundingBox. "
"Got type %s." % (type(other),))
coords_b = other.coords.flat
coords_a = self.coords
return np.allclose(coords_a.flat, coords_b, atol=max_distance, rtol=0)
def almost_equals(self, other, max_distance=1e-4):
"""Compare this and another BB's label and coordinates.
This is the same as
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals` but
additionally compares the labels.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other object to compare against. Expected to be a
``BoundingBox``.
max_distance : number, optional
See
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals`.
Returns
-------
bool
``True`` if the coordinates are almost equal and additionally
the labels are equal. Otherwise ``False``.
"""
if self.label != other.label:
return False
return self.coords_almost_equals(other, max_distance=max_distance)
@classmethod
def from_point_soup(cls, xy):
"""Convert a ``(2P,) or (P,2) ndarray`` to a BB instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xy : (2P,) ndarray or (P, 2) array or iterable of number or iterable of iterable of number
Array containing ``P`` points in xy-form denoting a soup of
points around which to place a bounding box.
The array should usually be of dtype ``float32``.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box around the points.
"""
xy = np.array(xy, dtype=np.float32)
assert len(xy) > 0, (
"Expected to get at least one point to place a bounding box "
"around, got shape %s." % (xy.shape,))
assert xy.ndim == 1 or (xy.ndim == 2 and xy.shape[-1] == 2), (
"Expected input array of shape (P,) or (P, 2), "
"got shape %s." % (xy.shape,))
if xy.ndim == 1:
xy = xy.reshape((-1, 2))
x1, y1 = np.min(xy, axis=0)
x2, y2 = np.max(xy, axis=0)
return cls(x1=x1, y1=y1, x2=x2, y2=y2)
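    # Illustrative example (not from the original source):
    #     BoundingBox.from_point_soup(np.float32([[1, 2], [5, 3], [3, 9]]))
    # places a box tightly around the point soup and returns
    #     BoundingBox(x1=1.0, y1=2.0, x2=5.0, y2=9.0)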
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""Create a shallow copy of this BoundingBox instance.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=copy.deepcopy(self.label) if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Deep copy.
"""
# TODO write specific copy routine with deepcopy for label and remove
# the deepcopy from copy()
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""Container for the list of all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox
List of bounding boxes on the image.
shape : tuple of int or ndarray
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting
such an image shape.
Examples
--------
>>> import numpy as np
>>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
>>>
>>> image = np.zeros((100, 100))
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
self.shape = normalize_shape(shape)
@property
def items(self):
"""Get the bounding boxes in this container.
Returns
-------
list of BoundingBox
Bounding boxes within this container.
"""
return self.bounding_boxes
# TODO remove this? here it is image height, but in BoundingBox it is
# bounding box height
@property
def height(self):
"""Get the height of the image on which the bounding boxes fall.
Returns
-------
int
Image height.
"""
return self.shape[0]
# TODO remove this? here it is image width, but in BoundingBox it is
# bounding box width
@property
def width(self):
"""Get the width of the image on which the bounding boxes fall.
Returns
-------
int
Image width.
"""
return self.shape[1]
@property
def empty(self):
"""Determine whether this instance contains zero bounding boxes.
Returns
-------
bool
True if this object contains zero bounding boxes.
"""
return len(self.bounding_boxes) == 0
def on(self, image):
"""Project bounding boxes from one image (shape) to a another one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing the same bounding boxes after projection to
the new image shape.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
bounding_boxes = [bb.project(self.shape, shape)
for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
@classmethod
def from_xyxy_array(cls, xyxy, shape):
"""Convert an ``(N, 4) or (N, 2, 2) ndarray`` to a BBsOI instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N, 4) ndarray or (N, 2, 2) array
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by its top-left and bottom-right
coordinates.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided corner coordinates.
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 2)
if xyxy.shape[0] == 0:
return BoundingBoxesOnImage([], shape)
assert (
(xyxy.ndim == 2 and xyxy.shape[-1] == 4)
or (xyxy.ndim == 3 and xyxy.shape[1:3] == (2, 2))), (
"Expected input array of shape (N, 4) or (N, 2, 2), "
"got shape %s." % (xyxy.shape,))
xyxy = xyxy.reshape((-1, 2, 2))
boxes = [BoundingBox.from_point_soup(row) for row in xyxy]
return cls(boxes, shape)
@classmethod
def from_point_soups(cls, xy, shape):
"""Convert an ``(N, 2P) or (N, P, 2) ndarray`` to a BBsOI instance.
Parameters
----------
xy : (N, 2P) ndarray or (N, P, 2) array or iterable of iterable of number or iterable of iterable of iterable of number
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by a soup of ``P`` points.
If ``(N, P)`` then the second axis is expected to be in
xy-form (e.g. ``x1``, ``y1``, ``x2``, ``y2``, ...).
The final bounding box coordinates will be derived using ``min``
and ``max`` operations on the xy-values.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided point soups.
"""
xy = np.array(xy, dtype=np.float32)
# from_xy_array() already checks the ndim/shape, so we don't have to
# do it here
boxes = [BoundingBox.from_point_soup(row) for row in xy]
return cls(boxes, shape)
def to_xyxy_array(self, dtype=np.float32):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,4) ndarray``.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
``(N,4) ndarray``, where ``N`` denotes the number of bounding
boxes and ``4`` denotes the top-left and bottom-right bounding
box corner coordinates in form ``(x1, y1, x2, y2)``.
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype)
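    # Illustrative round trip (not from the original source): for any instance
    # bbs_oi,
    #     arr = bbs_oi.to_xyxy_array()
    #     restored = BoundingBoxesOnImage.from_xyxy_array(arr, shape=bbs_oi.shape)
    # reproduces the same corner coordinates, as the two methods are inverses
    # of each other (box labels are not preserved by the array form).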
def to_xy_array(self):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,2) ndarray``.
Returns
-------
ndarray
``(2*B,2) ndarray`` of xy-coordinates, where ``B`` denotes the
number of bounding boxes.
"""
return self.to_xyxy_array().reshape((-1, 2))
def fill_from_xyxy_array_(self, xyxy):
"""Modify the BB coordinates of this instance in-place.
.. note ::
            This currently expects exactly one entry in `xyxy` per bounding
            box in this instance. (I.e. two corner coordinates per bounding box.)
Otherwise, an ``AssertionError`` will be raised.
.. note ::
This method will automatically flip x-coordinates if ``x1>x2``
for a bounding box. (Analogous for y-coordinates.)
Parameters
----------
xyxy : (N, 4) ndarray or iterable of iterable of number
Coordinates of ``N`` bounding boxes on an image, given as
a ``(N,4)`` array of two corner xy-coordinates per bounding box.
``N`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 4)
assert xyxy.shape[0] == 0 or (xyxy.ndim == 2 and xyxy.shape[-1] == 4), (
"Expected input array to have shape (N,4), "
"got shape %s." % (xyxy.shape,))
assert len(xyxy) == len(self.bounding_boxes), (
"Expected to receive an array with as many rows there are "
"bounding boxes in this instance. Got %d rows, expected %d." % (
len(xyxy), len(self.bounding_boxes)))
for bb, (x1, y1, x2, y2) in zip(self.bounding_boxes, xyxy):
bb.x1 = min([x1, x2])
bb.y1 = min([y1, y2])
bb.x2 = max([x1, x2])
bb.y2 = max([y1, y2])
return self
def fill_from_xy_array_(self, xy):
"""Modify the BB coordinates of this instance in-place.
See
:func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`.
Parameters
----------
xy : (2*B, 2) ndarray or iterable of iterable of number
Coordinates of ``B`` bounding boxes on an image, given as
a ``(2*B,2)`` array of two corner xy-coordinates per bounding box.
``B`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xy = np.array(xy, dtype=np.float32)
return self.fill_from_xyxy_array_(xy.reshape((-1, 4)))
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as set in
``BoundingBoxesOnImage.shape``.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes.
If a single ``int`` ``C``, then that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image
def remove_out_of_image(self, fully=True, partly=False):
"""Remove all BBs that are fully/partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the
image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of
the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were
fully/partially outside of the image being removed.
"""
bbs_clean = [
bb
for bb
in self.bounding_boxes
if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
@ia.deprecated(alt_func="BoundingBoxesOnImage.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self):
return self.clip_out_of_image()
def clip_out_of_image(self):
"""Clip off all parts from all BBs that are outside of the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [
bb.clip_out_of_image(self.shape)
for bb
in self.bounding_boxes
if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move all all BBs along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all objects *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift all objects *from* the
            right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift all objects *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift all objects *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [
bb.shift(top=top, right=right, bottom=bottom, left=left)
for bb
in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def to_keypoints_on_image(self):
"""Convert the bounding boxes to one ``KeypointsOnImage`` instance.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
A keypoints instance containing ``N*4`` coordinates for ``N``
bounding boxes. Order matches the order in ``bounding_boxes``.
"""
from .kps import KeypointsOnImage
# This currently uses 4 points instead of 2 points as the method
# is primarily used during augmentation and 4 points are overall
# the better choice there.
arr = np.zeros((len(self.bounding_boxes), 2*4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
arr[i] = [
box.x1, box.y1,
box.x2, box.y1,
box.x2, box.y2,
box.x1, box.y2
]
return KeypointsOnImage.from_xy_array(
arr.reshape((-1, 2)),
shape=self.shape
)
def invert_to_keypoints_on_image_(self, kpsoi):
"""Invert the output of ``to_keypoints_on_image()`` in-place.
This function writes in-place into this ``BoundingBoxesOnImage``
instance.
Parameters
----------
kpsoi : imgaug.augmentables.kps.KeypointsOnImages
Keypoints to convert back to bounding boxes, i.e. the outputs
of ``to_keypoints_on_image()``.
Returns
-------
BoundingBoxesOnImage
Bounding boxes container with updated coordinates.
Note that the instance is also updated in-place.
"""
assert len(kpsoi.keypoints) == len(self.bounding_boxes) * 4, (
"Expected %d coordinates, got %d." % (
                len(self.bounding_boxes) * 4, len(kpsoi.keypoints)))
for i, bb in enumerate(self.bounding_boxes):
xx = [kpsoi.keypoints[4*i+0].x, kpsoi.keypoints[4*i+1].x,
kpsoi.keypoints[4*i+2].x, kpsoi.keypoints[4*i+3].x]
yy = [kpsoi.keypoints[4*i+0].y, kpsoi.keypoints[4*i+1].y,
kpsoi.keypoints[4*i+2].y, kpsoi.keypoints[4*i+3].y]
bb.x1 = min(xx)
bb.y1 = min(yy)
bb.x2 = max(xx)
bb.y2 = max(yy)
self.shape = kpsoi.shape
return self
def copy(self):
"""Create a shallow copy of the ``BoundingBoxesOnImage`` instance.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""Create a deep copy of the ``BoundingBoxesOnImage`` object.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return (
"BoundingBoxesOnImage(%s, shape=%s)"
% (str(self.bounding_boxes), self.shape))
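# Hedged usage sketch (illustrative only, not part of the original module):
# the two classes above are typically combined roughly as follows.
#     import numpy as np
#     from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
#     image = np.zeros((64, 64, 3), dtype=np.uint8)
#     bbs_oi = BoundingBoxesOnImage(
#         [BoundingBox(x1=10, y1=10, x2=30, y2=40, label="person")],
#         shape=image.shape)
#     bbs_oi = bbs_oi.shift(left=5).clip_out_of_image()
#     image_drawn = bbs_oi.draw_on_image(image, size=2)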
|
[
"numpy.clip",
"numpy.copy",
"numpy.uint8",
"numpy.allclose",
"copy.deepcopy",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.min",
"numpy.finfo",
"copy.copy",
"imgaug.augmentables.kps.Keypoint",
"numpy.round"
] |
[((1845, 1879), 'numpy.empty', 'np.empty', (['(2, 2)'], {'dtype': 'np.float32'}), '((2, 2), dtype=np.float32)\n', (1853, 1879), True, 'import numpy as np\n'), ((14585, 14617), 'numpy.clip', 'np.clip', (['self.x1', '(0)', '(width - eps)'], {}), '(self.x1, 0, width - eps)\n', (14592, 14617), True, 'import numpy as np\n'), ((14631, 14663), 'numpy.clip', 'np.clip', (['self.x2', '(0)', '(width - eps)'], {}), '(self.x2, 0, width - eps)\n', (14638, 14663), True, 'import numpy as np\n'), ((14677, 14710), 'numpy.clip', 'np.clip', (['self.y1', '(0)', '(height - eps)'], {}), '(self.y1, 0, height - eps)\n', (14684, 14710), True, 'import numpy as np\n'), ((14724, 14757), 'numpy.clip', 'np.clip', (['self.y2', '(0)', '(height - eps)'], {}), '(self.y2, 0, height - eps)\n', (14731, 14757), True, 'import numpy as np\n'), ((27448, 27511), 'numpy.allclose', 'np.allclose', (['coords_a.flat', 'coords_b'], {'atol': 'max_distance', 'rtol': '(0)'}), '(coords_a.flat, coords_b, atol=max_distance, rtol=0)\n', (27459, 27511), True, 'import numpy as np\n'), ((29124, 29154), 'numpy.array', 'np.array', (['xy'], {'dtype': 'np.float32'}), '(xy, dtype=np.float32)\n', (29132, 29154), True, 'import numpy as np\n'), ((29568, 29586), 'numpy.min', 'np.min', (['xy'], {'axis': '(0)'}), '(xy, axis=0)\n', (29574, 29586), True, 'import numpy as np\n'), ((29604, 29622), 'numpy.max', 'np.max', (['xy'], {'axis': '(0)'}), '(xy, axis=0)\n', (29610, 29622), True, 'import numpy as np\n'), ((36293, 36325), 'numpy.array', 'np.array', (['xyxy'], {'dtype': 'np.float32'}), '(xyxy, dtype=np.float32)\n', (36301, 36325), True, 'import numpy as np\n'), ((38022, 38052), 'numpy.array', 'np.array', (['xy'], {'dtype': 'np.float32'}), '(xy, dtype=np.float32)\n', (38030, 38052), True, 'import numpy as np\n'), ((40495, 40527), 'numpy.array', 'np.array', (['xyxy'], {'dtype': 'np.float32'}), '(xyxy, dtype=np.float32)\n', (40503, 40527), True, 'import numpy as np\n'), ((41999, 42029), 'numpy.array', 'np.array', (['xy'], {'dtype': 'np.float32'}), '(xy, dtype=np.float32)\n', (42007, 42029), True, 'import numpy as np\n'), ((49025, 49040), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (49034, 49040), False, 'import copy\n'), ((2345, 2362), 'numpy.round', 'np.round', (['self.x1'], {}), '(self.x1)\n', (2353, 2362), True, 'import numpy as np\n'), ((2732, 2749), 'numpy.round', 'np.round', (['self.y1'], {}), '(self.y1)\n', (2740, 2749), True, 'import numpy as np\n'), ((3125, 3142), 'numpy.round', 'np.round', (['self.x2'], {}), '(self.x2)\n', (3133, 3142), True, 'import numpy as np\n'), ((3518, 3535), 'numpy.round', 'np.round', (['self.y2'], {}), '(self.y2)\n', (3526, 3535), True, 'import numpy as np\n'), ((12070, 12090), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (12078, 12090), True, 'import numpy as np\n'), ((14547, 14567), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (14555, 14567), True, 'import numpy as np\n'), ((18292, 18306), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (18299, 18306), True, 'import numpy as np\n'), ((18392, 18407), 'numpy.uint8', 'np.uint8', (['color'], {}), '(color)\n', (18400, 18407), True, 'import numpy as np\n'), ((22753, 22785), 'numpy.clip', 'np.clip', (['[y1, y2]', '(0)', '(height - 1)'], {}), '([y1, y2], 0, height - 1)\n', (22760, 22785), True, 'import numpy as np\n'), ((22805, 22836), 'numpy.clip', 'np.clip', (['[x1, x2]', '(0)', '(width - 1)'], {}), '([x1, x2], 0, width - 1)\n', (22812, 22836), True, 'import numpy as np\n'), ((25169, 25238), 'numpy.zeros', 'np.zeros', 
(['(out_height, out_width, image.shape[-1])'], {'dtype': 'image.dtype'}), '((out_height, out_width, image.shape[-1]), dtype=image.dtype)\n', (25177, 25238), True, 'import numpy as np\n'), ((25727, 25757), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x1', 'y': 'self.y1'}), '(x=self.x1, y=self.y1)\n', (25735, 25757), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((25771, 25801), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x2', 'y': 'self.y1'}), '(x=self.x2, y=self.y1)\n', (25779, 25801), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((25815, 25845), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x2', 'y': 'self.y2'}), '(x=self.x2, y=self.y2)\n', (25823, 25845), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((25859, 25889), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'self.x1', 'y': 'self.y2'}), '(x=self.x1, y=self.y2)\n', (25867, 25889), False, 'from imgaug.augmentables.kps import Keypoint\n'), ((43348, 43362), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (43355, 43362), True, 'import numpy as np\n'), ((19038, 19072), 'numpy.clip', 'np.clip', (['y1', '(0)', '(image.shape[0] - 1)'], {}), '(y1, 0, image.shape[0] - 1)\n', (19045, 19072), True, 'import numpy as np\n'), ((19092, 19126), 'numpy.clip', 'np.clip', (['y2', '(0)', '(image.shape[0] - 1)'], {}), '(y2, 0, image.shape[0] - 1)\n', (19099, 19126), True, 'import numpy as np\n'), ((19146, 19180), 'numpy.clip', 'np.clip', (['x1', '(0)', '(image.shape[1] - 1)'], {}), '(x1, 0, image.shape[1] - 1)\n', (19153, 19180), True, 'import numpy as np\n'), ((19200, 19234), 'numpy.clip', 'np.clip', (['x2', '(0)', '(image.shape[1] - 1)'], {}), '(x2, 0, image.shape[1] - 1)\n', (19207, 19234), True, 'import numpy as np\n'), ((25097, 25149), 'numpy.zeros', 'np.zeros', (['(out_height, out_width)'], {'dtype': 'image.dtype'}), '((out_height, out_width), dtype=image.dtype)\n', (25105, 25149), True, 'import numpy as np\n'), ((19746, 19769), 'numpy.clip', 'np.clip', (['result', '(0)', '(255)'], {}), '(result, 0, 255)\n', (19753, 19769), True, 'import numpy as np\n'), ((30907, 30932), 'copy.deepcopy', 'copy.deepcopy', (['self.label'], {}), '(self.label)\n', (30920, 30932), False, 'import copy\n'), ((20062, 20085), 'numpy.clip', 'np.clip', (['result', '(0)', '(255)'], {}), '(result, 0, 255)\n', (20069, 20085), True, 'import numpy as np\n')]
|
from typing import Callable, Tuple
import numpy as np
def posterior_factory(y: np.ndarray, sigma_y: float, sigma_theta: float) -> Tuple[Callable]:
"""The banana distribution is a distribution that exhibits a characteristic
banana-shaped ridge that resembles the posterior that can emerge from
models that are not identifiable. The distribution is the posterior of the
following generative model.
y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y)
theta[i] ~ Normal(0, sigma_sq_theta)
Args:
y: Observations of the banana model.
sigma_y: Standard deviation of the observations.
sigma_theta: Standard deviation of prior over linear coefficients.
Returns:
log_posterior: Function to compute the log-posterior.
metric: Function to compute the Fisher information metric.
euclidean_auxiliaries: Function to compute the log-posterior and its
gradient.
riemannian_auxiliaries: Function to compute the log-posterior, the
gradient of the log-posterior, the Fisher information metric, and the
derivatives of the Fisher information metric.
"""
sigma_sq_y = np.square(sigma_y)
sigma_sq_theta = np.square(sigma_theta)
def log_posterior(theta: np.ndarray) -> float:
"""The banana-shaped distribution posterior.
Args:
theta: Linear coefficients.
Returns:
out: The log-posterior of the banana-shaped distribution.
"""
p = theta[0] + np.square(theta[1])
ll = -0.5 / sigma_sq_y * np.square(y - p).sum()
lp = -0.5 / sigma_sq_theta * np.square(theta).sum()
return ll + lp
def grad_log_posterior(theta: np.ndarray) -> np.ndarray:
"""Gradient of the banana-shaped distribution with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
out: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
p = theta[0] + np.square(theta[1])
d = np.sum(y - p)
ga = d / sigma_sq_y - theta[0] / sigma_sq_theta
gb = 2.0*d / sigma_sq_y * theta[1] - theta[1] / sigma_sq_theta
return np.hstack((ga, gb))
def metric(theta: np.ndarray) -> np.ndarray:
"""The Fisher information is the negative expected outer product of the
gradient of the posterior.
Args:
theta: Linear coefficients.
Returns:
G: The Fisher information metric of the banana-shaped distribution.
"""
n = y.size
s = 2.0*n*theta[1] / sigma_sq_y
G = np.array([[n / sigma_sq_y + 1.0 / sigma_sq_theta, s],
[s, 4.0*n*np.square(theta[1]) / sigma_sq_y + 1.0 / sigma_sq_theta]])
return G
def grad_metric(theta: np.ndarray) -> np.ndarray:
"""The gradient of the Fisher information metric with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
n = y.size
dG = np.array([
[[0.0, 0.0], [0.0, 2.0*n / sigma_sq_y]],
[[0.0, 2.0*n / sigma_sq_y], [0.0, 8.0*n*theta[1] / sigma_sq_y]]
])
return dG
def euclidean_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior and the gradient of the
log-posterior.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
return lp, glp
def riemannian_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior, the gradient of the log-posterior,
the Fisher information metric and the derivatives of the Fisher
information metric.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
G: The Fisher information metric of the banana-shaped distribution.
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
G = metric(theta)
dG = grad_metric(theta)
return lp, glp, G, dG
def log_posterior_and_metric(theta: np.ndarray) -> Tuple[np.ndarray]:
lp = log_posterior(theta)
G = metric(theta)
return lp, G
return log_posterior, metric, log_posterior_and_metric, euclidean_auxiliaries, riemannian_auxiliaries
def generate_data(t: float, sigma_y: float, sigma_theta: float, num_obs: int) -> np.ndarray:
"""Generate data from the banana-shaped posterior distribution.
Args:
t: Free-parameter determining the thetas.
sigma_y: Noise standard deviation.
sigma_theta: Prior standard deviation over the thetas.
num_obs: Number of observations to generate.
Returns:
theta: Linear coefficients of the banana-shaped distribution.
y: Observations from the unidentifiable model.
"""
theta = np.array([t, np.sqrt(1.0 - t)])
y = theta[0] + np.square(theta[1]) + sigma_y * np.random.normal(size=(num_obs, ))
return theta, y
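# Hedged usage sketch (added for illustration; not part of the original module,
# and the t/sigma/num_obs values below are arbitrary assumptions):
theta_true, y_obs = generate_data(t=0.5, sigma_y=2.0, sigma_theta=2.0, num_obs=100)
log_post, fisher_metric = posterior_factory(y_obs, sigma_y=2.0, sigma_theta=2.0)[:2]
print(log_post(theta_true), fisher_metric(theta_true))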
|
[
"numpy.random.normal",
"numpy.sqrt",
"numpy.hstack",
"numpy.square",
"numpy.sum",
"numpy.array"
] |
[((1187, 1205), 'numpy.square', 'np.square', (['sigma_y'], {}), '(sigma_y)\n', (1196, 1205), True, 'import numpy as np\n'), ((1227, 1249), 'numpy.square', 'np.square', (['sigma_theta'], {}), '(sigma_theta)\n', (1236, 1249), True, 'import numpy as np\n'), ((2141, 2154), 'numpy.sum', 'np.sum', (['(y - p)'], {}), '(y - p)\n', (2147, 2154), True, 'import numpy as np\n'), ((2297, 2316), 'numpy.hstack', 'np.hstack', (['(ga, gb)'], {}), '((ga, gb))\n', (2306, 2316), True, 'import numpy as np\n'), ((3279, 3407), 'numpy.array', 'np.array', (['[[[0.0, 0.0], [0.0, 2.0 * n / sigma_sq_y]], [[0.0, 2.0 * n / sigma_sq_y], [\n 0.0, 8.0 * n * theta[1] / sigma_sq_y]]]'], {}), '([[[0.0, 0.0], [0.0, 2.0 * n / sigma_sq_y]], [[0.0, 2.0 * n /\n sigma_sq_y], [0.0, 8.0 * n * theta[1] / sigma_sq_y]]])\n', (3287, 3407), True, 'import numpy as np\n'), ((1534, 1553), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (1543, 1553), True, 'import numpy as np\n'), ((2109, 2128), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (2118, 2128), True, 'import numpy as np\n'), ((5746, 5762), 'numpy.sqrt', 'np.sqrt', (['(1.0 - t)'], {}), '(1.0 - t)\n', (5753, 5762), True, 'import numpy as np\n'), ((5784, 5803), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (5793, 5803), True, 'import numpy as np\n'), ((5816, 5849), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_obs,)'}), '(size=(num_obs,))\n', (5832, 5849), True, 'import numpy as np\n'), ((1587, 1603), 'numpy.square', 'np.square', (['(y - p)'], {}), '(y - p)\n', (1596, 1603), True, 'import numpy as np\n'), ((1647, 1663), 'numpy.square', 'np.square', (['theta'], {}), '(theta)\n', (1656, 1663), True, 'import numpy as np\n'), ((2805, 2824), 'numpy.square', 'np.square', (['theta[1]'], {}), '(theta[1])\n', (2814, 2824), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"numpy.array",
"unittest.skipIf",
"wisdem.optimization_drivers.dakota_driver.DakotaOptimizer"
] |
[((243, 310), 'unittest.skipIf', 'unittest.skipIf', (['(dakota is None)', '"""only run if Dakota is installed."""'], {}), "(dakota is None, 'only run if Dakota is installed.')\n", (258, 310), False, 'import unittest\n'), ((3093, 3108), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3106, 3108), False, 'import unittest\n'), ((817, 846), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (832, 846), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((1468, 1497), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (1483, 1497), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((2146, 2175), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (2161, 2175), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((2797, 2826), 'wisdem.optimization_drivers.dakota_driver.DakotaOptimizer', 'DakotaOptimizer', (['template_dir'], {}), '(template_dir)\n', (2812, 2826), False, 'from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer\n'), ((428, 462), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (436, 462), True, 'import numpy as np\n'), ((489, 510), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (497, 510), True, 'import numpy as np\n'), ((1069, 1103), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (1077, 1103), True, 'import numpy as np\n'), ((1130, 1151), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (1138, 1151), True, 'import numpy as np\n'), ((1731, 1765), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (1739, 1765), True, 'import numpy as np\n'), ((1809, 1830), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (1817, 1830), True, 'import numpy as np\n'), ((2397, 2431), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (2405, 2431), True, 'import numpy as np\n'), ((2458, 2479), 'numpy.array', 'np.array', (['[0.0, 0.25]'], {}), '([0.0, 0.25])\n', (2466, 2479), True, 'import numpy as np\n'), ((980, 1002), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (988, 1002), True, 'import numpy as np\n'), ((1631, 1653), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (1639, 1653), True, 'import numpy as np\n'), ((2309, 2331), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (2317, 2331), True, 'import numpy as np\n'), ((2960, 2982), 'numpy.array', 'np.array', (["results['y']"], {}), "(results['y'])\n", (2968, 2982), True, 'import numpy as np\n'), ((3024, 3048), 'numpy.array', 'np.array', (["results['con']"], {}), "(results['con'])\n", (3032, 3048), True, 'import numpy as np\n')]
|
#-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab4_runTFCurveFitting.py
This is an example for linear regression in tensorflow
Which is a curve fitting example
written by <NAME> @ Aug 2017
#------------------------------------------------------------
'''
from os import getcwd
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# from __future__ import print_function
# Preparing data set ================================================
from tensorflow.examples.tutorials.mnist import input_data
# generation of sinusoid data set
total_size = 5000
training_size = 4000
validation_size = total_size - training_size
xsize = 50 # the size of single x_data
x_data = np.zeros([xsize, total_size])
cos_x = np.zeros([xsize, total_size])
mag = 1.0
phase_rad = np.pi/4
rad_freq = np.pi / 2.0
for i in range(total_size):
x_data[:,i] = np.linspace(-4,4,xsize)
cos_x = np.cos(rad_freq*x_data + phase_rad)
noise_var = 0.01
noise = np.sqrt(noise_var) * np.random.randn(xsize,total_size)
y_clean = cos_x
y_data = y_clean + noise
x_training_data = x_data[:,0:training_size]
y_training_data = y_data[:,0:training_size]
x_validation_data = x_data[:,training_size:-1]
y_validation_data = y_data[:,training_size:-1]
# signal plot
# hfig1= plt.figure(1,figsize=[10,10])
# plt.plot(cos_x[:,1],color='b',label='clean')
# plt.plot(y_data[:,1],color='r',label='noisy')
# plt.legend()
# configure training parameters =====================================
learning_rate = 0.01
training_epochs = 20
batch_size = 100
display_step = 1
# computational TF graph construction ================================
##---------------- Define graph nodes -------------------
# tf Graph data input holder
# (x,y) : input / output of prediction model
# which will be fed by training data in the TF graph computation
# (a,b,c,d) : model parameters
# which will be learned from training data in the TF graph computation
x = tf.placeholder(tf.float32, [xsize,None])
y = tf.placeholder(tf.float32, [xsize,None])
# Set model weights which is calculated in the TF graph
a = tf.Variable(1.) # initialization by 1
b = tf.Variable(1.)
c = tf.Variable(1.)
d = tf.Variable(1.)
print ('TF graph nodes are defined')
##--------------------- Define function -----------------
# define relationship between instance data x and label data y
# define optimizer used in the learning phase
# define cost function for optimization
# Construct model
pred_y = c*tf.cos(a*x+b)+d
# Minimize a squared-error cost (summed over each curve, averaged over the batch)
cost = tf.reduce_mean(tf.reduce_sum( tf.square(y - pred_y) , reduction_indices=1), name="mse")
# Gradient Descent
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
print ('Functions in TF graph are ready')
## Performance evaluation model ====================================
# y : data output
# pred_y: prediction output by model, c * cos(a*x + b) + d
correct_prediction = cost
# Calculate error rate using data --------------
# where
# tf_reduce_mean(input_tensor, axis) : reduce dimension of tensor by computing the mean of elements
# # 'x' is [[1., 1.]
# [2., 2.]]
# tf.reduce_mean(x) ==> 1.5
# tf.reduce_mean(x, 0) ==> [1.5, 1.5]
# tf.reduce_mean(x, 1) ==> [1., 2.]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
error_rate_training = np.zeros(training_epochs)
error_rate_validation = np.zeros(training_epochs)
# Launch the graph (execution) ========================================
# Initializing the variables
init = tf.global_variables_initializer()
## -------------------- Learning iteration start --------------------
with tf.Session() as sess:
sess.run(init)  # initialize the graph variables
# Training cycle
for epoch in range(training_epochs): # iteration loop
avg_cost = 0.
total_batch = int(training_size/batch_size) #
# Loop over all batches
for i in range(total_batch): # batch loop
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
# feed training data --------------------------
batch_xs = x_training_data[:,data_start_index:data_end_index]
batch_ys = y_training_data[:,data_start_index:data_end_index]
#----------------------------------------------
# Run optimization op (backprop) and cost op (to get loss value)
# feeding training data
_, local_batch_cost = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost/batch_size))
batch_xs = x_training_data
batch_ys = y_training_data
error_rate_training[epoch] = accuracy.eval({x: batch_xs, y: batch_ys},session=sess)/training_size
error_rate_validation[epoch] = accuracy.eval({x: x_validation_data, y: y_validation_data},session=sess)/validation_size
print("Training set MSE:", error_rate_training[epoch])
print("Validation set MSE:", error_rate_validation[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
pred_a = sess.run(a)
pred_b = sess.run(b)
pred_c = sess.run(c)
pred_d = sess.run(d)
hfig1 = plt.figure(1,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,error_rate_training,label='Training data',color='r',marker='o')
plt.plot(epoch_index,error_rate_validation,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('MSE of prediction:')
plt.xlabel('Iteration epoch')
plt.ylabel('MSE')
hfig2 = plt.figure(2,figsize=(10,10))
pred_y = pred_c * np.cos(pred_a * x_data[:,0] + pred_b) +pred_d
plt.plot(x_validation_data[:,0],y_validation_data[:,0],label='noisy data',color='b',marker='*')
plt.plot(x_validation_data[:,0], pred_y,label='prediction',color='r')
plt.legend()
plt.title('A line fitting example:')
plt.xlabel('X data')
plt.ylabel('Y data')
# FIG_SAVE_DIR = getcwd() + '/figs/'
# hfig1.savefig(FIG_SAVE_DIR + 'runExample_TFLogisticReg_aymeric_ErrRate.png')
# hfig1.clear()
|
[
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"tensorflow.cast",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"tensorflow.Session",
"numpy.linspace",
"tensorflow.square",
"tensorflow.train.AdamOptimizer",
"tensorflow.cos",
"tensorflow.Variable",
"numpy.cos",
"matplotlib.pyplot.title",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"matplotlib.pyplot.figure"
] |
[((989, 1018), 'numpy.zeros', 'np.zeros', (['[xsize, total_size]'], {}), '([xsize, total_size])\n', (997, 1018), True, 'import numpy as np\n'), ((1028, 1057), 'numpy.zeros', 'np.zeros', (['[xsize, total_size]'], {}), '([xsize, total_size])\n', (1036, 1057), True, 'import numpy as np\n'), ((1197, 1234), 'numpy.cos', 'np.cos', (['(rad_freq * x_data + phase_rad)'], {}), '(rad_freq * x_data + phase_rad)\n', (1203, 1234), True, 'import numpy as np\n'), ((2265, 2306), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[xsize, None]'], {}), '(tf.float32, [xsize, None])\n', (2279, 2306), True, 'import tensorflow as tf\n'), ((2310, 2351), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[xsize, None]'], {}), '(tf.float32, [xsize, None])\n', (2324, 2351), True, 'import tensorflow as tf\n'), ((2412, 2428), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2423, 2428), True, 'import tensorflow as tf\n'), ((2454, 2470), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2465, 2470), True, 'import tensorflow as tf\n'), ((2474, 2490), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2485, 2490), True, 'import tensorflow as tf\n'), ((2494, 2510), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (2505, 2510), True, 'import tensorflow as tf\n'), ((3721, 3746), 'numpy.zeros', 'np.zeros', (['training_epochs'], {}), '(training_epochs)\n', (3729, 3746), True, 'import numpy as np\n'), ((3771, 3796), 'numpy.zeros', 'np.zeros', (['training_epochs'], {}), '(training_epochs)\n', (3779, 3796), True, 'import numpy as np\n'), ((3906, 3939), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3937, 3939), True, 'import tensorflow as tf\n'), ((5983, 6014), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(10, 10)'}), '(1, figsize=(10, 10))\n', (5993, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6079, 6171), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_index', 'error_rate_training'], {'label': '"""Training data"""', 'color': '"""r"""', 'marker': '"""o"""'}), "(epoch_index, error_rate_training, label='Training data', color='r',\n marker='o')\n", (6087, 6171), True, 'import matplotlib.pyplot as plt\n'), ((6164, 6261), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_index', 'error_rate_validation'], {'label': '"""Validation data"""', 'color': '"""b"""', 'marker': '"""x"""'}), "(epoch_index, error_rate_validation, label='Validation data', color\n ='b', marker='x')\n", (6172, 6261), True, 'import matplotlib.pyplot as plt\n'), ((6253, 6265), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6263, 6265), True, 'import matplotlib.pyplot as plt\n'), ((6266, 6297), 'matplotlib.pyplot.title', 'plt.title', (['"""MSE of prediction:"""'], {}), "('MSE of prediction:')\n", (6275, 6297), True, 'import matplotlib.pyplot as plt\n'), ((6298, 6327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration epoch"""'], {}), "('Iteration epoch')\n", (6308, 6327), True, 'import matplotlib.pyplot as plt\n'), ((6328, 6345), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSE"""'], {}), "('MSE')\n", (6338, 6345), True, 'import matplotlib.pyplot as plt\n'), ((6355, 6386), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(10, 10)'}), '(2, figsize=(10, 10))\n', (6365, 6386), True, 'import matplotlib.pyplot as plt\n'), ((6451, 6557), 'matplotlib.pyplot.plot', 'plt.plot', (['x_validation_data[:, 0]', 'y_validation_data[:, 0]'], {'label': '"""noisy data"""', 'color': 
'"""b"""', 'marker': '"""*"""'}), "(x_validation_data[:, 0], y_validation_data[:, 0], label=\n 'noisy data', color='b', marker='*')\n", (6459, 6557), True, 'import matplotlib.pyplot as plt\n'), ((6547, 6619), 'matplotlib.pyplot.plot', 'plt.plot', (['x_validation_data[:, 0]', 'pred_y'], {'label': '"""prediction"""', 'color': '"""r"""'}), "(x_validation_data[:, 0], pred_y, label='prediction', color='r')\n", (6555, 6619), True, 'import matplotlib.pyplot as plt\n'), ((6617, 6629), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6627, 6629), True, 'import matplotlib.pyplot as plt\n'), ((6630, 6666), 'matplotlib.pyplot.title', 'plt.title', (['"""A line fitting example:"""'], {}), "('A line fitting example:')\n", (6639, 6666), True, 'import matplotlib.pyplot as plt\n'), ((6667, 6687), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X data"""'], {}), "('X data')\n", (6677, 6687), True, 'import matplotlib.pyplot as plt\n'), ((6688, 6708), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y data"""'], {}), "('Y data')\n", (6698, 6708), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1189), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', 'xsize'], {}), '(-4, 4, xsize)\n', (1175, 1189), True, 'import numpy as np\n'), ((1269, 1287), 'numpy.sqrt', 'np.sqrt', (['noise_var'], {}), '(noise_var)\n', (1276, 1287), True, 'import numpy as np\n'), ((1290, 1324), 'numpy.random.randn', 'np.random.randn', (['xsize', 'total_size'], {}), '(xsize, total_size)\n', (1305, 1324), True, 'import numpy as np\n'), ((3657, 3696), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3664, 3696), True, 'import tensorflow as tf\n'), ((4016, 4028), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4026, 4028), True, 'import tensorflow as tf\n'), ((2784, 2801), 'tensorflow.cos', 'tf.cos', (['(a * x + b)'], {}), '(a * x + b)\n', (2790, 2801), True, 'import tensorflow as tf\n'), ((2874, 2895), 'tensorflow.square', 'tf.square', (['(y - pred_y)'], {}), '(y - pred_y)\n', (2883, 2895), True, 'import tensorflow as tf\n'), ((3042, 3079), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3064, 3079), True, 'import tensorflow as tf\n'), ((6404, 6442), 'numpy.cos', 'np.cos', (['(pred_a * x_data[:, 0] + pred_b)'], {}), '(pred_a * x_data[:, 0] + pred_b)\n', (6410, 6442), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Various utils that should be used during migrations and migrations tests because the AiiDA ORM cannot be used."""
import datetime
import errno
import os
import re
import numpy
from aiida.common import json
ISOFORMAT_DATETIME_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+(\+\d{2}:\d{2})?$')
def ensure_repository_folder_created(uuid):
"""Make sure that the repository sub folder for the node with the given UUID exists or create it.
:param uuid: UUID of the node
"""
dirpath = get_node_repository_sub_folder(uuid)
try:
os.makedirs(dirpath)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def put_object_from_string(uuid, name, content):
"""Write a file with the given content in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
:param content: the content to write to the file
"""
ensure_repository_folder_created(uuid)
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath, 'w', encoding='utf-8') as handle:
handle.write(content)
def get_object_from_repository(uuid, name):
"""Return the content of a file with the given name in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
"""
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath) as handle:
return handle.read()
def get_node_repository_sub_folder(uuid):
"""Return the absolute path to the sub folder `path` within the repository of the node with the given UUID.
:param uuid: UUID of the node
:return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path`
"""
from aiida.manage.configuration import get_profile
uuid = str(uuid)
repo_dirpath = os.path.join(get_profile().repository_path, 'repository')
node_dirpath = os.path.join(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')
return node_dirpath
def get_numpy_array_absolute_path(uuid, name):
"""Return the absolute path of a numpy array with the given name in the repository of the node with the given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
:return: the absolute path of the numpy array file
"""
return os.path.join(get_node_repository_sub_folder(uuid), name + '.npy')
def store_numpy_array_in_repository(uuid, name, array):
"""Store a numpy array in the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:param array: the numpy array to store
"""
ensure_repository_folder_created(uuid)
filepath = get_numpy_array_absolute_path(uuid, name)
with open(filepath, 'wb') as handle:
numpy.save(handle, array)
def delete_numpy_array_from_repository(uuid, name):
"""Delete the numpy array with a given name from the repository corresponding to a node with a given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
try:
os.remove(filepath)
except (IOError, OSError):
pass
def load_numpy_array_from_repository(uuid, name):
"""Load and return a numpy array from the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:return: the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
return numpy.load(filepath)
def recursive_datetime_to_isoformat(value):
"""Convert all datetime objects in the given value to string representations in ISO format.
:param value: a mapping, sequence or single value optionally containing datetime objects
"""
if isinstance(value, list):
return [recursive_datetime_to_isoformat(_) for _ in value]
if isinstance(value, dict):
return dict((key, recursive_datetime_to_isoformat(val)) for key, val in value.items())
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def dumps_json(dictionary):
"""Transforms all datetime object into isoformat and then returns the JSON."""
return json.dumps(recursive_datetime_to_isoformat(dictionary))
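# Hedged usage sketch (illustration only; the exact output string is an assumption
# about key ordering): datetime values are converted to ISO strings before dumping, e.g.
#   dumps_json({'ctime': datetime.datetime(2020, 1, 1, 12, 0)})
#   -> '{"ctime": "2020-01-01T12:00:00"}'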
|
[
"os.makedirs",
"re.compile",
"aiida.manage.configuration.get_profile",
"os.path.join",
"numpy.load",
"numpy.save",
"os.remove"
] |
[((903, 991), 're.compile', 're.compile', (['"""^\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+(\\\\+\\\\d{2}:\\\\d{2})?$"""'], {}), "(\n '^\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+(\\\\+\\\\d{2}:\\\\d{2})?$')\n", (913, 991), False, 'import re\n'), ((2693, 2766), 'os.path.join', 'os.path.join', (['repo_dirpath', '"""node"""', 'uuid[:2]', 'uuid[2:4]', 'uuid[4:]', '"""path"""'], {}), "(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')\n", (2705, 2766), False, 'import os\n'), ((4339, 4359), 'numpy.load', 'numpy.load', (['filepath'], {}), '(filepath)\n', (4349, 4359), False, 'import numpy\n'), ((1237, 1257), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (1248, 1257), False, 'import os\n'), ((3596, 3621), 'numpy.save', 'numpy.save', (['handle', 'array'], {}), '(handle, array)\n', (3606, 3621), False, 'import numpy\n'), ((3954, 3973), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (3963, 3973), False, 'import os\n'), ((2629, 2642), 'aiida.manage.configuration.get_profile', 'get_profile', ([], {}), '()\n', (2640, 2642), False, 'from aiida.manage.configuration import get_profile\n')]
|
"""
matmul autotvm
[batch,in_dim] x [in_dim,out_dim]
search_matmul_config(batch,in_dim,out_dim,num_trials):
input: batch,in_dim,out_dim,num_trials
[batch,in_dim] x [in_dim,out_dim]
num_trials: num of trials, default: 1000
output: log (json format)
use autotvm to search configs for the matmul
lookup_matmul_config():
find a proper matmul config
    note: trades off kernel performance against grid & block size
launch_matmul_from_config(config):
input: config (json string)
usage:
1. use search_matmul_config(batch,in_dim,out_dim,num_trials) to search configs
2. use lookup_matmul_config() to get a proper config
3. write the config (in json format) to "matmul_config.json"
4. use launch_matmul_from_config("matmul_config.json") to print the matmul kernel code
"""
import numpy as np
import tvm
import logging
import sys
from tvm import autotvm
import topi
import json
import os
from topi.util import get_const_tuple
import tensorflow as tf
flags = tf.flags
flags.DEFINE_string("input_path", "", "path of input file")
flags.DEFINE_string("autotvm_log", "../autotvm_logs/all_tuned_tilling_dense_nn.1000.log", "path of autotvm tuning log")
flags.DEFINE_string("tvm_profile_log",
"/tmp/tvm_profile.log", "path of tvm profile")
flags.DEFINE_string("output_path", "", "path of output file")
FLAGS = flags.FLAGS
@autotvm.template
def tvm_matmul_tune_op(batch, in_dim, out_dim):
"""
autotvm tuning template
D=A*B
[batch, in_dim] x [in_dim, out_dim]
"""
A = tvm.placeholder((batch, in_dim), name='A', dtype="float32")
B = tvm.placeholder((in_dim, out_dim), name='B', dtype="float32")
k = tvm.reduce_axis((0, in_dim), name='k')
C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum(
A[i, k] * B[k, j], axis=k), name='C')
cfg = autotvm.get_config()
s = tvm.create_schedule(C.op)
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
y, x = C.op.axis
k = CC.op.reduce_axis[0]
cfg.define_split('tile_k', cfg.axis(k), num_outputs=3)
ko, kt, ki = cfg['tile_k'].apply(s, CC, k)
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
thread_x = tvm.thread_axis('threadIdx.x')
thread_y = tvm.thread_axis('threadIdx.y')
cfg.define_split('tile_y', cfg.axis(y), num_outputs=4)
cfg.define_split('tile_x', cfg.axis(x), num_outputs=4)
by, tyz, ty, yi = cfg['tile_y'].apply(s, C, y)
bx, txz, tx, xi = cfg['tile_x'].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, tvm.thread_axis('vthread'))
s[C].bind(txz, tvm.thread_axis('vthread'))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
s[stage].double_buffer()
for stage in [AA, BB]:
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg['tile_y'].size[2])
tx, xi = s[stage].split(tx, nparts=cfg['tile_x'].size[2])
_, xi = s[stage].split(xi, factor=4)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob('auto_unroll_max_step', [512, 1500])
s[C].pragma(by, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val)
s[C].pragma(by, 'unroll_explicit', False)
cfg.add_flop(batch * in_dim * out_dim * 2)
return s, [A, B, C]
def search_matmul_config(batch, in_dim, out_dim, num_trials):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(
batch, in_dim, out_dim), target='cuda')
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4)
)
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = "tuned_kernels/" + op_name + ".log"
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(n_trial=num_trials, measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)])
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
print('\nBest config:')
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
def lookup_matmul_config(batch, in_dim, out_dim, output_log):
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = FLAGS.autotvm_log
with open(log_name, "r") as fin:
log_lines = fin.readlines()
# log_records=tvm.autotvm.record.load_from_file(log_name)
log_records_all = []
log_records = []
for line in log_lines:
line = line.rstrip('\n')
# print(line)
record_json = json.loads(line)
tm = record_json['r'][0][0]
if tm > 10000000: # filter bad configs
continue
if record_json['i'][2][0] != batch or record_json['i'][2][1] != in_dim or record_json['i'][2][2] != out_dim: # filter other configs
continue
griddim_x = record_json['i'][5]["e"][2][2][0]
if griddim_x == -1:
griddim_x = int(out_dim / record_json['i'][5]["e"][2][2][1] / record_json['i'][5]["e"][2][2][2] / record_json['i'][5]["e"][2][2][3])
griddim_y = record_json['i'][5]["e"][1][2][0]
if griddim_y == -1:
griddim_y = int(batch / record_json['i'][5]["e"][1][2][1] / record_json['i'][5]["e"][1][2][2] / record_json['i'][5]["e"][1][2][3])
record = {"time": tm,
"grid": [griddim_x, griddim_y, 1],
"block": [record_json['i'][5]["e"][2][2][2], record_json['i'][5]["e"][1][2][2], 1],
"config": line}
log_records_all.append((tm, record))
# if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
# continue
# if record["grid"][0] * record["grid"][1] * record["grid"][2] < 16:
# continue
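# Descriptive note (added): 'opt' weights kernel time by the total number of threads
# launched, approximately rounding each block up to a whole 32-thread warp, so that
# candidates are ranked by an efficiency proxy rather than raw latency alone.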
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * record["block"][0] * record["block"][1] * record["block"][2]
if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * (record["block"][0] * record["block"][1] * record["block"][2] / 32 + 1) * 32
record.update({"opt": opt})
log_records.append((tm, record))
# print(log_records[-1])
log_records_all.sort(key=lambda item: item[0])
log_records.sort(key=lambda item: item[0])
print(op_name)
log_records_fast = log_records[0:100]
# log_records_fast = log_records
log_records = []
for i in range(len(log_records_fast)):
log_records.append((log_records_fast[i][1]["opt"], log_records_fast[i][1]))
log_records.sort(key=lambda item: item[0])
print("fastest kernel:", log_records_all[0][1]["time"], "grid:", log_records_all[0][1]["grid"], "block:", log_records_all[0][1]["block"])
# print(log_records_fast[0][1]["config"])
print("efficient kernel:",log_records[0][1]["time"], "grid:", log_records[0][1]["grid"], "block:", log_records[0][1]["block"])
with open(output_log, 'a') as fout:
fout.write(log_records[0][1]["config"] + "\n")
def launch_matmul_from_config(config_json_path):
with open(config_json_path, "r") as fin:
config = json.load(fin)
batch = config["i"][2][0]
in_dim = config["i"][2][1]
out_dim = config["i"][2][2]
# print(batch, in_dim, out_dim)
task = autotvm.task.create(
tvm_matmul_tune_op, args=(batch, in_dim, out_dim), target='cuda')
# dispatch_context = autotvm.task.ApplyConfig(config)
dispatch_context = autotvm.apply_history_best(config_json_path)
best_config = dispatch_context.query(task.target, task.workload)
print("Using pretuned config:")
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
output_log_file = "matmul_nn_autotvm_select_result.log"
if os.path.exists(output_log_file):
os.remove(output_log_file)
lookup_matmul_config(4, 256, 256, output_log_file)
lookup_matmul_config(16, 256, 256, output_log_file)
def tune_dot_codegen(m, k, n, log_path):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(m, k, n), target='cuda')
op_name = "tuned_dot_nn_op_float_m%d_k%d_n%d" % (m, k, n)
# log_name = "tuned_dot_op_float_%d_%d_%d" % (m, k, n)
# log_name = "tuned_kernels/" + log_name + ".log"
log_name = log_path
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(m,k,n)
func = tvm.build(s, arg_bufs, 'cuda', name=op_name)
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=[m,k]).astype("float32")
w_np = np.random.uniform(size=[k,n]).astype("float32")
c_np = np.zeros([m,n]).astype("float32")
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
c = tvm.nd.array(c_np, ctx)
kernel_code = func.imported_modules[0].get_source()
func(a, w, c)
return kernel_code
def extract_ops_from_log():
dot_ops = []
dot_ops.append({'arg0_shape': [4, 256], 'arg1_shape': [256, 256], 'out_shape': [4, 256], 'transpose_A': False, 'transpose_B': False})
dot_ops.append({'arg0_shape': [16, 256], 'arg1_shape': [256, 256], 'out_shape': [16, 256], 'transpose_A': False, 'transpose_B': False})
return dot_ops
def get_tvm_topi_func_name(m, k, n):
func_name = "tuned_dot_nn_op_float_m%d_k%d_n%d_kernel0" % (m, k, n)
return func_name
def extract_tvm_profiling_from_log(log_path):
lines = open(log_path).readlines()
deduped_lines = list(set(lines))
# print(deduped_lines)
# print("#convs:", len(lines), "#deduped_convs:", len(deduped_lines))
profiling_result = {}
for line in deduped_lines:
items = line.rstrip('\n').split('|')
profiling_data = {
'gridDim': [int(items[1]), int(items[2]), int(items[3])],
'blockDim': [int(items[4]), int(items[5]), int(items[6])]
}
profiling_result.update({items[0]: profiling_data})
return profiling_result
def generate_db_topi_ops(dot_ops, log_path):
topi_ops = []
tvm_profiling_log_path = FLAGS.tvm_profile_log
if os.path.exists(tvm_profiling_log_path):
os.remove(tvm_profiling_log_path)
for dot_op in dot_ops:
m = dot_op['arg0_shape'][0]
k = dot_op['arg0_shape'][1]
n = dot_op['arg1_shape'][1]
topi_code = tune_dot_codegen(m, k, n, log_path)
topi_op = {
'tvm_func_name': get_tvm_topi_func_name(m, k, n),
'op_type': 'Dot',
'parameters': dot_op,
'code': topi_code
}
topi_ops.append(topi_op)
profiling_result = extract_tvm_profiling_from_log(tvm_profiling_log_path)
for topi_op in topi_ops:
tvm_func_name = topi_op['tvm_func_name']
topi_op.update(profiling_result[tvm_func_name])
return topi_ops
dot_ops = extract_ops_from_log()
topi_ops = generate_db_topi_ops(dot_ops, output_log_file)
with open(FLAGS.output_path, 'w') as fout:
json.dump(topi_ops, fout)
os.remove(output_log_file)
|
[
"logging.getLogger",
"logging.StreamHandler",
"tvm.autotvm.apply_history_best",
"tvm.context",
"os.remove",
"tvm.autotvm.tuner.XGBTuner",
"os.path.exists",
"tvm.create_schedule",
"tvm.autotvm.LocalRunner",
"tvm.target.create",
"tvm.autotvm.get_config",
"tvm.nd.array",
"json.loads",
"tvm.sum",
"tvm.reduce_axis",
"tvm.autotvm.task.create",
"tvm.placeholder",
"tvm.autotvm.LocalBuilder",
"tvm.thread_axis",
"numpy.zeros",
"tvm.build",
"tvm.autotvm.callback.log_to_file",
"numpy.random.uniform",
"json.load",
"json.dump"
] |
[((10268, 10299), 'os.path.exists', 'os.path.exists', (['output_log_file'], {}), '(output_log_file)\n', (10282, 10299), False, 'import os\n'), ((13700, 13726), 'os.remove', 'os.remove', (['output_log_file'], {}), '(output_log_file)\n', (13709, 13726), False, 'import os\n'), ((1554, 1613), 'tvm.placeholder', 'tvm.placeholder', (['(batch, in_dim)'], {'name': '"""A"""', 'dtype': '"""float32"""'}), "((batch, in_dim), name='A', dtype='float32')\n", (1569, 1613), False, 'import tvm\n'), ((1622, 1683), 'tvm.placeholder', 'tvm.placeholder', (['(in_dim, out_dim)'], {'name': '"""B"""', 'dtype': '"""float32"""'}), "((in_dim, out_dim), name='B', dtype='float32')\n", (1637, 1683), False, 'import tvm\n'), ((1692, 1730), 'tvm.reduce_axis', 'tvm.reduce_axis', (['(0, in_dim)'], {'name': '"""k"""'}), "((0, in_dim), name='k')\n", (1707, 1730), False, 'import tvm\n'), ((1848, 1868), 'tvm.autotvm.get_config', 'autotvm.get_config', ([], {}), '()\n', (1866, 1868), False, 'from tvm import autotvm\n'), ((1877, 1902), 'tvm.create_schedule', 'tvm.create_schedule', (['C.op'], {}), '(C.op)\n', (1896, 1902), False, 'import tvm\n'), ((2271, 2300), 'tvm.thread_axis', 'tvm.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (2286, 2300), False, 'import tvm\n'), ((2315, 2344), 'tvm.thread_axis', 'tvm.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (2330, 2344), False, 'import tvm\n'), ((2360, 2390), 'tvm.thread_axis', 'tvm.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (2375, 2390), False, 'import tvm\n'), ((2406, 2436), 'tvm.thread_axis', 'tvm.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (2421, 2436), False, 'import tvm\n'), ((4031, 4120), 'tvm.autotvm.task.create', 'autotvm.task.create', (['tvm_matmul_tune_op'], {'args': '(batch, in_dim, out_dim)', 'target': '"""cuda"""'}), "(tvm_matmul_tune_op, args=(batch, in_dim, out_dim),\n target='cuda')\n", (4050, 4120), False, 'from tvm import autotvm\n'), ((4456, 4484), 'tvm.autotvm.tuner.XGBTuner', 'autotvm.tuner.XGBTuner', (['task'], {}), '(task)\n', (4478, 4484), False, 'from tvm import autotvm\n'), ((4642, 4678), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['log_name'], {}), '(log_name)\n', (4668, 4678), False, 'from tvm import autotvm\n'), ((5012, 5034), 'tvm.context', 'tvm.context', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (5023, 5034), False, 'import tvm\n'), ((5185, 5208), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (5197, 5208), False, 'import tvm\n'), ((5217, 5240), 'tvm.nd.array', 'tvm.nd.array', (['b_np', 'ctx'], {}), '(b_np, ctx)\n', (5229, 5240), False, 'import tvm\n'), ((8942, 9031), 'tvm.autotvm.task.create', 'autotvm.task.create', (['tvm_matmul_tune_op'], {'args': '(batch, in_dim, out_dim)', 'target': '"""cuda"""'}), "(tvm_matmul_tune_op, args=(batch, in_dim, out_dim),\n target='cuda')\n", (8961, 9031), False, 'from tvm import autotvm\n'), ((9118, 9162), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['config_json_path'], {}), '(config_json_path)\n', (9144, 9162), False, 'from tvm import autotvm\n'), ((9504, 9526), 'tvm.context', 'tvm.context', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (9515, 9526), False, 'import tvm\n'), ((9677, 9700), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (9689, 9700), False, 'import tvm\n'), ((9709, 9732), 'tvm.nd.array', 'tvm.nd.array', (['b_np', 'ctx'], {}), '(b_np, ctx)\n', (9721, 9732), False, 'import tvm\n'), ((10305, 10331), 'os.remove', 'os.remove', 
(['output_log_file'], {}), '(output_log_file)\n', (10314, 10331), False, 'import os\n'), ((10628, 10698), 'tvm.autotvm.task.create', 'autotvm.task.create', (['tvm_matmul_tune_op'], {'args': '(m, k, n)', 'target': '"""cuda"""'}), "(tvm_matmul_tune_op, args=(m, k, n), target='cuda')\n", (10647, 10698), False, 'from tvm import autotvm\n'), ((10928, 10964), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['log_name'], {}), '(log_name)\n', (10954, 10964), False, 'from tvm import autotvm\n'), ((11229, 11251), 'tvm.context', 'tvm.context', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (11240, 11251), False, 'import tvm\n'), ((11425, 11448), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (11437, 11448), False, 'import tvm\n'), ((11457, 11480), 'tvm.nd.array', 'tvm.nd.array', (['w_np', 'ctx'], {}), '(w_np, ctx)\n', (11469, 11480), False, 'import tvm\n'), ((11489, 11512), 'tvm.nd.array', 'tvm.nd.array', (['c_np', 'ctx'], {}), '(c_np, ctx)\n', (11501, 11512), False, 'import tvm\n'), ((12805, 12843), 'os.path.exists', 'os.path.exists', (['tvm_profiling_log_path'], {}), '(tvm_profiling_log_path)\n', (12819, 12843), False, 'import os\n'), ((13673, 13698), 'json.dump', 'json.dump', (['topi_ops', 'fout'], {}), '(topi_ops, fout)\n', (13682, 13698), False, 'import json\n'), ((2733, 2759), 'tvm.thread_axis', 'tvm.thread_axis', (['"""vthread"""'], {}), "('vthread')\n", (2748, 2759), False, 'import tvm\n'), ((2780, 2806), 'tvm.thread_axis', 'tvm.thread_axis', (['"""vthread"""'], {}), "('vthread')\n", (2795, 2806), False, 'import tvm\n'), ((3985, 4018), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (4006, 4018), False, 'import logging\n'), ((5262, 5305), 'numpy.zeros', 'np.zeros', (['(batch, out_dim)'], {'dtype': '"""float32"""'}), "((batch, out_dim), dtype='float32')\n", (5270, 5305), True, 'import numpy as np\n'), ((6165, 6181), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6175, 6181), False, 'import json\n'), ((8784, 8798), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (8793, 8798), False, 'import json\n'), ((9754, 9797), 'numpy.zeros', 'np.zeros', (['(batch, out_dim)'], {'dtype': '"""float32"""'}), "((batch, out_dim), dtype='float32')\n", (9762, 9797), True, 'import numpy as np\n'), ((10582, 10615), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (10603, 10615), False, 'import logging\n'), ((12853, 12886), 'os.remove', 'os.remove', (['tvm_profiling_log_path'], {}), '(tvm_profiling_log_path)\n', (12862, 12886), False, 'import os\n'), ((1782, 1816), 'tvm.sum', 'tvm.sum', (['(A[i, k] * B[k, j])'], {'axis': 'k'}), '(A[i, k] * B[k, j], axis=k)\n', (1789, 1816), False, 'import tvm\n'), ((3888, 3916), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (3905, 3916), False, 'import logging\n'), ((3945, 3973), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (3962, 3973), False, 'import logging\n'), ((4216, 4238), 'tvm.autotvm.LocalBuilder', 'autotvm.LocalBuilder', ([], {}), '()\n', (4236, 4238), False, 'from tvm import autotvm\n'), ((4255, 4314), 'tvm.autotvm.LocalRunner', 'autotvm.LocalRunner', ([], {'repeat': '(3)', 'min_repeat_ms': '(100)', 'timeout': '(4)'}), '(repeat=3, min_repeat_ms=100, timeout=4)\n', (4274, 4314), False, 'from tvm import autotvm\n'), ((4840, 4865), 'tvm.target.create', 'tvm.target.create', (['"""cuda"""'], {}), "('cuda')\n", (4857, 4865), False, 'import tvm\n'), ((4955, 5000), 
'tvm.build', 'tvm.build', (['s', 'arg_bufs', '"""cuda"""'], {'name': '"""matmul"""'}), "(s, arg_bufs, 'cuda', name='matmul')\n", (4964, 5000), False, 'import tvm\n'), ((5047, 5086), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(batch, in_dim)'}), '(size=(batch, in_dim))\n', (5064, 5086), True, 'import numpy as np\n'), ((5116, 5157), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(in_dim, out_dim)'}), '(size=(in_dim, out_dim))\n', (5133, 5157), True, 'import numpy as np\n'), ((9332, 9357), 'tvm.target.create', 'tvm.target.create', (['"""cuda"""'], {}), "('cuda')\n", (9349, 9357), False, 'import tvm\n'), ((9447, 9492), 'tvm.build', 'tvm.build', (['s', 'arg_bufs', '"""cuda"""'], {'name': '"""matmul"""'}), "(s, arg_bufs, 'cuda', name='matmul')\n", (9456, 9492), False, 'import tvm\n'), ((9539, 9578), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(batch, in_dim)'}), '(size=(batch, in_dim))\n', (9556, 9578), True, 'import numpy as np\n'), ((9608, 9649), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(in_dim, out_dim)'}), '(size=(in_dim, out_dim))\n', (9625, 9649), True, 'import numpy as np\n'), ((10485, 10513), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (10502, 10513), False, 'import logging\n'), ((10542, 10570), 'logging.getLogger', 'logging.getLogger', (['"""autotvm"""'], {}), "('autotvm')\n", (10559, 10570), False, 'import logging\n'), ((11075, 11100), 'tvm.target.create', 'tvm.target.create', (['"""cuda"""'], {}), "('cuda')\n", (11092, 11100), False, 'import tvm\n'), ((11173, 11217), 'tvm.build', 'tvm.build', (['s', 'arg_bufs', '"""cuda"""'], {'name': 'op_name'}), "(s, arg_bufs, 'cuda', name=op_name)\n", (11182, 11217), False, 'import tvm\n'), ((11264, 11294), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[m, k]'}), '(size=[m, k])\n', (11281, 11294), True, 'import numpy as np\n'), ((11323, 11353), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[k, n]'}), '(size=[k, n])\n', (11340, 11353), True, 'import numpy as np\n'), ((11382, 11398), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (11390, 11398), True, 'import numpy as np\n'), ((4577, 4615), 'tvm.autotvm.callback.log_to_file', 'autotvm.callback.log_to_file', (['log_name'], {}), '(log_name)\n', (4605, 4615), False, 'from tvm import autotvm\n')]
|
#!/usr/bin/env python
#
# This file is part of the Emotions project. The complete source code is
# available at https://github.com/luigivieira/emotions.
#
# Copyright (c) 2016-2017, <NAME> (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import argparse
import cv2
import numpy as np
from collections import OrderedDict
from datetime import datetime, timedelta
from faces import FaceDetector
from data import FaceData
from gabor import GaborBank
from emotions import EmotionsDetector
#---------------------------------------------
class VideoData:
"""
Helper class to present the detected face region, landmarks and emotions.
"""
#-----------------------------------------
def __init__(self):
"""
Class constructor.
"""
self._faceDet = FaceDetector()
'''
The instance of the face detector.
'''
self._bank = GaborBank()
'''
The instance of the bank of Gabor filters.
'''
self._emotionsDet = EmotionsDetector()
'''
The instance of the emotions detector.
'''
self._face = FaceData()
'''
Data of the last face detected.
'''
self._emotions = OrderedDict()
'''
Data of the last emotions detected.
'''
#-----------------------------------------
def detect(self, frame):
"""
Detects a face and the prototypic emotions on the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to perform the detections from.
Returns
-------
ret: bool
Indication of success or failure.
"""
ret, face = self._faceDet.detect(frame)
if ret:
self._face = face
# Crop just the face region
frame, face = face.crop(frame)
# Filter it with the Gabor bank
responses = self._bank.filter(frame)
# Detect the prototypic emotions based on the filter responses
self._emotions = self._emotionsDet.detect(face, responses)
return True
else:
self._face = None
return False
#-----------------------------------------
def draw(self, frame):
"""
Draws the detected data of the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to draw the information to.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
white = (255, 255, 255)
yellow = (0, 255, 255)
red = (0, 0, 255)
empty = True
# Plot the face landmarks and face distance
x = 5
y = 0
w = int(frame.shape[1]* 0.2)
try:
face = self._face
empty = face.isEmpty()
face.draw(frame)
except:
pass
# Plot the emotion probabilities
try:
emotions = self._emotions
if empty:
labels = []
values = []
else:
labels = list(emotions.keys())
values = list(emotions.values())
bigger = labels[values.index(max(values))]
# Draw the header
text = 'emotions'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 20
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
y += 5
cv2.line(frame, (x,y), (x+w,y), black, 1)
size, _ = cv2.getTextSize('happiness', font, scale, thick)
t = size[0] + 20
w = 150
h = size[1]
for l, v in zip(labels, values):
lab = '{}:'.format(l)
val = '{:.2f}'.format(v)
size, _ = cv2.getTextSize(l, font, scale, thick)
# Use red for the emotion with the highest probability
color = red if l == bigger else yellow
y += size[1] + 15
p1 = (x+t, y-size[1]-5)
p2 = (x+t+w, y-size[1]+h+5)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the filled rectangle proportional to the probability
p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
cv2.rectangle(frame, p1, p2, color, -1)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the emotion label
cv2.putText(frame, lab, (x, y), font, scale, black, glow)
cv2.putText(frame, lab, (x, y), font, scale, color, thick)
# Draw the value of the emotion probability
cv2.putText(frame, val, (x+t+5, y), font, scale, black, glow)
cv2.putText(frame, val, (x+t+5, y), font, scale, white, thick)
except Exception as e:
print(e)
pass
#---------------------------------------------
def main(argv):
"""
Main entry of this script.
Parameters
------
argv: list of str
Arguments received from the command line.
"""
# Parse the command line
args = parseCommandLine(argv)
# Loads the video or starts the webcam
if args.source == 'cam':
video = cv2.VideoCapture(args.id)
if not video.isOpened():
print('Error opening webcam of id {}'.format(args.id))
sys.exit(-1)
fps = 0
frameCount = 0
sourceName = 'Webcam #{}'.format(args.id)
else:
video = cv2.VideoCapture(args.file)
if not video.isOpened():
print('Error opening video file {}'.format(args.file))
sys.exit(-1)
fps = int(video.get(cv2.CAP_PROP_FPS))
frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
sourceName = args.file
# Force HD resolution (if the video was not recorded in this resolution or
# if the camera does not support it, the frames will be stretched to fit it)
# The intention is just to standardize the input (and make the help window
# work as intended)
video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
# Create the helper class
data = VideoData()
# Text settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1
thick = 1
glow = 3 * thick
# Color settings
color = (255, 255, 255)
paused = False
frameNum = 0
# Process the video input
while True:
if not paused:
start = datetime.now()
ret, img = video.read()
if ret:
frame = img.copy()
else:
paused = True
drawInfo(frame, frameNum, frameCount, paused, fps, args.source)
data.detect(frame)
data.draw(frame)
cv2.imshow(sourceName, frame)
if paused:
key = cv2.waitKey(0)
else:
end = datetime.now()
delta = (end - start)
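            # Pace playback to the source frame rate: wait out the remainder
            # of the frame period, or fall back to 1 ms when fps is unknown (webcam).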
if fps != 0:
delay = int(max(1, ((1 / fps) - delta.total_seconds()) * 1000))
else:
delay = 1
key = cv2.waitKey(delay)
if key == ord('q') or key == ord('Q') or key == 27:
break
elif key == ord('p') or key == ord('P'):
paused = not paused
elif args.source == 'video' and (key == ord('r') or key == ord('R')):
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and paused and key == 2424832: # Left key
frameNum -= 1
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and paused and key == 2555904: # Right key
frameNum += 1
if frameNum >= frameCount:
frameNum = frameCount - 1
elif args.source == 'video' and key == 2162688: # Pageup key
frameNum -= (fps * 10)
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and key == 2228224: # Pagedown key
frameNum += (fps * 10)
if frameNum >= frameCount:
frameNum = frameCount - 1
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif key == 7340032: # F1
showHelp(sourceName, frame.shape)
if not paused:
frameNum += 1
video.release()
cv2.destroyAllWindows()
#---------------------------------------------
def drawInfo(frame, frameNum, frameCount, paused, fps, source):
"""
Draws text info related to the given frame number into the frame image.
Parameters
----------
    frame: numpy.ndarray
        Image data on which to draw the text info.
    frameNum: int
        Number of the frame for which to draw the text info.
frameCount: int
Number total of frames in the video.
paused: bool
Indication if the video is paused or not.
fps: int
Frame rate (in frames per second) of the video for time calculation.
source: str
Source of the input images (either "video" or "cam").
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
yellow = (0, 255, 255)
# Print the current frame number and timestamp
if source == 'video':
text = 'Frame: {:d}/{:d} {}'.format(frameNum, frameCount - 1,
'(paused)' if paused else '')
else:
text = 'Frame: {:d} {}'.format(frameNum, '(paused)' if paused else '')
size, _ = cv2.getTextSize(text, font, scale, thick)
x = 5
y = frame.shape[0] - 2 * size[1]
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
if source == 'video':
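        # Convert the current frame index and the total frame count into
        # elapsed/total timestamps using the frame rate.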
timestamp = datetime.min + timedelta(seconds=(frameNum / fps))
elapsedTime = datetime.strftime(timestamp, '%H:%M:%S')
timestamp = datetime.min + timedelta(seconds=(frameCount / fps))
totalTime = datetime.strftime(timestamp, '%H:%M:%S')
text = 'Time: {}/{}'.format(elapsedTime, totalTime)
size, _ = cv2.getTextSize(text, font, scale, thick)
y = frame.shape[0] - 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
# Print the help message
text = 'Press F1 for help'
size, _ = cv2.getTextSize(text, font, scale, thick)
x = frame.shape[1] - size[0] - 5
y = frame.shape[0] - size[1] + 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
#---------------------------------------------
def showHelp(windowTitle, shape):
"""
Displays an image with helping text.
Parameters
----------
windowTitle: str
Title of the window where to display the help
shape: tuple
Height and width of the window to create the help image.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1.0
thick = 1
# Color settings
black = (0, 0, 0)
red = (0, 0, 255)
# Create the background image
image = np.ones((shape[0], shape[1], 3)) * 255
# The help text is printed in one line per item in this list
helpText = [
'Controls:',
'-----------------------------------------------',
'[q] or [ESC]: quits from the application.',
'[p]: toggles paused/playing the video/webcam input.',
'[r]: restarts the video playback (video input only).',
'[left/right arrow]: displays the previous/next frame (video input only).',
'[page-up/down]: rewinds/fast forwards by 10 seconds (video input only).',
' ',
' ',
'Press any key to close this window...'
]
# Print the controls help text
xCenter = image.shape[1] // 2
yCenter = image.shape[0] // 2
margin = 20 # between-lines margin in pixels
textWidth = 0
textHeight = margin * (len(helpText) - 1)
lineHeight = 0
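    # Measure the widest line and accumulate the total text-block height
    # so the block can be centered on the image.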
for line in helpText:
size, _ = cv2.getTextSize(line, font, scale, thick)
textHeight += size[1]
textWidth = size[0] if size[0] > textWidth else textWidth
lineHeight = size[1] if size[1] > lineHeight else lineHeight
x = xCenter - textWidth // 2
y = yCenter - textHeight // 2
for line in helpText:
cv2.putText(image, line, (x, y), font, scale, black, thick * 3)
cv2.putText(image, line, (x, y), font, scale, red, thick)
y += margin + lineHeight
# Show the image and wait for a key press
cv2.imshow(windowTitle, image)
cv2.waitKey(0)
#---------------------------------------------
def parseCommandLine(argv):
"""
Parse the command line of this utility application.
This function uses the argparse package to handle the command line
arguments. In case of command line errors, the application will be
automatically terminated.
Parameters
    ----------
argv: list of str
Arguments received from the command line.
Returns
    -------
object
Object with the parsed arguments as attributes (refer to the
documentation of the argparse package for details)
"""
parser = argparse.ArgumentParser(description='Tests the face and emotion '
'detector on a video file input.')
parser.add_argument('source', nargs='?', const='Yes',
choices=['video', 'cam'], default='cam',
help='Indicate the source of the input images for '
'the detectors: "video" for a video file or '
'"cam" for a webcam. The default is "cam".')
parser.add_argument('-f', '--file', metavar='<name>',
help='Name of the video file to use, if the source is '
'"video". The supported formats depend on the codecs '
'installed in the operating system.')
parser.add_argument('-i', '--id', metavar='<number>', default=0, type=int,
help='Numerical id of the webcam to use, if the source '
'is "cam". The default is 0.')
args = parser.parse_args()
if args.source == 'video' and args.file is None:
parser.error('-f is required when source is "video"')
return args
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"gabor.GaborBank",
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"sys.exit",
"datetime.timedelta",
"faces.FaceDetector",
"argparse.ArgumentParser",
"cv2.line",
"data.FaceData",
"cv2.waitKey",
"collections.OrderedDict",
"numpy.ones",
"cv2.putText",
"cv2.getTextSize",
"datetime.datetime.now",
"cv2.VideoCapture",
"emotions.EmotionsDetector",
"datetime.datetime.strftime"
] |
[((9815, 9838), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9836, 9838), False, 'import cv2\n'), ((11030, 11071), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (11045, 11071), False, 'import cv2\n'), ((11123, 11181), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (11134, 11181), False, 'import cv2\n'), ((11186, 11246), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (11197, 11246), False, 'import cv2\n'), ((11905, 11946), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (11920, 11946), False, 'import cv2\n'), ((12025, 12083), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (12036, 12083), False, 'import cv2\n'), ((12088, 12148), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (12099, 12148), False, 'import cv2\n'), ((14070, 14100), 'cv2.imshow', 'cv2.imshow', (['windowTitle', 'image'], {}), '(windowTitle, image)\n', (14080, 14100), False, 'import cv2\n'), ((14105, 14119), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (14116, 14119), False, 'import cv2\n'), ((14717, 14819), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tests the face and emotion detector on a video file input."""'}), "(description=\n 'Tests the face and emotion detector on a video file input.')\n", (14740, 14819), False, 'import argparse\n'), ((1860, 1874), 'faces.FaceDetector', 'FaceDetector', ([], {}), '()\n', (1872, 1874), False, 'from faces import FaceDetector\n'), ((1964, 1975), 'gabor.GaborBank', 'GaborBank', ([], {}), '()\n', (1973, 1975), False, 'from gabor import GaborBank\n'), ((2080, 2098), 'emotions.EmotionsDetector', 'EmotionsDetector', ([], {}), '()\n', (2096, 2098), False, 'from emotions import EmotionsDetector\n'), ((2192, 2202), 'data.FaceData', 'FaceData', ([], {}), '()\n', (2200, 2202), False, 'from data import FaceData\n'), ((2293, 2306), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2304, 2306), False, 'from collections import OrderedDict\n'), ((6594, 6619), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.id'], {}), '(args.id)\n', (6610, 6619), False, 'import cv2\n'), ((6861, 6888), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.file'], {}), '(args.file)\n', (6877, 6888), False, 'import cv2\n'), ((8121, 8150), 'cv2.imshow', 'cv2.imshow', (['sourceName', 'frame'], {}), '(sourceName, frame)\n', (8131, 8150), False, 'import cv2\n'), ((11367, 11407), 'datetime.datetime.strftime', 'datetime.strftime', (['timestamp', '"""%H:%M:%S"""'], {}), "(timestamp, '%H:%M:%S')\n", (11384, 11407), False, 'from datetime import datetime, timedelta\n'), ((11501, 11541), 'datetime.datetime.strftime', 'datetime.strftime', (['timestamp', '"""%H:%M:%S"""'], {}), "(timestamp, '%H:%M:%S')\n", (11518, 11541), False, 'from datetime import datetime, timedelta\n'), ((11621, 11662), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (11636, 11662), False, 'import cv2\n'), ((11702, 11760), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 
'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (11713, 11760), False, 'import cv2\n'), ((11769, 11829), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (11780, 11829), False, 'import cv2\n'), ((12676, 12708), 'numpy.ones', 'np.ones', (['(shape[0], shape[1], 3)'], {}), '((shape[0], shape[1], 3))\n', (12683, 12708), True, 'import numpy as np\n'), ((13546, 13587), 'cv2.getTextSize', 'cv2.getTextSize', (['line', 'font', 'scale', 'thick'], {}), '(line, font, scale, thick)\n', (13561, 13587), False, 'import cv2\n'), ((13856, 13919), 'cv2.putText', 'cv2.putText', (['image', 'line', '(x, y)', 'font', 'scale', 'black', '(thick * 3)'], {}), '(image, line, (x, y), font, scale, black, thick * 3)\n', (13867, 13919), False, 'import cv2\n'), ((13928, 13985), 'cv2.putText', 'cv2.putText', (['image', 'line', '(x, y)', 'font', 'scale', 'red', 'thick'], {}), '(image, line, (x, y), font, scale, red, thick)\n', (13939, 13985), False, 'import cv2\n'), ((4886, 4934), 'cv2.getTextSize', 'cv2.getTextSize', (['"""happiness"""', 'font', 'scale', 'thick'], {}), "('happiness', font, scale, thick)\n", (4901, 4934), False, 'import cv2\n'), ((6732, 6744), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (6740, 6744), False, 'import sys\n'), ((7001, 7013), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (7009, 7013), False, 'import sys\n'), ((7851, 7865), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7863, 7865), False, 'from datetime import datetime, timedelta\n'), ((8189, 8203), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8200, 8203), False, 'import cv2\n'), ((8236, 8250), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8248, 8250), False, 'from datetime import datetime, timedelta\n'), ((8453, 8471), 'cv2.waitKey', 'cv2.waitKey', (['delay'], {}), '(delay)\n', (8464, 8471), False, 'import cv2\n'), ((11309, 11342), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(frameNum / fps)'}), '(seconds=frameNum / fps)\n', (11318, 11342), False, 'from datetime import datetime, timedelta\n'), ((11443, 11478), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(frameCount / fps)'}), '(seconds=frameCount / fps)\n', (11452, 11478), False, 'from datetime import datetime, timedelta\n'), ((4552, 4593), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'scale', 'thick'], {}), '(text, font, scale, thick)\n', (4567, 4593), False, 'import cv2\n'), ((4645, 4703), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, text, (x, y), font, scale, black, glow)\n', (4656, 4703), False, 'import cv2\n'), ((4720, 4780), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y)', 'font', 'scale', 'yellow', 'thick'], {}), '(frame, text, (x, y), font, scale, yellow, thick)\n', (4731, 4780), False, 'import cv2\n'), ((4821, 4866), 'cv2.line', 'cv2.line', (['frame', '(x, y)', '(x + w, y)', 'black', '(1)'], {}), '(frame, (x, y), (x + w, y), black, 1)\n', (4829, 4866), False, 'import cv2\n'), ((5158, 5196), 'cv2.getTextSize', 'cv2.getTextSize', (['l', 'font', 'scale', 'thick'], {}), '(l, font, scale, thick)\n', (5173, 5196), False, 'import cv2\n'), ((5463, 5501), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'p1', 'p2', 'black', '(1)'], {}), '(frame, p1, p2, black, 1)\n', (5476, 5501), False, 'import cv2\n'), ((5658, 5697), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'p1', 'p2', 'color', '(-1)'], {}), 
'(frame, p1, p2, color, -1)\n', (5671, 5697), False, 'import cv2\n'), ((5714, 5752), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'p1', 'p2', 'black', '(1)'], {}), '(frame, p1, p2, black, 1)\n', (5727, 5752), False, 'import cv2\n'), ((5811, 5868), 'cv2.putText', 'cv2.putText', (['frame', 'lab', '(x, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, lab, (x, y), font, scale, black, glow)\n', (5822, 5868), False, 'import cv2\n'), ((5885, 5943), 'cv2.putText', 'cv2.putText', (['frame', 'lab', '(x, y)', 'font', 'scale', 'color', 'thick'], {}), '(frame, lab, (x, y), font, scale, color, thick)\n', (5896, 5943), False, 'import cv2\n'), ((6021, 6086), 'cv2.putText', 'cv2.putText', (['frame', 'val', '(x + t + 5, y)', 'font', 'scale', 'black', 'glow'], {}), '(frame, val, (x + t + 5, y), font, scale, black, glow)\n', (6032, 6086), False, 'import cv2\n'), ((6099, 6165), 'cv2.putText', 'cv2.putText', (['frame', 'val', '(x + t + 5, y)', 'font', 'scale', 'white', 'thick'], {}), '(frame, val, (x + t + 5, y), font, scale, white, thick)\n', (6110, 6165), False, 'import cv2\n')]
|
"""Semi continuous unit operations.
Unit operations that accept constant or box-shaped flow rate profile
and provide periodic flow rate profile.
"""
__all__ = ['AlternatingChromatography', 'ACC', 'PCC', 'PCCWithWashDesorption']
__version__ = '0.7.1'
__author__ = '<NAME>'
import typing as _typing
import numpy as _np
import scipy.interpolate as _interp
from bio_rtd.chromatography import bt_load as _bt_load
import bio_rtd.utils as _utils
import bio_rtd.core as _core
import bio_rtd.pdf as _pdf
class AlternatingChromatography(_core.UnitOperation):
"""Simulation of alternating chromatography.
This class implements logic common to various types of alternating
chromatography. It has a role of a base class for
specific types of alternating chromatography to extend.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "AC".
Notes
-----
**Quick description of which attributes are available:**
Non-binding species (optional):
* :attr:`non_binding_species`
Column volume (exactly one required):
* :attr:`cv`
* :attr:`ft_mean_retentate` and :attr:`column_porosity_retentate`
Column porosity for binding species (required in case of
:attr:`ft_mean_retentate` or wash or load recycling):
* :attr:`column_porosity_retentate`
Equilibration step duration (optional, if both, the values are
added together):
* :attr:`equilibration_cv`
* :attr:`equilibration_t`
Equilibration step flow rate (exactly one needed):
* :attr:`equilibration_f` - absolute, has priority if defined
* :attr:`equilibration_f_rel` - relative, default = 1
Load step duration:
* :attr:`load_cv` - preferred
* :attr:`load_c_end_ss` - concentration limit for breakthrough; also
requires :attr:`load_recycle_pdf`
* :attr:`load_c_end_relative_ss` - concentration limit for
breakthrough relative to steady-state load concentration; also
requires :attr:`load_recycle_pdf`
Iterative optimization of estimation of load step duration
(ignored if :attr:`load_cv` is defined):
* :attr:`load_c_end_estimate_with_iterative_solver` - default = True
* :attr:`load_c_end_estimate_with_iter_solver_max_iter` - default =
1000
Extension of first load step (optional; ignored if no recycling):
* :attr:`load_extend_first_cycle` - default = `False`
* :attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t` - added together if both defined
Load linear velocity - only for column height determination
(optional):
* :attr:`load_target_lin_velocity`
Wash step duration (optional, if both, the values are
added together):
* :attr:`wash_cv`
* :attr:`wash_t`
Wash step flow rate (exactly one needed):
* :attr:`wash_f` - absolute, has priority if defined
* :attr:`wash_f_rel` - relative, default = 1
Unaccounted losses - applied before peak cut (optional):
    * :attr:`unaccounted_losses_rel` - relative, default = 0
Elution step duration (optional, if both, the values are
added together):
* :attr:`elution_cv`
* :attr:`elution_t`
Elution step flow rate (exactly one needed):
* :attr:`elution_f` - absolute, has priority if defined
* :attr:`elution_f_rel` - relative, default = 1
Elution buffer composition (optional):
* :attr:`elution_buffer_c`
Elution peak position duration - first momentum
(optional, if both, the values are added together):
* :attr:`elution_peak_position_cv`
* :attr:`elution_peak_position_t`
Elution peak cut start (one is required):
* :attr:`elution_peak_cut_start_t`
* :attr:`elution_peak_cut_start_cv`
* :attr:`elution_peak_cut_start_c_rel_to_peak_max`
* :attr:`elution_peak_cut_start_peak_area_share`
Elution peak cut end (one is required):
* :attr:`elution_peak_cut_end_t`
* :attr:`elution_peak_cut_end_cv`
* :attr:`elution_peak_cut_end_c_rel_to_peak_max`
* :attr:`elution_peak_cut_end_peak_area_share`
Regeneration step duration (optional, if both, the values are
added together):
* :attr:`regeneration_cv`
* :attr:`regeneration_t`
Regeneration step flow rate (exactly one needed):
* :attr:`regeneration_f` - absolute, has priority if defined
* :attr:`regeneration_f_rel` - relative, default = 1
Wash desorption (optional, also check if class supports it):
* :attr:`wash_desorption` - default = `False`
Load breakthrough recycle (optional):
* :attr:`load_recycle` - default = `False`
Load breakthrough propagation dynamics
(required if :attr:`load_recycle` is `True`
    or :attr:`load_c_end_ss` is defined
or :attr:`load_c_end_relative_ss` is defined):
* :attr:`load_recycle_pdf`
Wash recycle (optional):
* :attr:`wash_recycle` - default = `False`
Duration of wash recycling
(optional; ignored if :attr:`wash_recycle` is `False`):
* :attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` - summed together if both defined.
* Entire wash step if
:attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` are not defined.
Please note that subclasses might introduce new attributes or change
the default values of existing attributes.
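
    Examples
    --------
    A minimal configuration sketch (illustrative values only; ``my_load_bt``
    and ``my_peak_pdf`` stand in for any ``ChromatographyLoadBreakthrough``
    and ``PDF`` instances and are not defined here)::

        import numpy as np

        t = np.linspace(0, 1000, 10001)  # time vector with constant step
        ac = AlternatingChromatography(t, "ac_example",
                                       load_bt=my_load_bt,
                                       peak_shape_pdf=my_peak_pdf)
        ac.cv = 10.0                # column volume
        ac.load_cv = 20.0           # load step duration in CV
        ac.wash_cv = 5.0            # wash step duration in CV
        ac.elution_cv = 3.0         # elution step duration in CV
        ac.elution_peak_position_cv = 1.2
        ac.elution_peak_cut_start_c_rel_to_peak_max = 0.05
        ac.elution_peak_cut_end_c_rel_to_peak_max = 0.05
        ac.regeneration_cv = 2.0    # regeneration step duration in CV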
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "AC"):
super().__init__(t, uo_id, gui_title)
# Bind parameters.
self.load_bt: _core.ChromatographyLoadBreakthrough = load_bt
"""Determines what part of load material binds to the column."""
self.elution_peak_shape: _core.PDF = peak_shape_pdf
"""Elution peak shape."""
self.non_binding_species: _typing.Sequence[int] = []
"""Process buffer species that are NOT binding to the column.
Indexing starts with 0.
"""
self.cv: float = -1
"""Column volume.
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv` (this one)
* :attr:`ft_mean_retentate`
and :attr:`column_porosity_retentate`
"""
self.ft_mean_retentate: float = -1
"""Flow-through time of retentate under non-binding conditions.
Used to define column volume (independently of scale).
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv`
* :attr:`ft_mean_retentate` (this one) and
:attr:`column_porosity_retentate`
"""
self.column_porosity_retentate: float = -1
"""Column porosity for retentate under non-binding conditions.
Required in case :attr:`ft_mean_retentate` is used to define
column volume.
Required in case :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss` are used to estimate
load step duration.
Required in case of load or wash recycling.
"""
self.equilibration_cv: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_t: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_f: float = -1
"""Equilibration step flow rate.
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f` (this one)
* :attr:`equilibration_f_rel`
"""
self.equilibration_f_rel: float = 1
"""Equilibration step flow rate relative to load flow rate.
Default = 1.
Equilibration step flow rate = :attr:`equilibration_f_rel`
* `load flow rate`
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f`
* :attr:`equilibration_f_rel` (this one)
"""
# Duration of the load phase.
self.load_cv: float = -1 # load duration in CV
"""Load phase duration in CV.
        This is the preferable way to define the duration of the load step
as it does not require any estimations about steady state.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (this one)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_ss: _typing.Optional[_np.ndarray] = None
"""Load phase switch based on target product breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the breakthrough reaches
specified concentration.
Steady state simulation requires
:attr:`column_porosity_retentate`
        and :attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss` (this one)
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_relative_ss: float = -1
"""Load phase switch based on relative breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the product (binding species)
in the breakthrough reaches specified relative concentration
(relative to load concentration in steady-state operation).
Steady state simulation requires
:attr:`column_porosity_retentate`
        and :attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss` (this one)
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_estimate_with_iterative_solver: bool = True
"""Finer optimization of cycle length estimation.
Default = `True`.
In case load step duration is estimated based of breakthrough
criteria (i.e. by :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss`), the model needs to simulate
steady-state operation in order to determine fixed load time.
This parameters enables iterative solver that allows more
precise estimation but might slow down the simulation.
Notes
-----
Max number of iteration steps is defined by
:attr:`load_c_end_estimate_with_iter_solver_max_iter`.
"""
self.load_c_end_estimate_with_iter_solver_max_iter: int = 1000
"""Max steps for optimization of cycle length estimation.
Default = 1000.
See Also
--------
:attr:`load_c_end_estimate_with_iterative_solver`
"""
self.load_extend_first_cycle: bool = False
"""Extend first load phase to achieve a faster steady-state.
Only relevant in case wash or load is recycled.
The duration of extension is defined by:
* :attr:`load_extend_first_cycle_cv` or
* :attr:`load_extend_first_cycle_t` or
* is determined automatically.
"""
self.load_extend_first_cycle_cv: float = -1
"""Duration of first load phase extension in column volumes.
Only relevant if :attr:`load_extend_first_cycle` is `True`.
        If the duration is defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_extend_first_cycle_t: float = -1
"""Duration of first load phase extension (time).
Only relevant if :attr:`load_extend_first_cycle` is `True`.
        If the duration is defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_target_lin_velocity: float = -1
"""Target load linear velocity.
It is used to provide information about required column height.
It does not have any impact on the rest of the model.
Units need to match other units in the model.
"""
self.wash_cv: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_t: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_f: float = -1
"""Wash step flow rate.
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f` (this one)
* :attr:`wash_f_rel`
"""
self.wash_f_rel: float = 1
"""Wash step flow rate relative to load flow rate. Default = 1.
Wash step flow rate = :attr:`wash_f_rel`
* `load flow rate`
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f`
* :attr:`wash_f_rel` (this one)
"""
self.unaccounted_losses_rel: float = 0
"""Unaccounted losses as a share of bound material.
Elution peak is scaled down by 1 - `unaccounted_losses_rel`
before applying peak cut criteria.
"""
self.elution_cv: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_t: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_f: float = -1
"""Elution step flow rate.
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f` (this one)
* :attr:`elution_f_rel`
"""
self.elution_f_rel: float = 1
"""Elution step flow rate relative to load flow rate.
Default = 1.
Elution step flow rate = :attr:`elution_f_rel`
* `load flow rate`
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f`
* :attr:`elution_f_rel` (this one)
"""
self.elution_buffer_c: _np.ndarray = _np.array([])
"""Elution buffer composition.
Default = empty array (= all components are 0).
If defined it must have a value for each specie.
"""
self.elution_peak_position_cv: float = -1
"""Position (cv) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_position_t: float = -1
"""Position (time) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_cut_start_t: float = -1
"""Elution peak cut start (time).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_cv: float = -1
"""Elution peak cut start (cv).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_c_rel_to_peak_max: float = -1
"""Elution peak cut start (signal relative to peak max).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_peak_area_share: float = -1
"""Elution peak cut start (share of total peak area).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_end_t: float = -1
"""Elution peak cut end (time).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_cv: float = -1
"""Elution peak cut end (cv).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_c_rel_to_peak_max: float = -1
"""Elution peak cut end (signal relative to peak max).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_peak_area_share: float = -1
"""Elution peak cut end (share of total peak area).
Exactly one peak cut end criteria should be defined.
"""
self.regeneration_cv: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_t: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_f: float = -1
"""Regeneration step flow rate.
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f` (this one)
* :attr:`regeneration_f_rel`
"""
self.regeneration_f_rel: float = 1
"""Regeneration step flow rate relative to load flow rate.
Default = 1.
Regeneration step flow rate = :attr:`regeneration_f_rel`
* `load flow rate`
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f`
* :attr:`regeneration_f_rel` (this one)
"""
self.wash_desorption: bool = False
"""Enable wash desorption.
Make sure the class implements the desorption dynamics.
"""
self.load_recycle: bool = False
"""Recycle load breakthrough. Default = False."""
self.load_recycle_pdf: _typing.Optional[_core.PDF] = None
"""PDF of wash and/or unbound load traveling through the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
"""
self.wash_recycle: bool = False
"""Recycle wash. Default = False.
        Wash is recycled onto the 3rd column while the 2nd is on load step.
        After the wash recycle, the 3rd column is connected to the 2nd
        column to recycle load breakthrough material.
"""
self.wash_recycle_duration_cv: float = -1
"""Duration of wash recycle (cv).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
self.wash_recycle_duration_t: float = -1
"""Duration of wash recycle (time).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_t` and
:attr:`wash_recycle_duration_cv`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
@_core.UnitOperation.log.setter
def log(self, logger: _core._logger.RtdLogger):
"""Propagates logger across other elements that support it."""
# Default logic.
self._logger = logger
self._logger.set_data_tree(self._log_entity_id, self._log_tree)
# Propagate logger across other elements with logging.
if self.load_recycle_pdf is not None:
self.load_recycle_pdf.set_logger_from_parent(self.uo_id, logger)
        if self.elution_peak_shape is not None:
            self.elution_peak_shape.set_logger_from_parent(self.uo_id, logger)
        if self.load_bt is not None:
            self.load_bt.set_logger_from_parent(self.uo_id, logger)
def _get_flow_value(self,
step_name: str, var_name: str,
flow: float, rel_flow: float) -> float:
"""Calc flow rate of chromatographic step.
If `flow` is specified, `flow` is used.
Otherwise `rel_flow` == flow rate relative to load flow rate is
used.
If none are positive, then the load flow rate is used
and a warning is logged.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
flow
Flow rate.
rel_flow
Flow rate relative to load flow rate.
Returns
-------
float
Flow rate.
"""
if flow > 0:
self.log.i_data(self._log_tree, var_name, flow)
elif rel_flow > 0:
flow = rel_flow * self._load_f
self.log.i_data(self._log_tree, var_name, flow)
else:
self.log.w(f"{step_name} step flow rate is not defined,"
f" using load flow rate instead.")
flow = self._load_f
return flow
def _get_time_value(self,
step_name: str, var_name: str,
t: float, cv: float, flow: float) -> float:
"""Calc duration of chromatographic step.
If the step duration is specified in cv and in t, then the
        values are added together.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
t
Duration (time).
cv
Duration (cv).
flow
Flow rate (required if `cv` > 0).
Returns
-------
float
Total step duration (time).
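
        Notes
        -----
        Illustrative numbers only: with ``cv`` = 2, a column volume of 10
        and ``flow`` = 5 (volume per time unit), the CV part contributes
        2 * 10 / 5 = 4 time units, which is added on top of ``t``.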
"""
# Calc.
t_sum = max(t, 0)
if cv > 0:
assert flow > 0, f"{step_name}: Flow rate must be defined (> 0)" \
f" if the duration is specified in CVs."
assert self._cv > 0, f"CV must be determined (by `calc_cv`)" \
f" before calculating duration based on CVs."
t_sum += cv * self._cv / flow # sum
# Log.
if t <= 0 and cv <= 0:
self.log.w(step_name + " time is not defined")
else:
self.log.i_data(self._log_tree, var_name, t_sum)
return t_sum
def _assert_non_binding_species(self):
"""Make sure binding species list is valid."""
if len(self.non_binding_species) > 0:
assert max(self.non_binding_species) < self._n_species, \
"Index of non_binding_species too large (indexes start with 0)"
assert list(set(self.non_binding_species)) \
== list(self.non_binding_species), \
"List of non_binding_species should have ascending order"
assert len(self.non_binding_species) < self._n_species, \
"All species cannot be non-binding."
# Log.
self.log.i_data(self._log_tree,
'non_binding_species',
self.non_binding_species)
def _calc_load_f(self):
"""Determine load flow rate (when on)."""
assert self._is_flow_box_shaped(), "Inlet flow must be box shaped."
self._load_f = self._f.max()
self.log.d_data(self._log_tree, 'load_f', self._load_f)
def _calc_cv(self):
"""Determine column volume."""
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.ERROR,
cv=self.cv,
ft_mean_retentate=self.ft_mean_retentate,
)
if self.cv > 0:
self._cv = self.cv
else: # `self.ft_mean_retentate` > 0.
assert self.column_porosity_retentate > 0, \
f"porosity_retentate must be defined to calc CV from " \
f" `self.ft_mean_retentate`."
assert self._load_f > 0, f"Load flow rate must be defined to" \
f" calc CV from `self.ft_mean_retentate`."
self._cv = self.ft_mean_retentate * self._load_f \
/ self.column_porosity_retentate
# Log.
self.log.i_data(self._log_tree, 'cv', self._cv)
def _report_column_dimensions(self):
"""Report column dimensions based on load linear velocity."""
if self.load_target_lin_velocity > 0:
self._col_h = self._cv * self.load_target_lin_velocity \
/ self._load_f
self.log.i_data(self._log_tree, "column_h", self._col_h)
self.log.i_data(self._log_tree,
"column_d",
(self._cv / self._col_h / _np.pi) ** 0.5 * 2)
def _calc_equilibration_t(self):
"""Determine equilibration step duration."""
if self.equilibration_cv > 0:
# Flow rate.
eq_f = self._get_flow_value("Equilibration",
"equilibration_f",
self.equilibration_f,
self.equilibration_f_rel)
# Duration.
self._equilibration_t = self._get_time_value("Equilibration",
"equilibration_t",
self.equilibration_t,
self.equilibration_cv,
eq_f)
else:
# Duration.
self._equilibration_t = max(self.equilibration_t, 0)
# Log.
self.log.i_data(self._log_tree,
'equilibration_t',
self._equilibration_t)
def _calc_wash_t_and_f(self):
"""Determine wash step flow rate and duration."""
# Flow rate.
self._wash_f = self._get_flow_value("Wash",
"wash_f",
self.wash_f,
self.wash_f_rel)
# Duration.
self._wash_t = self._get_time_value("Wash",
"wash_t",
self.wash_t,
self.wash_cv,
self._wash_f)
def _calc_elution_t_and_f(self):
"""Determine elution step flow rate and duration."""
# Flow rate.
self._elution_f = self._get_flow_value("Elution",
"elution_f",
self.elution_f,
self.elution_f_rel)
# Duration.
self._elution_t = self._get_time_value("Elution",
"elution_t",
self.elution_t,
self.elution_cv,
self._elution_f)
def _calc_elution_peak_t(self):
"""Determine elution peak mean position (1st momentum)."""
self._elution_peak_t = self._get_time_value(
"elution peak position",
"elution_peak_position_t",
self.elution_peak_position_t,
self.elution_peak_position_cv,
self._elution_f
)
def _update_elution_peak_pdf(self):
"""Update elution peak PDF."""
assert self._elution_peak_t > 0
assert self._elution_f > 0
# Calc elution peak shape.
self.elution_peak_shape.update_pdf(
rt_mean=self._elution_peak_t,
v_void=self._elution_peak_t * self._elution_f,
f=self._elution_f
)
self._p_elution_peak = \
self.elution_peak_shape.get_p() * (1 - self.unaccounted_losses_rel)
self.log.d_data(self._log_tree,
"p_elution_peak",
self._p_elution_peak)
def _calc_elution_peak_cut_i_start_and_i_end(self):
"""Calc elution peak cut start and end in form of time steps.
Values are relative to the beginning of the elution step.
"""
elution_peak_pdf: _np.ndarray = self._p_elution_peak.copy()
# Peak cut start.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_start_peak_area_share=self
.elution_peak_cut_start_peak_area_share,
elution_peak_cut_start_c_rel_to_peak_max=self
.elution_peak_cut_start_c_rel_to_peak_max,
elution_peak_cut_start_cv=self.elution_peak_cut_start_cv,
elution_peak_cut_start_t=self.elution_peak_cut_start_t
)
# Calc `elution_peak_cut_start_i`.
if self.elution_peak_cut_start_peak_area_share >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= self.elution_peak_cut_start_peak_area_share
)
elif self.elution_peak_cut_start_c_rel_to_peak_max >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
elution_peak_pdf
>= self.elution_peak_cut_start_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_start_cv >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_start_t >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_t / self._dt)
else:
self.log.w(f"Elution peak cut start is not defined."
f" Now collecting from the beginning"
f" of the elution phase.")
elution_peak_cut_start_i = 0
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_start_i",
elution_peak_cut_start_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_start_t",
elution_peak_cut_start_i * self._dt)
# Peak cut end.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_end_peak_area_share=self
.elution_peak_cut_end_peak_area_share,
elution_peak_cut_end_c_rel_to_peak_max=self
.elution_peak_cut_end_c_rel_to_peak_max,
elution_peak_cut_end_cv=self.elution_peak_cut_end_cv,
elution_peak_cut_end_t=self.elution_peak_cut_end_t,
)
# Calc `elution_peak_cut_end_i`.
if self.elution_peak_cut_end_peak_area_share >= 0:
elution_peak_cut_end_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= (1 - self.elution_peak_cut_end_peak_area_share)
)
elif self.elution_peak_cut_end_c_rel_to_peak_max >= 0:
elution_peak_cut_end_i = _utils.vectors.true_end(
elution_peak_pdf
>= self.elution_peak_cut_end_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_end_cv >= 0:
elution_peak_cut_end_i = \
int(self.elution_peak_cut_end_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_end_t >= 0:
elution_peak_cut_end_i = \
_utils.vectors.true_end(self._t < self.elution_peak_cut_end_t)
else:
self.log.w(f"Elution peak cut end is not defined."
f" Now collecting to the end of the elution phase.")
elution_peak_cut_end_i = elution_peak_pdf.size
self._elution_peak_cut_start_i = elution_peak_cut_start_i
self._elution_peak_cut_end_i = elution_peak_cut_end_i
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_end_i",
elution_peak_cut_end_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_end_t",
elution_peak_cut_end_i * self._dt)
if self._elution_peak_cut_end_i * self._dt < self._elution_peak_t:
self.log.w(f"Peak end is cut before its maximum.")
if self._elution_peak_cut_end_i * self._dt > self._elution_t:
self.log.w(f"Peak cut end exceeds elution step duration.")
def _calc_elution_peak_mask(self):
"""Calc where the elution peak gets collected."""
self._elution_peak_mask = \
_np.ones(int(round(self._elution_t / self._dt)), dtype=bool)
self._elution_peak_mask[self._elution_peak_cut_end_i:] = False
self._elution_peak_mask[:self._elution_peak_cut_start_i] = False
self.log.d_data(self._log_tree,
"elution_peak_interval",
self._elution_peak_mask)
def _update_load_btc(self):
"""Update load breakthrough profile."""
assert self._cv > 0, "CV must be defined by now."
self.load_bt.update_btc_parameters(cv=self._cv)
def _calc_regeneration_t(self):
"""Calc regeneration step duration."""
if self.regeneration_cv > 0:
eq_f = self._get_flow_value("Regeneration",
"regeneration_f",
self.regeneration_f,
self.regeneration_f_rel)
self._regeneration_t = self._get_time_value("Regeneration",
"regeneration_t",
self.regeneration_t,
self.regeneration_cv,
eq_f)
else:
self._regeneration_t = max(self.regeneration_t, 0)
# Log.
self.log.i_data(self._log_tree, 'regeneration_t', self._regeneration_t)
def _update_load_recycle_pdf(self, flow):
"""Update pdf that describes propagation of recycled material.
        Recycled material is composed of unbound (load) and desorbed
(wash) material throughout the column.
`self.load_recycle_pdf` gets updated.
"""
assert self.load_recycle_pdf is not None, \
f"`load_recycle_pdf` must be defined by now."
assert self.column_porosity_retentate > 0, \
f"Retentate porosity must be defined by now."
assert self._cv > 0, "CV must be defined by now."
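        # Void volume seen by recycled (unbound/desorbed) material is
        # retentate porosity * column volume; mean residence time = v_void / flow.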
v_void = self._cv * self.column_porosity_retentate
self.load_recycle_pdf.update_pdf(v_void=v_void,
f=flow,
rt_mean=v_void / flow)
self._p_load_recycle_pdf = self.load_recycle_pdf.get_p()
def _calc_load_recycle_wash_i(self):
"""Calculate wash recycle duration in form of time steps."""
if self.wash_recycle_duration_t > 0 \
or self.wash_recycle_duration_cv > 0:
self._wash_recycle_i_duration = int(self._get_time_value(
"Wash recycle", "load_wash_recycle_t",
self.wash_recycle_duration_t,
self.wash_recycle_duration_cv,
self._wash_f
) / self._dt)
else:
# Same as wash duration.
assert self._wash_t > 0
self._wash_recycle_i_duration = int(round(self._wash_t / self._dt))
def _get_load_bt_cycle_switch_criteria(self,
load_c_ss: _np.ndarray
) -> _np.ndarray:
"""Get steady-state cycle switch (== end of load) criteria.
Parameters
----------
load_c_ss
Load concentration during steady state operation.
Returns
-------
ndarray
Threshold concentration for load breakthrough.
"""
assert self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0, \
f"Load step duration should be defined!"
if self.load_c_end_ss is not None:
load_c_end_ss = self.load_c_end_ss
if self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined by `load_c_end_ss`"
f" and `load_c_end_relative_ss`."
f" Simulation is using `load_c_end_ss`.")
else: # self.load_c_end_relative_ss > 0
load_c_end_ss = self.load_c_end_relative_ss * load_c_ss
# Log.
self.log.i_data(self._log_tree,
'load_c_end_ss',
load_c_end_ss)
return load_c_end_ss
# noinspection DuplicatedCode
def _calc_cycle_t(self):
"""Calculates cycle time (== load time for a single column).
Optional delay of first cycle is not part of this calculation.
"""
assert self._cv > 0
assert self._load_f > 0
if self.load_cv > 0:
t_cycle = self.load_cv * self._cv / self._load_f
if self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined in more than one way."
f" Simulation is using `load_cv`.")
else:
# Get bt profile for constant inlet.
# Inlet conc.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# Simulate first cycle at constant load concentration.
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# Propagate breakthrough.
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
# Calc cycle duration.
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
t_cycle = i_t_first_cycle * self._dt
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_wash_desorbed = self._sim_c_wash_desorption(
f_first_load[:i_t_first_cycle],
c_first_load[:, :i_t_first_cycle]
- bt_first_load[:, :i_t_first_cycle])
else:
c_wash_desorbed = None
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(
f_first_load[:i_t_first_cycle],
bt_first_load[:, :i_t_first_cycle],
c_wash_desorbed)
if self.load_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
i_load_recycle_start = self._wash_recycle_i_duration \
if self.wash_recycle else 0
m_load_recycle = \
bt_first_load_out[
:,
i_load_recycle_start:i_t_first_cycle
].sum() * self._load_f * self._dt
_t_diff = m_load_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._load_recycle_m_ss = m_load_recycle
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_bt_recycle',
_t_diff)
if self.wash_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
m_wash_recycle = bt_first_wash_out[
:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
_t_diff = m_wash_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._wash_recycle_m_ss = m_wash_recycle
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_wash_recycle',
_t_diff)
if self.load_c_end_estimate_with_iterative_solver \
and (self.wash_recycle or self.load_recycle):
c_load_fist_cycle = load_c_ss * _np.ones([len(binding_species),
i_t_first_cycle * 2])
def sim_cycle(f_load: _np.ndarray,
c_load: _np.ndarray,
i_prev_cycle: int) -> _typing.Tuple[_np.ndarray,
_np.ndarray,
int]:
"""Simulates load-wash cycle. Calc load duration.
Load duration is determined based on breakthrough
criteria.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load conc profile.
i_prev_cycle
Previous cycle duration in time steps.
Returns
-------
f_load_next_cycle
Load and wash breakthrough flow rate profile.
c_load_next_cycle
Load and wash breakthrough conc profile.
i_cycle
Current cycle duration in time steps.
"""
# Load.
bt_load: _np.ndarray = \
c_load - self.load_bt.calc_c_bound(f_load, c_load)
# Propagate breakthrough.
bt_load_out, _ = self._sim_c_recycle_propagation(
f_load,
bt_load,
None)
# 'Stop' load at specified breakthrough criteria.
# noinspection PyTypeChecker
i_cycle_duration = _utils.vectors.true_start(
bt_load_out.sum(0) >= load_c_end_ss.sum())
# Cut load at specified time.
bt_load = bt_load[:, :i_cycle_duration]
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_first_wash_desorbed = self._sim_c_wash_desorption(
f_load[:i_cycle_duration],
c_load[:, :i_cycle_duration]
- bt_load[:, :i_cycle_duration])
else:
c_first_wash_desorbed = None
# Propagate load and wash leftovers.
bt_load_out, bt_wash_out = self._sim_c_recycle_propagation(
f_load[:i_cycle_duration],
bt_load,
c_first_wash_desorbed)
# Construct load for next cycle.
# Recycle load.
if self.load_recycle:
rec_load = bt_load_out[:,
i_prev_cycle:i_cycle_duration]
else:
rec_load = _np.zeros_like(
bt_load_out[:, i_prev_cycle:i_cycle_duration])
# Next load profiles.
c_next_load = _np.concatenate((rec_load,
c_load_fist_cycle),
axis=1)
f_next_load = self._load_f * _np.ones(c_next_load.shape[1])
wash_recycle_i_duration = self._wash_recycle_i_duration \
if self.wash_recycle else 0
# Log.
m_load_recycle_ss = \
bt_first_load_out[
:,
wash_recycle_i_duration:i_t_first_cycle
].sum() * self._load_f * self._dt
self._load_recycle_m_ss = m_load_recycle_ss
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle_ss)
# Recycle wash.
if self.wash_recycle:
c_next_load[:, :self._wash_recycle_i_duration] = \
bt_wash_out[:, :self._wash_recycle_i_duration]
f_next_load[:self._wash_recycle_i_duration] = \
self._wash_f
m_wash_recycle_ss = \
bt_wash_out[:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
self._wash_recycle_m_ss = m_wash_recycle_ss
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle_ss)
# Return next load and cycle duration.
return f_next_load, c_next_load, \
i_cycle_duration - i_prev_cycle
f_load_cycle = \
self._load_f * _np.ones(c_load_fist_cycle.shape[1])
c_load_cycle = c_load_fist_cycle
i_t_cycle_prev = i_t_first_cycle
i_t_cycle_estimate = 0
# Loop until cycle duration converges.
for i in range(
self.load_c_end_estimate_with_iter_solver_max_iter):
if abs(i_t_cycle_prev - i_t_cycle_estimate) <= 1:
self.log.i_data(self._log_tree,
"t_cycle_optimization_loop_iter",
i)
break
i_t_cycle_prev = i_t_cycle_estimate
f_load_cycle, c_load_cycle, i_t_cycle_estimate = \
sim_cycle(f_load_cycle, c_load_cycle, i_t_cycle_prev)
# print([i, i_t_cycle_prev, i_t_cycle_estimate])
if abs(i_t_cycle_prev - i_t_cycle_estimate) > 1:
self.log.w("Cycle duration estimator did not converge.")
t_cycle = i_t_cycle_estimate * self._dt
elif self.load_c_end_estimate_with_iterative_solver:
self.log.i(f"No need to use iterative solver in case of"
f" no recycling of load and/or wash.")
self._cycle_t = t_cycle
self.log.i_data(self._log_tree, 'cycle_t', t_cycle)
# noinspection DuplicatedCode
def _calc_first_cycle_extension_t(self):
"""Calc extension of first load.
First load step might be extended for processes with load and/or
        wash recycle in order to reach the steady-state regime faster.
"""
if not self.load_recycle and not self.wash_recycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without load recycle.")
self._first_cycle_extension_t = 0
return
elif not self.load_extend_first_cycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without extended first cycle.")
self._first_cycle_extension_t = 0
return
elif self.load_extend_first_cycle_t > 0:
self._first_cycle_extension_t = self.load_extend_first_cycle_t
return
elif self.load_extend_first_cycle_cv >= 0:
assert self._cv > 0, "CV should be defined by now."
assert self._load_f > 0, "Load flow rate should be defined by now."
self._first_cycle_extension_t = \
self.load_extend_first_cycle_cv * self._cv / self._load_f
elif self.load_cv > 0:
raise NotImplementedError(
f"Estimation of first cycle extension is only supported"
f" if the cycle length is defined by breakthrough cutoff"
f" criteria. This is due to the fact that if all the"
f" breakthrough material gets recycles,"
f" there is no single steady-state.")
else:
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# simulate first cycle at constant load concentration
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# propagate breakthrough
bt_first_load_out, _ = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
dm = 0
if self.load_recycle:
assert hasattr(self, "_load_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._load_recycle_m_ss
if self.wash_recycle:
assert hasattr(self, "_wash_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._wash_recycle_m_ss
di = 0
if dm > 0:
m_ext_bt = _np.cumsum(
bt_first_load_out.sum(0)[i_t_first_cycle:]
) * self._load_f * self._dt
di += _utils.vectors.true_start(m_ext_bt >= dm)
self._first_cycle_extension_t = di * self._dt
def _calc_cycle_start_i_list(self):
"""Calculate load switch positions in form of time steps."""
assert self._cycle_t > 0, \
f"Cycle length must have been determined" \
f" (by `_calc_cycle_t()`) by now"
flow_i_start, flow_i_end = \
_utils.vectors.true_start_and_end(self._f > 0)
if self.load_extend_first_cycle:
assert self._first_cycle_extension_t >= 0, \
f"Prolong of first load cycle is set to `True`," \
f" but the length is undefined."
if self._first_cycle_extension_t == 0:
self.log.w(f"Prolong of first load cycle is set to `True`,"
f" but the length of the extension is 0.")
load_extend_first_cycle_t = self._first_cycle_extension_t
self.log.i_data(self._log_tree,
"load_extend_first_cycle_t",
load_extend_first_cycle_t)
else:
load_extend_first_cycle_t = 0
cycle_start_t_list = _np.arange(
self._t[flow_i_start] + load_extend_first_cycle_t,
self._t[flow_i_end - 1],
self._cycle_t
)
cycle_start_t_list[0] = self._t[flow_i_start]
self._cycle_start_i_list = _np.rint(
cycle_start_t_list / self._dt).astype(_np.int32)
self.log.i_data(self._log_tree,
"cycle_start_t_list",
cycle_start_t_list)
def _prepare_simulation(self):
"""Prepare everything before cycle-by-cycle simulation."""
self._assert_non_binding_species()
self._calc_load_f()
self._calc_cv() # might depend on load_f
self._report_column_dimensions() # optional
# Equilibration.
self._calc_equilibration_t()
# Wash.
self._calc_wash_t_and_f()
# Elution.
self._calc_elution_t_and_f()
self._calc_elution_peak_t()
self._update_elution_peak_pdf()
self._calc_elution_peak_cut_i_start_and_i_end()
self._calc_elution_peak_mask()
# Regeneration.
self._calc_regeneration_t()
# Prepare for estimation of cycle length.
self._update_load_btc()
if self.load_recycle:
self._update_load_recycle_pdf(self._wash_f)
if self.wash_recycle:
self._calc_load_recycle_wash_i()
# Cycle time.
self._calc_cycle_t()
if self.load_extend_first_cycle:
self._calc_first_cycle_extension_t()
# Cycle start positions == column load switch time points.
self._calc_cycle_start_i_list()
# Make sure cycle duration is long enough.
_t_cycle_except_load = self._equilibration_t + self._wash_t \
+ self._elution_t + self._regeneration_t
if self._cycle_t < _t_cycle_except_load:
self.log.e(f"Load step ({self._cycle_t}) should not be shorter"
f" than eq_t + wash_t + elution_t + regeneration_t"
f" ({_t_cycle_except_load: .6})!")
def _sim_c_load_binding(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Determine what part of load binds.
Load in this context might also contain wash and load recycle
from previous steps.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load concentration profile.
Returns
-------
c_bound
Conc profile of bound material.
c_unbound
Conc profile of unbound material = `c_load` - `c_bound`.
"""
assert f_load.size == c_load.shape[1], \
"f_load and c_load must have the same length"
assert c_load.shape[0] == \
self._n_species - len(self.non_binding_species), \
"c_load must contain all binding species"
c_bound = self.load_bt.calc_c_bound(f_load, c_load)
# Returns bound and unbound part.
return c_bound, c_load - c_bound
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
The step has no default logic.
Thus it raises `NotImplementedError` if called.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
Raises
------
NotImplementedError
This method has no default implementation. Thus, calling it
will raise this error.
"""
# Not implemented in this class, as there is
# no consensus on typical dynamics and the way to describe it.
raise NotImplementedError("Function not implemented in this class")
def _sim_c_recycle_propagation(
self,
f_unbound: _np.ndarray,
c_unbound: _np.ndarray,
c_wash_desorbed: _typing.Optional[_np.ndarray]
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Propagate unbound and desorbed material through the column.
Unbound (breakthrough during load) and desorbed (during wash)
sections might have different flow rates as they come from
different steps - load and wash.
Parameters
----------
f_unbound
Flow rate profile during 'total load' step for a cycle.
The step includes wash recycle, load recycle and load step.
c_unbound
Conc profile of overloaded material during load step
(plus previous wash and load recycle).
c_wash_desorbed
Conc profile of desorbed material during wash step.
Returns
-------
c_unbound_propagated
Propagated conc profile of overloaded material.
c_wash_desorbed_propagated
Propagated conc profile of desorbed material.
"""
assert hasattr(self, "_wash_f") and self._wash_f > 0
assert hasattr(self, "_wash_t") and self._wash_t > 0
assert self.load_recycle_pdf is not None
assert c_unbound.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_unbound.shape[1] == f_unbound.size
if c_wash_desorbed is None or c_wash_desorbed.size == 0:
c_wash_desorbed = _np.zeros([
self._n_species - len(self.non_binding_species),
int(round(self._wash_t / self._dt))])
else:
assert c_wash_desorbed.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_wash_desorbed.shape[1] == \
int(round(self._wash_t / self._dt))
# Combine on volumetric scale.
v_load = self._dt * f_unbound.cumsum()
v_wash = v_load[-1] + \
self._dt * _np.arange(1, c_wash_desorbed.shape[1] + 1) \
* self._wash_f
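# Resample the combined outlet profile onto a uniform volume grid
# (step = min flow rate * dt) so that propagation through the column
# can be applied once, at that single effective flow rate.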
min_flow = min(f_unbound.min(), self._wash_f)
dv = min_flow * self._dt
v = _np.arange(dv,
(v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv,
dv)
c_v_combined = _interp.interp1d(
_np.concatenate((v_load, v_wash), axis=0),
_np.concatenate((c_unbound, c_wash_desorbed), axis=1),
fill_value="extrapolate"
)(v)
c_v_combined[c_v_combined < 0] = 0
# Simulate traveling of leftover material through the column.
self._update_load_recycle_pdf(min_flow)
c_v_combined_propagated = _utils.convolution.time_conv(
self._dt, c_v_combined, self._p_load_recycle_pdf)
# Split back on time scale.
c_combined_propagated = _interp.interp1d(
v,
c_v_combined_propagated,
fill_value="extrapolate"
)(_np.concatenate((v_load, v_wash), axis=0))
c_combined_propagated[c_combined_propagated < 0] = 0
c_unbound_propagated = c_combined_propagated[:, :v_load.size]
c_wash_desorbed_propagated = c_combined_propagated[:, v_load.size:]
return c_unbound_propagated, c_wash_desorbed_propagated
def _sim_c_elution_desorption(self,
m_bound: _np.ndarray
) -> _typing.Tuple[_np.ndarray,
_np.ndarray]:
"""Simulate elution step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column.
`m_bound.size == n_species`
Returns
-------
c_elution
Outlet concentration profile during the elution.
b_elution_peak
Boolean vector. Peak is collected where the value is `True`.
"""
assert self._elution_f > 0
assert self._elution_t > 0
i_elution_duration = int(round(self._elution_t / self._dt))
# Multiply elution peak with the amount of captured product.
c_elution = \
self._p_elution_peak[_np.newaxis, :i_elution_duration] * \
m_bound[:, _np.newaxis] / self._elution_f
# Pad with zeros to cover the entire elution step duration.
if c_elution.shape[1] < i_elution_duration:
c_elution = _np.pad(c_elution,
((0, 0),
(0, i_elution_duration - c_elution.shape[1])),
mode="constant")
# Boolean mask - `True` where peak is being collected.
b_elution_peak = self._elution_peak_mask
return c_elution, b_elution_peak
def _sim_c_elution_buffer(self, n_time_steps: int) -> _np.ndarray:
"""Get elution buffer composition at the outlet of the column.
By default the buffer composition is constant throughout the
elution step.
Feel free to override this function if you want to simulate
a linear gradient or if the transient phenomena at the beginning
of the peak cut need to be considered.
Parameters
----------
n_time_steps
Duration of elution step in number of time steps.
Returns
-------
ndarray
Buffer concentration profile at the outlet of the column
during the elution step.
"""
# Elution buffer composition.
elution_buffer_composition = \
self.elution_buffer_c.reshape(self.elution_buffer_c.size, 1)
assert elution_buffer_composition.size == 0 \
or elution_buffer_composition.size == self._n_species, \
f"Elution buffer composition must be either empty or have" \
f" a concentration value for each specie."
assert _np.all(elution_buffer_composition >= 0), \
"Concentration values in elution buffer must be >= 0"
if elution_buffer_composition.size == 0:
elution_buffer_composition = _np.zeros([self._n_species, 1])
self.log.i_data(self._log_tree,
"elution_buffer_composition",
elution_buffer_composition)
# Constant profile.
c_elution_buffer = elution_buffer_composition \
* _np.ones_like(self._t[:n_time_steps])
return c_elution_buffer
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def _sim_c_regeneration(self,
m_bound: _np.ndarray
) -> _typing.Optional[_np.ndarray]:
"""Simulate regeneration step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column at
the beginning of the regeneration step.
`m_bound.size == n_species`.
Returns
-------
Optional[ndarray]
Outlet concentration profile during regeneration step.
E.g. regeneration peak.
"""
# No default implementation.
c_regeneration = None
return c_regeneration
def _sim_c_out_cycle(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_typing.Optional[_np.ndarray],
_typing.Optional[_np.ndarray],
_np.ndarray,
_np.ndarray,
_typing.Optional[_np.ndarray]]:
"""Simulates load-wash-elution-regeneration steps.
Regeneration is optional.
This function can be replaced in case user wants to use some
other variation of bind-elution dynamics.
Elution peak cut is applied in this function.
Elution peak shape must be defined by now.
Return profiles that are `None` are considered being zero.
Parameters
----------
f_load
Inlet (recycle + load) flow rate profile for a cycle.
The flow rate might be different during wash recycle.
c_load
Inlet (recycle + load) concentration profile.
Returns
-------
c_load
Conc profile at the outlet of the column during load.
c_wash
Conc profile at the outlet of the column during wash.
c_elution
Conc profile at the outlet of the column during elution.
b_elution
Boolean mask for elution step. `True` where peak is being
collected.
c_regeneration
Conc profile at the outlet of the column during
regeneration.
"""
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._elution_f > 0
assert self._elution_t > 0
assert self._load_f > 0
assert self._cv > 0
# Evaluate binding.
c_bound, c_unbound = self._sim_c_load_binding(f_load, c_load)
# Log.
m_load = (c_load * f_load[_np.newaxis, :]).sum(1) * self._dt
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1) * self._dt
self.log.i_data(self._cycle_tree,
"column_utilization",
m_bound / self._cv / self.load_bt.get_total_bc())
self.log.i_data(self._cycle_tree, "m_load", m_load)
self.log.i_data(self._cycle_tree, "m_bound", m_bound)
self.log.i_data(self._cycle_tree, "m_unbound", m_load - m_bound)
self.log.d_data(self._cycle_tree, "f_load", f_load)
self.log.d_data(self._cycle_tree, "c_load", c_load)
self.log.d_data(self._cycle_tree, "c_bound", c_bound)
self.log.d_data(self._cycle_tree, "c_unbound", c_unbound)
# Evaluate desorption during wash.
c_wash_desorbed = None
if self.wash_desorption:
c_wash_desorbed = self._sim_c_wash_desorption(f_load, c_bound)
if c_wash_desorbed.size > 0:
# Subtract desorbed material from bound material.
m_bound -= c_wash_desorbed.sum(1)
# Log.
self.log.i_data(self._cycle_tree,
"m_wash_desorbed",
c_wash_desorbed.sum(1) * self._wash_f * self._dt)
self.log.d_data(self._cycle_tree,
"c_wash_desorbed",
c_wash_desorbed)
# Propagate unbound and desorbed material throughout the column.
c_out_load = c_unbound
c_out_wash = c_wash_desorbed
if self.load_recycle or self.wash_recycle:
c_out_load, c_out_wash = \
self._sim_c_recycle_propagation(f_load,
c_unbound,
c_wash_desorbed)
# Get elution peak.
c_out_elution, elution_peak_mask = \
self._sim_c_elution_desorption(m_bound)
# Log.
m_elution_peak = (c_out_elution * elution_peak_mask[_np.newaxis, :]
).sum(1) * self._elution_f * self._dt
m_elution = c_out_elution.sum(1) * self._elution_f * self._dt
self.log.i_data(self._cycle_tree,
"m_elution_peak", m_elution_peak)
self.log.i_data(self._cycle_tree,
"m_elution", m_elution)
self.log.i_data(self._cycle_tree,
"m_elution_peak_cut_loss", m_elution - m_elution_peak)
# Get regeneration peak.
c_out_regeneration = self._sim_c_regeneration(
m_bound - c_out_elution.sum(1) * self._elution_f * self._dt)
return c_out_load, c_out_wash, c_out_elution, \
elution_peak_mask, c_out_regeneration
def _calculate(self):
# Pre calculate parameters and repetitive profiles.
self._prepare_simulation()
# Assert proper list of binding species.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
assert len(binding_species) > 0
# Copy inlet vectors.
c_in_load = self._c[binding_species].copy()
f_in_load = self._f.copy()
f_in_i_end = min(_utils.vectors.true_end(f_in_load > 0), self._t.size)
c_in_load[:, f_in_i_end:] = 0
# Clear for results.
self._c[:] = 0
self._f[:] = 0
# Prepare logger.
log_data_cycles = list()
self.log.set_branch(self._log_tree, "cycles", log_data_cycles)
# Variable to store wash recycle to.
previous_c_bt_wash: _typing.Optional[_np.ndarray] = None
# Loop across cycles.
for i in range(self._cycle_start_i_list.size):
# Load-wash-elution-regeneration-equilibration steps for a column.
# Load step starts at `self._cycle_start_i_list[i]`.
# Prepare logger for this cycle.
self._cycle_tree = dict()
log_data_cycles.append(self._cycle_tree)
# Load start and end time as the column sees it.
if i > 0 and self.load_recycle:
# Column sees leftovers from previous load during recycling.
cycle_load_i_start = self._cycle_start_i_list[i - 1]
else:
cycle_load_i_start = self._cycle_start_i_list[i]
# Calc cycle end (either next cycle or end of simulation time).
if i + 1 < self._cycle_start_i_list.size:
cycle_load_i_end = self._cycle_start_i_list[i + 1]
else:
cycle_load_i_end = f_in_i_end - 1
# Log results.
self.log.i_data(self._cycle_tree,
"i_cycle_load_start",
cycle_load_i_start)
self.log.i_data(self._cycle_tree,
"i_cycle_load_step_start",
self._cycle_start_i_list[i])
self.log.i_data(self._cycle_tree,
"i_cycle_load_end",
cycle_load_i_end)
# Calc profiles at column outlet.
c_out_load, c_out_wash, c_out_elution, \
b_out_elution, c_out_regeneration = self._sim_c_out_cycle(
f_in_load[cycle_load_i_start:cycle_load_i_end],
c_in_load[:, cycle_load_i_start:cycle_load_i_end]
)
self.log.d_data(self._cycle_tree,
"c_out_load", c_out_load)
self.log.d_data(self._cycle_tree,
"c_out_wash", c_out_wash)
self.log.d_data(self._cycle_tree,
"c_out_elution", c_out_elution)
self.log.d_data(self._cycle_tree,
"b_out_elution", b_out_elution)
self.log.d_data(self._cycle_tree,
"c_out_regeneration", c_out_regeneration)
# Load recycle.
if self.load_recycle:
# Recycle load during the load step.
i_load_start_rel = self._cycle_start_i_list[i] \
- cycle_load_i_start
c_load_recycle = c_out_load[:, i_load_start_rel:]
c_in_load[:, self._cycle_start_i_list[i]:cycle_load_i_end] = \
c_load_recycle
self.log.i_data(self._cycle_tree, "m_load_recycle",
c_load_recycle.sum(1)
* self._load_f * self._dt)
self.log.d_data(self._cycle_tree, "c_load_recycle",
c_load_recycle)
# Losses during load == bt through 2nd column.
c_loss_bt_2nd_column = c_out_load[:, i_load_start_rel]
self.log.i_data(self._cycle_tree, "m_loss_bt_2nd_column",
c_loss_bt_2nd_column.sum()
* self._dt * self._load_f)
self.log.d_data(self._cycle_tree, "c_loss_bt_2nd_column",
c_loss_bt_2nd_column)
else:
# report losses during load
m_loss_load = c_out_load.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_load", m_loss_load)
# Wash recycle.
if self.wash_recycle:
if previous_c_bt_wash is not None \
and previous_c_bt_wash.size > 0:
# Clip wash recycle duration if needed.
i_wash_duration = min(
self._wash_recycle_i_duration,
self._t.size - self._cycle_start_i_list[i])
# Log losses due to discarding load bt during wash recycle.
s = c_in_load[
:,
self._cycle_start_i_list[i]:self._cycle_start_i_list[i]
+ i_wash_duration]
self.log.i_data(self._cycle_tree,
"m_loss_load_bt_during_wash_recycle",
s.sum() * self._dt * self._load_f)
self.log.d_data(self._cycle_tree,
"c_lost_load_during_wash_recycle", s)
self.log.d_data(self._cycle_tree, "c_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration])
self.log.i_data(
self._cycle_tree, "m_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration].sum(1)
* self._dt * self._wash_f)
# Apply previous wash recycle onto the inlet profile.
s[:] = previous_c_bt_wash[:, :i_wash_duration]
f_in_load[self._cycle_start_i_list[i]:
self._cycle_start_i_list[i]
+ i_wash_duration] = self._wash_f
# Save wash from this cycle to be used during the next cycle.
previous_c_bt_wash = c_out_wash
else:
# Report losses during wash.
if c_out_wash is None:
c_out_wash = _np.zeros(
[len(binding_species),
int(round(self._wash_t / self._dt))])
m_loss_wash = c_out_wash.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_wash", m_loss_wash)
# Elution.
[i_el_rel_start, i_el_rel_end] = \
_utils.vectors.true_start_and_end(b_out_elution)
i_el_start = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_start)
i_el_end = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_end)
i_el_rel_end = i_el_rel_start + i_el_end - i_el_start
# Log.
self.log.i_data(self._cycle_tree, "i_elution_start", i_el_start)
self.log.i_data(self._cycle_tree, "i_elution_end", i_el_end)
# Write to global outlet.
self._f[i_el_start:i_el_end] = self._elution_f
self._c[binding_species, i_el_start:i_el_end] = \
c_out_elution[:, i_el_rel_start:i_el_rel_end]
class ACC(AlternatingChromatography):
"""Alternating column chromatography without recycling.
Alternating load-bind-elution twin-column chromatography without
recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "ACC".
Notes
-----
For list of attributes refer to :class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> acc_pro_a = ACC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... uo_id="pro_a_acc",
... gui_title="ProteinA ACC",
... )
>>> acc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> acc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> acc_pro_a.equilibration_f_rel = 1
>>> # Load 20 CVs.
>>> acc_pro_a.load_cv = 20
>>> # Define wash step.
>>> acc_pro_a.wash_cv = 5
>>> # Elution step.
>>> acc_pro_a.elution_cv = 3
>>> # 1st moment of elution peak from the data above.
>>> acc_pro_a.elution_peak_position_cv = 1.2
>>> acc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> acc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> acc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
>>> # Simulate ACC.
>>> f_out, c_out = acc_pro_a.evaluate(f_in, c_in)
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "ACC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by ACC.
Raises
------
NotImplementedError
Raised if this function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCC(AlternatingChromatography):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column. The unbound (not captured)
part is propagated through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCC".
Notes
-----
For list of additional attributes refer to
:class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> load_recycle_pdf = _pdf.GaussianFixedDispersion(t, 2 * 2 / 30)
>>> pcc_pro_a = PCC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... load_recycle_pdf=load_recycle_pdf,
... # Porosity of the column for protein.
... column_porosity_retentate=0.64,
... uo_id="pro_a_pcc",
... gui_title="ProteinA PCC",
... )
>>> pcc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> pcc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> pcc_pro_a.equilibration_f_rel = 1
>>> # Load until 70 % breakthrough.
>>> pcc_pro_a.load_c_end_relative_ss = 0.7
>>> # Automatically extend the first cycle to reach steady state faster.
>>> pcc_pro_a.load_extend_first_cycle = True
>>> # Define wash step.
>>> # There is no desorption during wash step in this example.
>>> pcc_pro_a.wash_cv = 5
>>> pcc_pro_a.wash_recycle = True
>>> pcc_pro_a.wash_recycle_duration_cv = 2
>>> # Elution step.
>>> pcc_pro_a.elution_cv = 3
>>> # 1st moment of elution peak from the data above.
>>> pcc_pro_a.elution_peak_position_cv = 1.2
>>> pcc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> pcc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> pcc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
>>> # Simulate PCC.
>>> f_out, c_out = pcc_pro_a.evaluate(f_in, c_in) # doctest: +ELLIPSIS
pro_a_pcc: Steady-state concentration is being estimated ...
pro_a_pcc: Steady-state concentration is being estimated ...
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = False
"""Recycle wash. Default = False."""
self.column_porosity_retentate = column_porosity_retentate
"""Column porosity for binding species.
See Also
--------
:class:`PCC`
Examples
--------
`column_porosity_retentate` is a mean residence time of the
product (protein) traveling through the column during
non-binding conditions (in CVs).
"""
self.load_recycle_pdf = load_recycle_pdf
"""PDF of wash and/or unbound load traveling through the column.
See Also
--------
:class:`PCC`
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by PCC.
Raises
------
NotImplementedError
Raised if this function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCCWithWashDesorption(PCC):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
The material desorption during wash step is defined by exponential
half life time
* :attr:`wash_desorption_tail_half_time_cv`
and the amount of desorbable material which is defined by
* :attr:`wash_desorption_desorbable_material_share` or
* :attr:`wash_desorption_desorbable_above_dbc`.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCCWithWashDesorption".
Notes
-----
During the wash step, weaker-binding isoforms might be desorbed and
recycled. In turn they are desorbed and recycled again during the next
cycle and so on, resulting in an increasing amount of desorbed material
during the wash step (even at steady-state). This is not considered by
this class. Furthermore, it is not a favorable case in terms of RTD
as the weakly bound material propagates from column to column for
many cycles.
For list of additional attributes refer to
:class:`PCC` and :class:`AlternatingChromatography`.
See Also
--------
:class:`PCC`
:class:`AlternatingChromatography`
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCCWithWashDesorption"):
super().__init__(t, uo_id, load_bt, load_recycle_pdf,
column_porosity_retentate, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = True
"""Recycle wash. Default = `True`."""
self.wash_desorption = True
"""Simulate desorption during wash step. Default = `True`."""
self.wash_desorption_tail_half_time_cv = -1
"""Wash desorption rate.
Required if :attr:`wash_desorption` is `True`.
Wash desorption is simulated as exponential decay with half-life
:attr:`wash_desorption_tail_half_time_cv`.
"""
self.wash_desorption_desorbable_material_share = -1
"""Share of material that can be desorbed during wash step.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
self.wash_desorption_desorbable_above_dbc = -1
"""Share of material that can be desorbed during wash step.
Share is defined as a share of material loaded onto the column
that exceeds specified `wash_desorption_desorbable_above_dbc`
binding capacity.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
`self.wash_desorption_tail_half_time_cv` needs to be defined.
One of `self.wash_desorption_desorbable_material_share` and
`self.wash_desorption_desorbable_above_dbc` needs to be defined.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
"""
assert self.wash_desorption_tail_half_time_cv > 0
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._cv > 0
assert self.wash_desorption_desorbable_material_share > 0 \
or self.wash_desorption_desorbable_above_dbc > 0
assert f_load.size == c_bound.shape[1]
assert c_bound.shape[0] \
== self._n_species - len(self.non_binding_species)
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1)[:, _np.newaxis] \
* self._dt
# Calc share of desorbable material.
k = -1
if self.wash_desorption_desorbable_material_share > 0:
k = self.wash_desorption_desorbable_material_share
if self.wash_desorption_desorbable_above_dbc > 0:
if k > 0:
self.log.w(
f"Share of desorbable material defined twice!!"
f" Using `load_recycle_wash_desorbable_material_share`")
else:
k = max(0,
1 - self.wash_desorption_desorbable_above_dbc
* self._cv / m_bound.sum())
assert 1 >= k >= 0, f"Share of desorbable material {k}" \
f" must be >= 0 and <= 1."
i_wash_duration = int(round(self._wash_t / self._dt))
# Generate exponential tail.
exp_pdf = _pdf.TanksInSeries(self._t[:i_wash_duration],
n_tanks=1,
pdf_id=f"wash_desorption_exp_drop")
exp_pdf.allow_open_end = True
exp_pdf.trim_and_normalize = False
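# Convert the half-life from CVs to time (via cv and the wash flow rate) and
# to the mean residence time of the exponential decay by dividing by ln(2).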
tau = self.wash_desorption_tail_half_time_cv \
* self._cv / self._wash_f / _np.log(2)
exp_pdf.update_pdf(rt_mean=tau)
p = exp_pdf.get_p()[_np.newaxis, :i_wash_duration]
# Scale desorbed material conc due to differences in flow rate.
c_desorbed = m_bound * k * p / self._wash_f
# Pad with zeros if needed.
c_desorbed = _np.pad(c_desorbed,
((0, 0),
(0, i_wash_duration - c_desorbed.shape[1])),
mode="constant")
# Log.
self.log.d_data(self._cycle_tree if hasattr(self, "_cycle_tree")
else self._log_tree,
"p_desorbed",
p)
return c_desorbed
|
[
"numpy.ones_like",
"bio_rtd.utils.vectors.true_start",
"bio_rtd.utils.convolution.time_conv",
"numpy.ones",
"bio_rtd.utils.vectors.true_end",
"numpy.log",
"numpy.zeros_like",
"bio_rtd.utils.vectors.true_start_and_end",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.pad",
"numpy.zeros",
"numpy.rint",
"numpy.concatenate",
"numpy.cumsum",
"numpy.all",
"bio_rtd.pdf.TanksInSeries",
"numpy.arange"
] |
[((16197, 16210), 'numpy.array', '_np.array', (['[]'], {}), '([])\n', (16206, 16210), True, 'import numpy as _np\n'), ((54794, 54840), 'bio_rtd.utils.vectors.true_start_and_end', '_utils.vectors.true_start_and_end', (['(self._f > 0)'], {}), '(self._f > 0)\n', (54827, 54840), True, 'import bio_rtd.utils as _utils\n'), ((55563, 55669), 'numpy.arange', '_np.arange', (['(self._t[flow_i_start] + load_extend_first_cycle_t)', 'self._t[flow_i_end - 1]', 'self._cycle_t'], {}), '(self._t[flow_i_start] + load_extend_first_cycle_t, self._t[\n flow_i_end - 1], self._cycle_t)\n', (55573, 55669), True, 'import numpy as _np\n'), ((62054, 62126), 'numpy.arange', '_np.arange', (['dv', '((v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv)', 'dv'], {}), '(dv, (v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv, dv)\n', (62064, 62126), True, 'import numpy as _np\n'), ((62581, 62659), 'bio_rtd.utils.convolution.time_conv', '_utils.convolution.time_conv', (['self._dt', 'c_v_combined', 'self._p_load_recycle_pdf'], {}), '(self._dt, c_v_combined, self._p_load_recycle_pdf)\n', (62609, 62659), True, 'import bio_rtd.utils as _utils\n'), ((65786, 65826), 'numpy.all', '_np.all', (['(elution_buffer_composition >= 0)'], {}), '(elution_buffer_composition >= 0)\n', (65793, 65826), True, 'import numpy as _np\n'), ((93789, 93886), 'bio_rtd.pdf.TanksInSeries', '_pdf.TanksInSeries', (['self._t[:i_wash_duration]'], {'n_tanks': '(1)', 'pdf_id': 'f"""wash_desorption_exp_drop"""'}), "(self._t[:i_wash_duration], n_tanks=1, pdf_id=\n f'wash_desorption_exp_drop')\n", (93807, 93886), True, 'import bio_rtd.pdf as _pdf\n'), ((94423, 94517), 'numpy.pad', '_np.pad', (['c_desorbed', '((0, 0), (0, i_wash_duration - c_desorbed.shape[1]))'], {'mode': '"""constant"""'}), "(c_desorbed, ((0, 0), (0, i_wash_duration - c_desorbed.shape[1])),\n mode='constant')\n", (94430, 94517), True, 'import numpy as _np\n'), ((62741, 62811), 'scipy.interpolate.interp1d', '_interp.interp1d', (['v', 'c_v_combined_propagated'], {'fill_value': '"""extrapolate"""'}), "(v, c_v_combined_propagated, fill_value='extrapolate')\n", (62757, 62811), True, 'import scipy.interpolate as _interp\n'), ((62858, 62899), 'numpy.concatenate', '_np.concatenate', (['(v_load, v_wash)'], {'axis': '(0)'}), '((v_load, v_wash), axis=0)\n', (62873, 62899), True, 'import numpy as _np\n'), ((64319, 64414), 'numpy.pad', '_np.pad', (['c_elution', '((0, 0), (0, i_elution_duration - c_elution.shape[1]))'], {'mode': '"""constant"""'}), "(c_elution, ((0, 0), (0, i_elution_duration - c_elution.shape[1])),\n mode='constant')\n", (64326, 64414), True, 'import numpy as _np\n'), ((65986, 66017), 'numpy.zeros', '_np.zeros', (['[self._n_species, 1]'], {}), '([self._n_species, 1])\n', (65995, 66017), True, 'import numpy as _np\n'), ((66262, 66299), 'numpy.ones_like', '_np.ones_like', (['self._t[:n_time_steps]'], {}), '(self._t[:n_time_steps])\n', (66275, 66299), True, 'import numpy as _np\n'), ((72260, 72298), 'bio_rtd.utils.vectors.true_end', '_utils.vectors.true_end', (['(f_in_load > 0)'], {}), '(f_in_load > 0)\n', (72283, 72298), True, 'import bio_rtd.utils as _utils\n'), ((78637, 78685), 'bio_rtd.utils.vectors.true_start_and_end', '_utils.vectors.true_start_and_end', (['b_out_elution'], {}), '(b_out_elution)\n', (78670, 78685), True, 'import bio_rtd.utils as _utils\n'), ((94132, 94142), 'numpy.log', '_np.log', (['(2)'], {}), '(2)\n', (94139, 94142), True, 'import numpy as _np\n'), ((40682, 40704), 'numpy.ones', '_np.ones', (['self._t.size'], {}), '(self._t.size)\n', (40690, 40704), True, 'import 
numpy as _np\n'), ((55800, 55839), 'numpy.rint', '_np.rint', (['(cycle_start_t_list / self._dt)'], {}), '(cycle_start_t_list / self._dt)\n', (55808, 55839), True, 'import numpy as _np\n'), ((62226, 62267), 'numpy.concatenate', '_np.concatenate', (['(v_load, v_wash)'], {'axis': '(0)'}), '((v_load, v_wash), axis=0)\n', (62241, 62267), True, 'import numpy as _np\n'), ((62281, 62334), 'numpy.concatenate', '_np.concatenate', (['(c_unbound, c_wash_desorbed)'], {'axis': '(1)'}), '((c_unbound, c_wash_desorbed), axis=1)\n', (62296, 62334), True, 'import numpy as _np\n'), ((31650, 31689), 'numpy.cumsum', '_np.cumsum', (['(elution_peak_pdf * self._dt)'], {}), '(elution_peak_pdf * self._dt)\n', (31660, 31689), True, 'import numpy as _np\n'), ((33637, 33676), 'numpy.cumsum', '_np.cumsum', (['(elution_peak_pdf * self._dt)'], {}), '(elution_peak_pdf * self._dt)\n', (33647, 33676), True, 'import numpy as _np\n'), ((47617, 47671), 'numpy.concatenate', '_np.concatenate', (['(rec_load, c_load_fist_cycle)'], {'axis': '(1)'}), '((rec_load, c_load_fist_cycle), axis=1)\n', (47632, 47671), True, 'import numpy as _np\n'), ((49538, 49574), 'numpy.ones', '_np.ones', (['c_load_fist_cycle.shape[1]'], {}), '(c_load_fist_cycle.shape[1])\n', (49546, 49574), True, 'import numpy as _np\n'), ((61882, 61925), 'numpy.arange', '_np.arange', (['(1)', '(c_wash_desorbed.shape[1] + 1)'], {}), '(1, c_wash_desorbed.shape[1] + 1)\n', (61892, 61925), True, 'import numpy as _np\n'), ((34333, 34395), 'bio_rtd.utils.vectors.true_end', '_utils.vectors.true_end', (['(self._t < self.elution_peak_cut_end_t)'], {}), '(self._t < self.elution_peak_cut_end_t)\n', (34356, 34395), True, 'import bio_rtd.utils as _utils\n'), ((47450, 47511), 'numpy.zeros_like', '_np.zeros_like', (['bt_load_out[:, i_prev_cycle:i_cycle_duration]'], {}), '(bt_load_out[:, i_prev_cycle:i_cycle_duration])\n', (47464, 47511), True, 'import numpy as _np\n'), ((47822, 47852), 'numpy.ones', '_np.ones', (['c_next_load.shape[1]'], {}), '(c_next_load.shape[1])\n', (47830, 47852), True, 'import numpy as _np\n'), ((52899, 52921), 'numpy.ones', '_np.ones', (['self._t.size'], {}), '(self._t.size)\n', (52907, 52921), True, 'import numpy as _np\n'), ((54397, 54438), 'bio_rtd.utils.vectors.true_start', '_utils.vectors.true_start', (['(m_ext_bt >= dm)'], {}), '(m_ext_bt >= dm)\n', (54422, 54438), True, 'import bio_rtd.utils as _utils\n')]
|
import os
import sys
import argparse
import copy
import numpy as np
import scipy.special
sys.path.append(os.getcwd())
def log_gaussian_pdf(theta, sigma=1, mu=0, ndim=None):
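# Log-density of an isotropic Gaussian:
# log N(theta; mu, sigma^2 I) = -||theta - mu||^2 / (2 sigma^2)
#                               - (ndim / 2) * log(2 pi sigma^2)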
if ndim is None:
try:
ndim = len(theta)
except TypeError:
assert isinstance(theta, (float, int)), theta
ndim = 1
logl = -(np.sum((theta - mu) ** 2) / (2 * sigma ** 2))
logl -= np.log(2 * np.pi * (sigma ** 2)) * ndim / 2.0
return logl
class Gaussian(object):
def __init__(self, sigma=1.0, nderived=0):
self.sigma = sigma
self.nderived = nderived
def __call__(self, theta):
logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)
return logl, [0.0] * self.nderived
class GaussianMix(object):
def __init__(self, sep=4, weights=(0.4, 0.3, 0.2, 0.1), sigma=1,
nderived=0):
assert len(weights) in [2, 3, 4], (
'Weights must have 2, 3 or 4 components. Weights=' + str(weights))
assert np.isclose(sum(weights), 1), (
'Weights must sum to 1! Weights=' + str(weights))
self.nderived = nderived
self.weights = weights
self.sigmas = [sigma] * len(weights)
positions = []
positions.append(np.asarray([0, sep]))
positions.append(np.asarray([0, -sep]))
positions.append(np.asarray([sep, 0]))
positions.append(np.asarray([-sep, 0]))
self.positions = positions[:len(weights)]
def __call__(self, theta):
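# Mixture log-likelihood: log sum_i w_i * N(theta; position_i, sigma_i),
# evaluated stably as a logsumexp over (component log-density + log weight).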
thetas = []
for pos in self.positions:
thetas.append(copy.deepcopy(theta))
thetas[-1][:2] -= pos
logls = [(Gaussian(sigma=self.sigmas[i])(thetas[i])[0]
+ np.log(self.weights[i])) for i in range(len(self.weights))]
logl = scipy.special.logsumexp(logls)
return logl, [0.0] * self.nderived
def main(args):
from nnest import NestedSampler
g = GaussianMix()
def loglike(z):
return np.array([g(x)[0] for x in z])
def transform(x):
return 10. * x
volume_switch = 1.0 / (5 * args.num_slow)
sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,
hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,
use_gpu=args.use_gpu)
sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=volume_switch, noise=args.noise)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=5,
help="Dimensionality")
parser.add_argument('--train_iters', type=int, default=2000,
help="number of train iters")
parser.add_argument("--mcmc_steps", type=int, default=0)
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('-use_gpu', action='store_true')
parser.add_argument('--flow', type=str, default='nvp')
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--num_slow', type=int, default=2)
parser.add_argument('--log_dir', type=str, default='logs/mog4_fast')
args = parser.parse_args()
main(args)
|
[
"argparse.ArgumentParser",
"numpy.log",
"numpy.asarray",
"nnest.NestedSampler",
"os.getcwd",
"numpy.sum",
"copy.deepcopy"
] |
[((107, 118), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (116, 118), False, 'import os\n'), ((2136, 2395), 'nnest.NestedSampler', 'NestedSampler', (['args.x_dim', 'loglike'], {'transform': 'transform', 'log_dir': 'args.log_dir', 'num_live_points': 'args.num_live_points', 'hidden_dim': 'args.hidden_dim', 'num_layers': 'args.num_layers', 'num_blocks': 'args.num_blocks', 'num_slow': 'args.num_slow', 'use_gpu': 'args.use_gpu'}), '(args.x_dim, loglike, transform=transform, log_dir=args.\n log_dir, num_live_points=args.num_live_points, hidden_dim=args.\n hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks,\n num_slow=args.num_slow, use_gpu=args.use_gpu)\n', (2149, 2395), False, 'from nnest import NestedSampler\n'), ((2602, 2627), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2625, 2627), False, 'import argparse\n'), ((359, 384), 'numpy.sum', 'np.sum', (['((theta - mu) ** 2)'], {}), '((theta - mu) ** 2)\n', (365, 384), True, 'import numpy as np\n'), ((417, 447), 'numpy.log', 'np.log', (['(2 * np.pi * sigma ** 2)'], {}), '(2 * np.pi * sigma ** 2)\n', (423, 447), True, 'import numpy as np\n'), ((1268, 1288), 'numpy.asarray', 'np.asarray', (['[0, sep]'], {}), '([0, sep])\n', (1278, 1288), True, 'import numpy as np\n'), ((1315, 1336), 'numpy.asarray', 'np.asarray', (['[0, -sep]'], {}), '([0, -sep])\n', (1325, 1336), True, 'import numpy as np\n'), ((1363, 1383), 'numpy.asarray', 'np.asarray', (['[sep, 0]'], {}), '([sep, 0])\n', (1373, 1383), True, 'import numpy as np\n'), ((1410, 1431), 'numpy.asarray', 'np.asarray', (['[-sep, 0]'], {}), '([-sep, 0])\n', (1420, 1431), True, 'import numpy as np\n'), ((1596, 1616), 'copy.deepcopy', 'copy.deepcopy', (['theta'], {}), '(theta)\n', (1609, 1616), False, 'import copy\n'), ((1735, 1758), 'numpy.log', 'np.log', (['self.weights[i]'], {}), '(self.weights[i])\n', (1741, 1758), True, 'import numpy as np\n')]
|
from typing import Dict, Tuple
import numpy as np
def einsum(expr: str, *args: Tuple[np.ndarray, ...], **kwargs) -> np.ndarray:
(a, b) = map(str.strip, expr.split("->"))
a_ = list(
map(lambda s: list(map(str.strip, s.split(","))), map(str.strip, a.split(";")))
)
b_ = list(map(str.strip, b.split(",")))
chars = "abcdefghijklmnopqrstuvwxyz"
char_map: Dict[str, str] = {}
i = 0
for cs in a_:
for c in cs:
if c not in char_map:
char_map[c] = chars[i]
i += 1
for c in b_:
if c not in char_map:
char_map[c] = chars[i]
i += 1
expr_ = "->".join(
[
",".join(map(lambda ss: "".join(map(lambda s: char_map[s], ss)), a_)),
"".join(map(lambda s: char_map[s], b_)),
]
)
return np.einsum(expr_, *args, **kwargs)
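# Minimal usage sketch (hypothetical shapes, for illustration only): labels
# such as "batch" are remapped to single characters, so the call below is
# equivalent to np.einsum("ab,bc->ac", x, w), i.e. an ordinary matrix product.
if __name__ == "__main__":
    x = np.ones((4, 3))
    w = np.ones((3, 2))
    y = einsum("batch,dim ; dim,out -> batch,out", x, w)
    assert y.shape == (4, 2)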
|
[
"numpy.einsum"
] |
[((846, 879), 'numpy.einsum', 'np.einsum', (['expr_', '*args'], {}), '(expr_, *args, **kwargs)\n', (855, 879), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
class _LinearModel(object):
def __init__(self):
self.w = None
def fit(self, x, y):
pass
def predict(self, x):
return np.dot(x, self.w)
def cost(self, x, y):
pass
def precision(self, x, y):
p = self.predict(x)
return (1.0 / len(p)) * np.sum(p == y)
class LeastSquareRegression(_LinearModel):
def __init__(self):
super(LeastSquareRegression, self).__init__()
def fit(self, x, y):
xt = x.transpose()
self.w = np.linalg.pinv(np.dot(xt, x)).dot(xt).dot(y)
def cost(self, x, y):
""" Residual Sum of Squares """
r = y - np.dot(x, self.w)
rt= np.transpose(r)
return (1.0 / len(x)) * np.trace(np.dot(rt, r))
class RidgeRegression(LeastSquareRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeRegression, self).__init__()
self.incr = incr
self.min_change = min_change
def fit(self, x, y):
xtrain, xval = np.split(x, [int(0.7*len(x))])
ytrain, yval = np.split(y, [int(0.7*len(y))])
alpha = 0.0
best_alpha = 0.0
best_cost = float("inf")
old_cost = float("inf")
new_cost = float("inf")
while True:
self._fit(xtrain, ytrain, alpha)
new_cost = self.cost(xval, yval)
if new_cost < best_cost:
best_cost = new_cost
best_alpha = alpha
#print("cost: %f, alpha: %f" % (best_cost, best_alpha))
if abs(new_cost - old_cost) < self.min_change:
break
old_cost = new_cost
alpha += self.incr
self._fit(xtrain, ytrain, best_alpha)
def _fit(self, x, y, alpha):
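# Closed-form ridge solution w = (X^T X + alpha * I)^-1 X^T y, computed
# without the leading bias column so the intercept is not penalized; the
# intercept is then recovered from the feature and target means.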
x = x[:,1:]
xt = np.transpose(x)
self.w = np.linalg.pinv(np.dot(xt, x) + alpha * np.eye(x.shape[1])).dot(xt).dot(y)
bias = np.mean(y, axis=0, keepdims=True) - np.dot(np.mean(x, axis=0, keepdims=True), self.w)
self.w = np.vstack([bias, self.w])
class LeastSquareClassification(LeastSquareRegression):
def __init__(self):
super(LeastSquareClassification, self).__init__()
def predict(self, x):
return super(LeastSquareClassification, self).predict(x).argmax(axis=1)
class RidgeClassification(RidgeRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeClassification, self).__init__(incr, min_change)
def predict(self, x):
return super(RidgeClassification, self).predict(x).argmax(axis=1)
class LDAClassification(_LinearModel):
def __init__(self):
self.w = None
self.priors = None
self.means = []
self.covs = []
def fit(self, x, y):
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count = np.sum (y, axis=0, keepdims=True)
self.priors = (1.0 / len(y)) * np.sum (y, axis=0, keepdims=True)
self.w = self._lda(x, y)
x_proj = np.dot(x, self.w)
means = (1.0 / class_count.T) * np.dot(y.T, x_proj)
for i in range(k):
xk_proj = x_proj[y_arg==i]
self.means.append(np.mean(xk_proj, axis = 0))
self.covs .append(np.cov (xk_proj, rowvar=False))
def predict(self, x):
k = self.w.shape[1]
x_proj = np.dot(x, self.w)
likelihood = np.column_stack([multivariate_normal.pdf(x_proj, self.means[i], self.covs[i]) for i in range(k)])
posterior = (likelihood * self.priors)
posterior = posterior / np.sum(posterior, axis=1, keepdims=True)
return np.argmax(posterior, axis=1)
def _lda(self, x, y):
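# Fisher discriminant directions: eigenvectors of pinv(S_w) @ S_b, where
# S_b is the between-class scatter and S_w the within-class scatter,
# sorted by decreasing eigenvalue magnitude (top k kept).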
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count= np.sum (y, axis=0, keepdims=True)
total_mean = np.mean(x, axis=0, keepdims=True)
class_mean = (1.0 / class_count.T) * np.dot(y.T, x)
mk_m = class_mean - total_mean
b_cov = np.dot(class_count * mk_m.T, mk_m)
w_cov = np.zeros(b_cov.shape)
for i in range(k):
xk = x[y_arg == i]
xk_mk = xk - class_mean[i]
w_cov += np.dot(xk_mk.T, xk_mk)
eig_vals, eig_vecs = np.linalg.eig(np.dot(np.linalg.pinv(w_cov), b_cov))
eig_vals = np.abs(eig_vals)
eig_args = np.argsort(eig_vals)[::-1][:k]
return eig_vecs[:, eig_args]
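# Minimal usage sketch on synthetic data (hypothetical values, for
# illustration only); the models expect a leading bias column of ones.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = np.hstack([np.ones((100, 1)), rng.randn(100, 3)])
    w_true = np.array([[0.5], [1.0], [-2.0], [3.0]])
    y = x.dot(w_true) + 0.1 * rng.randn(100, 1)
    model = RidgeRegression()
    model.fit(x, y)
    print("ridge weights:", model.w.ravel())
    print("cost (RSS / n):", model.cost(x, y))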
|
[
"numpy.mean",
"numpy.abs",
"numpy.eye",
"numpy.linalg.pinv",
"scipy.stats.multivariate_normal.pdf",
"numpy.argmax",
"numpy.argsort",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.vstack",
"numpy.cov",
"numpy.transpose"
] |
[((277, 294), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (283, 294), True, 'import numpy as np\n'), ((798, 813), 'numpy.transpose', 'np.transpose', (['r'], {}), '(r)\n', (810, 813), True, 'import numpy as np\n'), ((1976, 1991), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (1988, 1991), True, 'import numpy as np\n'), ((2209, 2234), 'numpy.vstack', 'np.vstack', (['[bias, self.w]'], {}), '([bias, self.w])\n', (2218, 2234), True, 'import numpy as np\n'), ((3000, 3020), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3009, 3020), True, 'import numpy as np\n'), ((3043, 3075), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (3049, 3075), True, 'import numpy as np\n'), ((3207, 3224), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (3213, 3224), True, 'import numpy as np\n'), ((3554, 3571), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (3560, 3571), True, 'import numpy as np\n'), ((3831, 3859), 'numpy.argmax', 'np.argmax', (['posterior'], {'axis': '(1)'}), '(posterior, axis=1)\n', (3840, 3859), True, 'import numpy as np\n'), ((3931, 3951), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3940, 3951), True, 'import numpy as np\n'), ((3974, 4006), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (3980, 4006), True, 'import numpy as np\n'), ((4029, 4062), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (4036, 4062), True, 'import numpy as np\n'), ((4188, 4222), 'numpy.dot', 'np.dot', (['(class_count * mk_m.T)', 'mk_m'], {}), '(class_count * mk_m.T, mk_m)\n', (4194, 4222), True, 'import numpy as np\n'), ((4248, 4269), 'numpy.zeros', 'np.zeros', (['b_cov.shape'], {}), '(b_cov.shape)\n', (4256, 4269), True, 'import numpy as np\n'), ((4516, 4532), 'numpy.abs', 'np.abs', (['eig_vals'], {}), '(eig_vals)\n', (4522, 4532), True, 'import numpy as np\n'), ((431, 445), 'numpy.sum', 'np.sum', (['(p == y)'], {}), '(p == y)\n', (437, 445), True, 'import numpy as np\n'), ((768, 785), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (774, 785), True, 'import numpy as np\n'), ((2101, 2134), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (2108, 2134), True, 'import numpy as np\n'), ((3117, 3149), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)', 'keepdims': '(True)'}), '(y, axis=0, keepdims=True)\n', (3123, 3149), True, 'import numpy as np\n'), ((3266, 3285), 'numpy.dot', 'np.dot', (['y.T', 'x_proj'], {}), '(y.T, x_proj)\n', (3272, 3285), True, 'import numpy as np\n'), ((3774, 3814), 'numpy.sum', 'np.sum', (['posterior'], {'axis': '(1)', 'keepdims': '(True)'}), '(posterior, axis=1, keepdims=True)\n', (3780, 3814), True, 'import numpy as np\n'), ((4108, 4122), 'numpy.dot', 'np.dot', (['y.T', 'x'], {}), '(y.T, x)\n', (4114, 4122), True, 'import numpy as np\n'), ((4392, 4414), 'numpy.dot', 'np.dot', (['xk_mk.T', 'xk_mk'], {}), '(xk_mk.T, xk_mk)\n', (4398, 4414), True, 'import numpy as np\n'), ((856, 869), 'numpy.dot', 'np.dot', (['rt', 'r'], {}), '(rt, r)\n', (862, 869), True, 'import numpy as np\n'), ((2144, 2177), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (2151, 2177), True, 'import numpy as np\n'), ((3383, 3407), 'numpy.mean', 'np.mean', (['xk_proj'], {'axis': '(0)'}), '(xk_proj, axis=0)\n', (3390, 3407), True, 
'import numpy as np\n'), ((3445, 3474), 'numpy.cov', 'np.cov', (['xk_proj'], {'rowvar': '(False)'}), '(xk_proj, rowvar=False)\n', (3451, 3474), True, 'import numpy as np\n'), ((3611, 3671), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['x_proj', 'self.means[i]', 'self.covs[i]'], {}), '(x_proj, self.means[i], self.covs[i])\n', (3634, 3671), False, 'from scipy.stats import multivariate_normal\n'), ((4466, 4487), 'numpy.linalg.pinv', 'np.linalg.pinv', (['w_cov'], {}), '(w_cov)\n', (4480, 4487), True, 'import numpy as np\n'), ((4553, 4573), 'numpy.argsort', 'np.argsort', (['eig_vals'], {}), '(eig_vals)\n', (4563, 4573), True, 'import numpy as np\n'), ((655, 668), 'numpy.dot', 'np.dot', (['xt', 'x'], {}), '(xt, x)\n', (661, 668), True, 'import numpy as np\n'), ((2025, 2038), 'numpy.dot', 'np.dot', (['xt', 'x'], {}), '(xt, x)\n', (2031, 2038), True, 'import numpy as np\n'), ((2049, 2067), 'numpy.eye', 'np.eye', (['x.shape[1]'], {}), '(x.shape[1])\n', (2055, 2067), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from copy import *
from bisect import *
from scipy.optimize import curve_fit
from sklearn.metrics import *
from collections import defaultdict as defd
import datetime,pickle
from DemandHelper import *
import warnings
warnings.filterwarnings("ignore")
#################################################################
#################################################################
#################################################################
class DemandForecastModel:
def __init__(self,rank_model='',forecast='',rmodel_beta=1.0,final_beta=1.0):
if rank_model != '':
self.ingest(rank_model,forecast,rmodel_beta,final_beta)
def ingest(self,rank_model,forecast,rmodel_beta=1.0,final_beta=1.0):
self.rank_model = rank_model
self.rmodel_beta = rmodel_beta
self.forecast = forecast
self.final_beta = final_beta
self.alldates = sorted(forecast.index)
def predict(self,rank=10000,date='2018-07-04',buybox=100):
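# Prediction chain: sales-rank model -> rank-model calibration (rmodel_beta)
# -> scaled by the forecast for the date (lower / point / upper estimates)
# -> final calibration (final_beta) -> converted from global to local demand
# via `global2local` using the buy-box percentage.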
if 'str' not in str(type(date)): date = str(date)[:10]
pred1 = self.rank_model.predict([rank])[0]
pred2 = pred1*self.rmodel_beta
d = self.forecast.loc[date]
mid,lo,hi = d['yhat'],d['yhat_lower'],d['yhat_upper']
rdr_preds = np.array([lo,mid,hi])
pred3 = pred2*rdr_preds
pred4 = pred3*self.final_beta
pred5 = global2local(pred4,buybox)
return pred5
#################################################################
#################################################################
# Export a fitted model to text file:
# These filenames normally end in '.pkl'
def ExportModel(filename,model_object):
pickle.dump(model_object, open(filename, 'wb'))
print('Model Saved TO: '+filename)
# Import a fitted model from text file:
# These filenames normally end in '.pkl'
def ImportModel(filename):
model_object = pickle.load(open(filename, 'rb'))
print('Model Imported FROM: '+filename)
return model_object
def GetToday():
today = datetime.datetime.today()
return str(today)[:10]
#################################################################
#################################################################
#################################################################
short2long = {
'H&G' : 'Home & Garden',
'L&G' : 'Lawn & Garden',
'SPORTS' : 'Sports & Outdoors',
'HI' : 'Home Improvement',
'TOY' : 'Toys & Games',
'KIT' : 'Home & Kitchen',
}
long2short = {}
for short in sorted(short2long):
long2short[short2long[short]] = short
Shorts = sorted(short2long)
Longs = sorted(long2short)
def ConvertToShort(thing):
if thing in long2short: return long2short[thing]
return thing
Models2 = {}
for SH in Shorts:
fn = 'MODELS/'+SH+'/DFM2.pkl'
model = ImportModel(fn)
Models2[SH] = model
AllDates = sorted(set([str(a)[:10] for a in Models2['H&G'].alldates]))
#################################################################
#################################################################
# Returns a list of valid category names:
def GetCategories2():
return sorted(long2short)
# SPREETAIL DEMAND PREDICTION:
# cat : Category (String or List)
# rank : Sales Rank (Integer, 2-List, Long-List)
# date1 : First Date of Forecast ("2018-09-03")
# date2 : Final Date of Forecast OR # Days Forward ("2018-10-03" or 30)
# bb_ratio : BuyBox Percent (100.0)
# md_ratio : Marketplace Distribution Percent
def SpreetailPredict(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
if (not date1) or (str(date1).lower()=='today'): date1 = GetToday()
index1 = bisect_left(AllDates,date1)
if len(str(date2)) >10: date2 = str(date2)[:10]
if len(str(date2))==10: index2 = bisect_left(AllDates,date2)
else: index2 = index1+int(date2)
index_dif = abs(index2-index1)
index1 = min([index1,index2])
index2 = index1+index_dif
DateRange = AllDates[index1:index2+1]
LEN = len(DateRange)
#--------------------------------------
tdf = pd.DataFrame()
tdf['DATE'] = DateRange
#--------------------------------------
if 'list' in str(type(cat)):
cat = [ConvertToShort(a) for a in cat]
if len(cat)==LEN: tdf['CAT'] = cat
else: tdf['CAT'] = cat[0]
else: tdf['CAT'] = ConvertToShort(cat)
#--------------------------------------
if 'list' in str(type(rank)):
if len(rank)==LEN: tdf['RANK'] = rank
elif len(rank)==2:
r1,r2 = tuple(rank)
tdf['RANK'] = np.linspace(r1,r2,LEN)
else: tdf['RANK'] = rank[0]
else: tdf['RANK'] = rank
#--------------------------------------
md_ratio2 = max(0.3,min(md_ratio,0.99))
other_ratio = (1.0-md_ratio2)/md_ratio2
tdf['BBR'] = bb_ratio
tdf['MDR'] = md_ratio2
#--------------------------------------
M = tdf.values
results = []
for row in M:
d,c,r = tuple(row[:3])
pred_100 = Models2[c].predict(r,d,100.0)
pred_bbr = Models2[c].predict(r,d,100.0*bb_ratio)
results.append([pred_100,pred_bbr])
tdf['P_100'] = [r[0][1] for r in results]
tdf['P_100_HI'] = [r[0][2] for r in results]
tdf['P_100_LO'] = [r[0][0] for r in results]
tdf['P_BBR'] = [r[1][1] for r in results]
tdf['P_BBR_HI'] = [r[1][2] for r in results]
tdf['P_BBR_LO'] = [r[1][0] for r in results]
tdf['P_OTH'] = other_ratio * tdf['P_100']
tdf['P_OTH_HI'] = other_ratio * tdf['P_100_HI']
tdf['P_OTH_LO'] = other_ratio * tdf['P_100_LO']
tdf['P_TOT'] = tdf['P_BBR'] +tdf['P_OTH']
tdf['P_TOT_HI'] = tdf['P_BBR_HI']+tdf['P_OTH_HI']
tdf['P_TOT_LO'] = tdf['P_BBR_LO']+tdf['P_OTH_LO']
cols = list(tdf.columns)[5:]
for col in cols:
col2 = col+'_C'
tdf[col2] = np.cumsum(tdf[col])
Matrix = [list(tdf.columns)]
for row in tdf.values:
Matrix.append(list(row))
MainPred = list(tdf['P_TOT_C'])[-1]
return [MainPred,Matrix]
def SpreePred(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
result = SpreetailPredict(cat,rank,date1,date2,bb_ratio,md_ratio)
M = result[1]
cols,m = M[0],M[1:]
return pd.DataFrame(m,columns=cols)
#################################################################
#################################################################
# [END]
|
[
"pandas.DataFrame",
"numpy.array",
"numpy.linspace",
"datetime.datetime.today",
"numpy.cumsum",
"warnings.filterwarnings"
] |
[((265, 298), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (288, 298), False, 'import warnings\n'), ((2092, 2117), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2115, 2117), False, 'import datetime, pickle\n'), ((4079, 4093), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4091, 4093), True, 'import pandas as pd\n'), ((6042, 6071), 'pandas.DataFrame', 'pd.DataFrame', (['m'], {'columns': 'cols'}), '(m, columns=cols)\n', (6054, 6071), True, 'import pandas as pd\n'), ((1329, 1352), 'numpy.array', 'np.array', (['[lo, mid, hi]'], {}), '([lo, mid, hi])\n', (1337, 1352), True, 'import numpy as np\n'), ((5686, 5705), 'numpy.cumsum', 'np.cumsum', (['tdf[col]'], {}), '(tdf[col])\n', (5695, 5705), True, 'import numpy as np\n'), ((4520, 4544), 'numpy.linspace', 'np.linspace', (['r1', 'r2', 'LEN'], {}), '(r1, r2, LEN)\n', (4531, 4544), True, 'import numpy as np\n')]
|
import numpy as np
import spikemetrics.metrics as metrics
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
from spikemetrics.utils import Epoch, printProgressBar
from collections import OrderedDict
from .parameter_dictionaries import get_recording_gui_params, get_feature_gui_params
def make_curator_gui_params(params):
keys = list(params.keys())
types = [type(params[key]) for key in keys]
values = [params[key] for key in keys]
gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "Mode to compute noise SNR ('mad' | 'std' - default 'mad')"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Number of seconds to compute noise level from (default 10.0)"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Maximum number of spikes to compute templates from (default 1000)"},
{'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Use 'mean' or 'median' to compute templates"},
{'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
{'name': keys[5], 'type': 'int', 'value': values[5], 'default': values[5], 'title': "Random seed for reproducibility"},
{'name': keys[6], 'type': str(types[6].__name__), 'value': values[6], 'default': values[6], 'title': "If True, will be verbose in metric computation."},]
curator_gui_params = [{'name': 'threshold', 'type': 'float', 'title': "The threshold for the given metric."},
{'name': 'threshold_sign', 'type': 'str',
'title': "If 'less', will threshold any metric less than the given threshold. "
"If 'less_or_equal', will threshold any metric less than or equal to the given threshold. "
"If 'greater', will threshold any metric greater than the given threshold. "
"If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold."}]
gui_params = curator_gui_params + gui_params + get_recording_gui_params() + get_feature_gui_params()
return gui_params
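# Illustrative sketch (not part of the original module): how the `threshold` /
# `threshold_sign` pair described in the curator GUI params above decides which
# units are discarded; units whose metric satisfies the comparison are removed,
# the rest are kept.
def _example_threshold_sign(metrics=(2.0, 4.0, 7.5), threshold=4.0, threshold_sign="less"):
    if threshold_sign == "less":
        return [m for m in metrics if not m < threshold]
    elif threshold_sign == "less_or_equal":
        return [m for m in metrics if not m <= threshold]
    elif threshold_sign == "greater":
        return [m for m in metrics if not m > threshold]
    return [m for m in metrics if not m >= threshold]  # 'greater_or_equal'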
class SNR(QualityMetric):
installed = True # check at class level if installed or not
installation_mesg = "" # err
params = OrderedDict([('snr_mode',"mad"), ('snr_noise_duration',10.0), ('max_spikes_per_unit_for_snr',1000),
('template_mode', "median"), ('max_channel_peak', "both"), ('seed',None), ('verbose',False)])
curator_name = "ThresholdSNR"
curator_gui_params = make_curator_gui_params(params)
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="snr")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property):
snrs_epochs = []
for epoch in self._metric_data._epochs:
epoch_recording = self._metric_data._recording.get_epoch(epoch[0])
epoch_sorting = self._metric_data._sorting.get_epoch(epoch[0])
channel_noise_levels = _compute_channel_noise_levels(
recording=epoch_recording,
mode=snr_mode,
noise_duration=snr_noise_duration,
seed=seed,
)
templates = st.postprocessing.get_unit_templates(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
mode=template_mode,
save_wf_as_features=save_features_props,
recompute_waveforms=recompute_info,
save_as_property=save_features_props,
seed=seed,
)
max_channels = st.postprocessing.get_unit_max_channels(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
peak=max_channel_peak,
recompute_templates=recompute_info,
save_as_property=save_features_props,
mode=template_mode,
seed=seed,
)
snr_list = []
for i, unit_id in enumerate(self._metric_data._unit_ids):
if self._metric_data.verbose:
printProgressBar(i + 1, len(self._metric_data._unit_ids))
max_channel_idx = epoch_recording.get_channel_ids().index(
max_channels[i]
)
snr = _compute_template_SNR(
templates[i], channel_noise_levels, max_channel_idx
)
snr_list.append(snr)
snrs = np.asarray(snr_list)
snrs_epochs.append(snrs)
if save_as_property:
self.save_as_property(self._metric_data._sorting, snrs_epochs, self._metric_name)
return snrs_epochs
def threshold_metric(self, threshold, threshold_sign, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props, recompute_info,
seed, save_as_property):
snrs_epochs = self.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property)[0]
threshold_curator = ThresholdCurator(
sorting=self._metric_data._sorting, metrics_epoch=snrs_epochs
)
threshold_curator.threshold_sorting(
threshold=threshold, threshold_sign=threshold_sign
)
return threshold_curator
def _compute_template_SNR(template, channel_noise_levels, max_channel_idx):
"""
Computes SNR on the channel with largest amplitude
Parameters
----------
template: np.array
Template (n_elec, n_timepoints)
channel_noise_levels: list
Noise levels for the different channels
max_channel_idx: int
        Index of the channel with the largest template
Returns
-------
snr: float
Signal-to-noise ratio for the template
"""
snr = (
np.max(np.abs(template[max_channel_idx]))
/ channel_noise_levels[max_channel_idx]
)
return snr
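# Illustrative sketch (not part of the original module): computing the SNR of a toy
# two-channel template by hand; channel 1 has the largest peak (|-3.0| = 3.0) and a
# noise level of 1.0, so the returned SNR is 3.0.
def _example_template_snr():
    template = np.array([[0.1, -0.2, 0.1], [1.0, -3.0, 2.0]])  # (n_elec, n_timepoints)
    channel_noise_levels = [1.0, 1.0]
    max_channel_idx = 1
    return _compute_template_SNR(template, channel_noise_levels, max_channel_idx)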
def _compute_channel_noise_levels(recording, mode, noise_duration, seed):
"""
Computes noise level channel-wise
Parameters
----------
recording: RecordingExtractor
        The recording extractor object
    mode: str
        'std' or 'mad' (default 'mad')
noise_duration: float
Number of seconds to compute SNR from
Returns
-------
    noise_levels: list
Noise levels for each channel
"""
M = recording.get_num_channels()
n_frames = int(noise_duration * recording.get_sampling_frequency())
if n_frames >= recording.get_num_frames():
start_frame = 0
end_frame = recording.get_num_frames()
else:
start_frame = np.random.RandomState(seed=seed).randint(
0, recording.get_num_frames() - n_frames
)
end_frame = start_frame + n_frames
X = recording.get_traces(start_frame=start_frame, end_frame=end_frame)
noise_levels = []
for ch in range(M):
if mode == "std":
noise_level = np.std(X[ch, :])
elif mode == "mad":
noise_level = np.median(np.abs(X[ch, :]) / 0.6745)
else:
raise Exception("'mode' can be 'std' or 'mad'")
noise_levels.append(noise_level)
return noise_levels
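# Illustrative sketch (not part of the original module): for Gaussian noise the MAD
# estimate median(|x|) / 0.6745 used above approximates the standard deviation, but
# it is far less sensitive to spikes riding on top of the noise than plain np.std.
def _example_mad_vs_std(seed=0):
    x = np.random.RandomState(seed).randn(100000)
    mad_estimate = np.median(np.abs(x)) / 0.6745
    return mad_estimate, np.std(x)  # both close to 1.0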
|
[
"numpy.abs",
"collections.OrderedDict",
"spiketoolkit.postprocessing.get_unit_templates",
"numpy.asarray",
"numpy.std",
"spiketoolkit.postprocessing.get_unit_max_channels",
"numpy.random.RandomState"
] |
[((2688, 2896), 'collections.OrderedDict', 'OrderedDict', (["[('snr_mode', 'mad'), ('snr_noise_duration', 10.0), (\n 'max_spikes_per_unit_for_snr', 1000), ('template_mode', 'median'), (\n 'max_channel_peak', 'both'), ('seed', None), ('verbose', False)]"], {}), "([('snr_mode', 'mad'), ('snr_noise_duration', 10.0), (\n 'max_spikes_per_unit_for_snr', 1000), ('template_mode', 'median'), (\n 'max_channel_peak', 'both'), ('seed', None), ('verbose', False)])\n", (2699, 2896), False, 'from collections import OrderedDict\n'), ((3939, 4258), 'spiketoolkit.postprocessing.get_unit_templates', 'st.postprocessing.get_unit_templates', (['epoch_recording', 'epoch_sorting'], {'unit_ids': 'self._metric_data._unit_ids', 'max_spikes_per_unit': 'max_spikes_per_unit_for_snr', 'mode': 'template_mode', 'save_wf_as_features': 'save_features_props', 'recompute_waveforms': 'recompute_info', 'save_as_property': 'save_features_props', 'seed': 'seed'}), '(epoch_recording, epoch_sorting,\n unit_ids=self._metric_data._unit_ids, max_spikes_per_unit=\n max_spikes_per_unit_for_snr, mode=template_mode, save_wf_as_features=\n save_features_props, recompute_waveforms=recompute_info,\n save_as_property=save_features_props, seed=seed)\n', (3975, 4258), True, 'import spiketoolkit as st\n'), ((4428, 4733), 'spiketoolkit.postprocessing.get_unit_max_channels', 'st.postprocessing.get_unit_max_channels', (['epoch_recording', 'epoch_sorting'], {'unit_ids': 'self._metric_data._unit_ids', 'max_spikes_per_unit': 'max_spikes_per_unit_for_snr', 'peak': 'max_channel_peak', 'recompute_templates': 'recompute_info', 'save_as_property': 'save_features_props', 'mode': 'template_mode', 'seed': 'seed'}), '(epoch_recording, epoch_sorting,\n unit_ids=self._metric_data._unit_ids, max_spikes_per_unit=\n max_spikes_per_unit_for_snr, peak=max_channel_peak, recompute_templates\n =recompute_info, save_as_property=save_features_props, mode=\n template_mode, seed=seed)\n', (4467, 4733), True, 'import spiketoolkit as st\n'), ((5414, 5434), 'numpy.asarray', 'np.asarray', (['snr_list'], {}), '(snr_list)\n', (5424, 5434), True, 'import numpy as np\n'), ((6959, 6992), 'numpy.abs', 'np.abs', (['template[max_channel_idx]'], {}), '(template[max_channel_idx])\n', (6965, 6992), True, 'import numpy as np\n'), ((8083, 8099), 'numpy.std', 'np.std', (['X[ch, :]'], {}), '(X[ch, :])\n', (8089, 8099), True, 'import numpy as np\n'), ((7761, 7793), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (7782, 7793), True, 'import numpy as np\n'), ((8164, 8180), 'numpy.abs', 'np.abs', (['X[ch, :]'], {}), '(X[ch, :])\n', (8170, 8180), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
# author: Xinge
# @file: spconv_unet.py
# @time: 2020/06/22 15:01
import time
import numpy as np
import spconv
import torch
import torch.nn.functional as F
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False, indice_key=indice_key)
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=stride,
padding=(0, 1, 1), bias=False, indice_key=indice_key)
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 1, 3), stride=stride,
padding=(0, 0, 1), bias=False, indice_key=indice_key)
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 1), stride=stride,
padding=(0, 1, 0), bias=False, indice_key=indice_key)
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 1), stride=stride,
padding=(1, 0, 0), bias=False, indice_key=indice_key)
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 3), stride=stride,
padding=(1, 0, 1), bias=False, indice_key=indice_key)
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=1, bias=False, indice_key=indice_key)
class ResContextBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ResContextBlock, self).__init__()
self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.LeakyReLU()
self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.LeakyReLU()
self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
return resA
class ResBlock(nn.Module):
def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
pooling=True, drop_out=True, height_pooling=False, indice_key=None):
super(ResBlock, self).__init__()
self.pooling = pooling
self.drop_out = drop_out
self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act1 = nn.LeakyReLU()
self.bn0 = nn.BatchNorm1d(out_filters)
self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act1_2 = nn.LeakyReLU()
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
# self.conv4 = conv3x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act4 = nn.LeakyReLU()
# self.bn4 = nn.BatchNorm1d(out_filters)
if pooling:
# self.dropout = nn.Dropout3d(p=dropout_rate)
if height_pooling:
# self.pool = spconv.SparseMaxPool3d(kernel_size=2, stride=2)
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
padding=1, indice_key=indice_key, bias=False)
else:
# self.pool = spconv.SparseMaxPool3d(kernel_size=(2,2,1), stride=(2, 2, 1))
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2,2,1),
padding=1, indice_key=indice_key, bias=False)
# else:
# self.dropout = nn.Dropout3d(p=dropout_rate)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
# resA = self.conv4(resA)
# resA.features = self.act4(resA.features)
# resA.features = self.bn4(resA.features)
if self.pooling:
# if self.drop_out:
# resB = self.dropout(resA.features)
# else:
# resB = resA
resB = self.pool(resA)
return resB, resA
else:
# if self.drop_out:
# resB = self.dropout(resA)
# else:
# resB = resA
return resA
class UpBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None):
super(UpBlock, self).__init__()
# self.drop_out = drop_out
#self.trans = nn.ConvTranspose2d(in_filters, out_filters, kernel_size, stride=(2, 2), padding=1)
self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key+"new_up")
self.trans_act = nn.LeakyReLU()
self.trans_bn = nn.BatchNorm1d(out_filters)
# self.dropout1 = nn.Dropout3d(p=dropout_rate)
# self.dropout2 = nn.Dropout3d(p=dropout_rate)
self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
self.act1 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv2 = conv3x1(out_filters, out_filters, indice_key=indice_key)
self.act2 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)
self.act3 = nn.LeakyReLU()
self.bn3 = nn.BatchNorm1d(out_filters)
# self.dropout3 = nn.Dropout3d(p=dropout_rate)
self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)
def forward(self, x, skip):
upA = self.trans_dilao(x)
#if upA.shape != skip.shape:
# upA = F.pad(upA, (0, 1, 0, 1), mode='replicate')
upA.features = self.trans_act(upA.features)
upA.features = self.trans_bn(upA.features)
## upsample
upA = self.up_subm(upA)
# upA = F.interpolate(upA, size=skip.size()[2:], mode='trilinear', align_corners=True)
# if self.drop_out:
# upA = self.dropout1(upA)
upA.features = upA.features + skip.features
# if self.drop_out:
# upB = self.dropout2(upB)
upE = self.conv1(upA)
upE.features = self.act1(upE.features)
upE.features = self.bn1(upE.features)
upE = self.conv2(upE)
upE.features = self.act2(upE.features)
upE.features = self.bn2(upE.features)
upE = self.conv3(upE)
upE.features = self.act3(upE.features)
upE.features = self.bn3(upE.features)
# if self.drop_out:
# upE = self.dropout3(upE)
return upE
class ReconBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ReconBlock, self).__init__()
self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.Sigmoid()
self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.Sigmoid()
self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_3 = nn.BatchNorm1d(out_filters)
self.act1_3 = nn.Sigmoid()
# self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
# self.act2 = nn.LeakyReLU()
# self.bn1 = nn.BatchNorm1d(out_filters)
#
# self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act3 = nn.LeakyReLU()
# self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.bn0(shortcut.features)
shortcut.features = self.act1(shortcut.features)
shortcut2 = self.conv1_2(x)
shortcut2.features = self.bn0_2(shortcut2.features)
shortcut2.features = self.act1_2(shortcut2.features)
shortcut3 = self.conv1_3(x)
shortcut3.features = self.bn0_3(shortcut3.features)
shortcut3.features = self.act1_3(shortcut3.features)
# resA = self.conv2(x)
# resA.features = self.act2(resA.features)
# resA.features = self.bn1(resA.features)
#
# resA = self.conv3(resA)
# resA.features = self.act3(resA.features)
# resA.features = self.bn2(resA.features)
shortcut.features = shortcut.features + shortcut2.features + shortcut3.features
shortcut.features = shortcut.features * x.features
return shortcut
class Spconv_salsaNet_res_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_salsaNet_res_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1) # size 4 * init_size --> OK with the size of the semantic and instance heads
return up0e, up0e
class Spconv_sem_logits_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_sem_logits_head_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, fea):
logits = self.logits(fea)
return logits.dense()
class Spconv_ins_offset_concatxyz_threelayers_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_ins_offset_concatxyz_threelayers_head_cfg, self).__init__()
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.pt_fea_dim = 4 * init_size
self.embedding_dim = cfg.MODEL.INS_HEAD.EMBEDDING_CHANNEL
self.conv1 = conv3x3(self.pt_fea_dim, self.pt_fea_dim, indice_key='offset_head_conv1')
self.bn1 = nn.BatchNorm1d(self.pt_fea_dim)
self.act1 = nn.LeakyReLU()
self.conv2 = conv3x3(self.pt_fea_dim, 2 * init_size, indice_key='offset_head_conv2')
self.bn2 = nn.BatchNorm1d(2 * init_size)
self.act2 = nn.LeakyReLU()
self.conv3 = conv3x3(2 * init_size, init_size, indice_key='offset_head_conv3')
self.bn3 = nn.BatchNorm1d(init_size)
self.act3 = nn.LeakyReLU()
self.offset = nn.Sequential(
nn.Linear(init_size+3, init_size, bias=True),
nn.BatchNorm1d(init_size),
nn.ReLU()
)
self.offset_linear = nn.Linear(init_size, self.embedding_dim, bias=True)
def forward(self, fea, batch):
fea = self.conv1(fea)
fea.features = self.act1(self.bn1(fea.features))
fea = self.conv2(fea)
fea.features = self.act2(self.bn2(fea.features))
fea = self.conv3(fea)
fea.features = self.act3(self.bn3(fea.features))
grid_ind = batch['grid']
xyz = batch['pt_cart_xyz']
fea = fea.dense()
fea = fea.permute(0, 2, 3, 4, 1)
pt_ins_fea_list = []
for batch_i, grid_ind_i in enumerate(grid_ind):
pt_ins_fea_list.append(fea[batch_i, grid_ind[batch_i][:,0], grid_ind[batch_i][:,1], grid_ind[batch_i][:,2]])
pt_pred_offsets_list = []
for batch_i, pt_ins_fea in enumerate(pt_ins_fea_list):
pt_pred_offsets_list.append(self.offset_linear(self.offset(torch.cat([pt_ins_fea,torch.from_numpy(xyz[batch_i]).cuda()],dim=1))))
return pt_pred_offsets_list, pt_ins_fea_list
class Spconv_alsaNet_res(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
nclasses = 20, n_height = 32, strict=False, init_size=16):
super(Spconv_alsaNet_res, self).__init__()
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
import pdb
pdb.set_trace()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1)
# up2e = self.upBlock3(up3e, down2b)
# up1e = self.upBlock4(up2e, down1b)
# up0e = self.upBlock5(up1e, down0b)
# up0e_gap = nn.AdaptiveAvgPool3d((1))(up0e)
# up0e_gap = F.interpolate(up0e_gap, size=(up0e.size()[2:]), mode='trilinear', align_corners=True)
# up0e = torch.cat((up0e, up0e_gap), dim=1)
logits = self.logits(up0e)
y = logits.dense()
# y = logits.permute(0, 1, 3, 4, 2)
return y
|
[
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.LeakyReLU",
"spconv.SparseInverseConv3d",
"torch.from_numpy",
"spconv.SubMConv3d",
"torch.nn.BatchNorm1d",
"numpy.array",
"spconv.SparseConvTensor",
"spconv.SparseConv3d",
"torch.nn.Linear",
"pdb.set_trace",
"torch.cat"
] |
[((276, 396), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False, indice_key=indice_key)\n', (293, 396), False, 'import spconv\n'), ((490, 627), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1, 3, 3)', 'stride': 'stride', 'padding': '(0, 1, 1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(1, 3, 3), stride=\n stride, padding=(0, 1, 1), bias=False, indice_key=indice_key)\n', (507, 627), False, 'import spconv\n'), ((721, 858), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1, 1, 3)', 'stride': 'stride', 'padding': '(0, 0, 1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(1, 1, 3), stride=\n stride, padding=(0, 0, 1), bias=False, indice_key=indice_key)\n', (738, 858), False, 'import spconv\n'), ((952, 1089), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1, 3, 1)', 'stride': 'stride', 'padding': '(0, 1, 0)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(1, 3, 1), stride=\n stride, padding=(0, 1, 0), bias=False, indice_key=indice_key)\n', (969, 1089), False, 'import spconv\n'), ((1183, 1320), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3, 1, 1)', 'stride': 'stride', 'padding': '(1, 0, 0)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(3, 1, 1), stride=\n stride, padding=(1, 0, 0), bias=False, indice_key=indice_key)\n', (1200, 1320), False, 'import spconv\n'), ((1413, 1550), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3, 1, 3)', 'stride': 'stride', 'padding': '(1, 0, 1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=(3, 1, 3), stride=\n stride, padding=(1, 0, 1), bias=False, indice_key=indice_key)\n', (1430, 1550), False, 'import spconv\n'), ((1642, 1762), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)', 'indice_key': 'indice_key'}), '(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=1, bias=False, indice_key=indice_key)\n', (1659, 1762), False, 'import spconv\n'), ((2066, 2093), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (2080, 2093), False, 'from torch import nn\n'), ((2114, 2128), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2126, 2128), False, 'from torch import nn\n'), ((2237, 2264), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (2251, 2264), False, 'from torch import nn\n'), ((2287, 2301), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2299, 2301), False, 'from torch import nn\n'), ((2406, 2420), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2418, 2420), False, 'from torch import nn\n'), ((2440, 2467), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (2454, 2467), False, 'from torch import nn\n'), ((2573, 2587), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2585, 2587), False, 'from torch import nn\n'), ((2607, 2634), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), 
'(out_filters)\n', (2621, 2634), False, 'from torch import nn\n'), ((3722, 3736), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3734, 3736), False, 'from torch import nn\n'), ((3756, 3783), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (3770, 3783), False, 'from torch import nn\n'), ((3893, 3907), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3905, 3907), False, 'from torch import nn\n'), ((3929, 3956), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (3943, 3956), False, 'from torch import nn\n'), ((4061, 4075), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (4073, 4075), False, 'from torch import nn\n'), ((4095, 4122), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (4109, 4122), False, 'from torch import nn\n'), ((4228, 4242), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (4240, 4242), False, 'from torch import nn\n'), ((4262, 4289), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (4276, 4289), False, 'from torch import nn\n'), ((6777, 6791), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (6789, 6791), False, 'from torch import nn\n'), ((6816, 6843), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (6830, 6843), False, 'from torch import nn\n'), ((7055, 7069), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (7067, 7069), False, 'from torch import nn\n'), ((7089, 7116), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (7103, 7116), False, 'from torch import nn\n'), ((7217, 7231), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (7229, 7231), False, 'from torch import nn\n'), ((7251, 7278), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (7265, 7278), False, 'from torch import nn\n'), ((7378, 7392), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (7390, 7392), False, 'from torch import nn\n'), ((7412, 7439), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (7426, 7439), False, 'from torch import nn\n'), ((7519, 7621), 'spconv.SparseInverseConv3d', 'spconv.SparseInverseConv3d', (['out_filters', 'out_filters'], {'kernel_size': '(3)', 'indice_key': 'up_key', 'bias': '(False)'}), '(out_filters, out_filters, kernel_size=3,\n indice_key=up_key, bias=False)\n', (7545, 7621), False, 'import spconv\n'), ((8966, 8993), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (8980, 8993), False, 'from torch import nn\n'), ((9014, 9026), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9024, 9026), False, 'from torch import nn\n'), ((9136, 9163), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (9150, 9163), False, 'from torch import nn\n'), ((9186, 9198), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9196, 9198), False, 'from torch import nn\n'), ((9308, 9335), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_filters'], {}), '(out_filters)\n', (9322, 9335), False, 'from torch import nn\n'), ((9358, 9370), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9368, 9370), False, 'from torch import nn\n'), ((11340, 11362), 'numpy.array', 'np.array', (['output_shape'], {}), '(output_shape)\n', (11348, 11362), True, 'import numpy as np\n'), ((13123, 13200), 'spconv.SparseConvTensor', 'spconv.SparseConvTensor', (['voxel_features', 'coors', 'self.sparse_shape', 'batch_size'], {}), 
'(voxel_features, coors, self.sparse_shape, batch_size)\n', (13146, 13200), False, 'import spconv\n'), ((13828, 13872), 'torch.cat', 'torch.cat', (['(up0e.features, up1e.features)', '(1)'], {}), '((up0e.features, up1e.features), 1)\n', (13837, 13872), False, 'import torch\n'), ((14559, 14673), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['(4 * init_size)', 'nclasses'], {'indice_key': '"""logit"""', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), "(4 * init_size, nclasses, indice_key='logit', kernel_size=\n 3, stride=1, padding=1, bias=True)\n", (14576, 14673), False, 'import spconv\n'), ((15211, 15242), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.pt_fea_dim'], {}), '(self.pt_fea_dim)\n', (15225, 15242), False, 'from torch import nn\n'), ((15263, 15277), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (15275, 15277), False, 'from torch import nn\n'), ((15390, 15419), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(2 * init_size)'], {}), '(2 * init_size)\n', (15404, 15419), False, 'from torch import nn\n'), ((15440, 15454), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (15452, 15454), False, 'from torch import nn\n'), ((15561, 15586), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['init_size'], {}), '(init_size)\n', (15575, 15586), False, 'from torch import nn\n'), ((15607, 15621), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (15619, 15621), False, 'from torch import nn\n'), ((15818, 15869), 'torch.nn.Linear', 'nn.Linear', (['init_size', 'self.embedding_dim'], {'bias': '(True)'}), '(init_size, self.embedding_dim, bias=True)\n', (15827, 15869), False, 'from torch import nn\n'), ((17210, 17232), 'numpy.array', 'np.array', (['output_shape'], {}), '(output_shape)\n', (17218, 17232), True, 'import numpy as np\n'), ((18914, 19028), 'spconv.SubMConv3d', 'spconv.SubMConv3d', (['(4 * init_size)', 'nclasses'], {'indice_key': '"""logit"""', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), "(4 * init_size, nclasses, indice_key='logit', kernel_size=\n 3, stride=1, padding=1, bias=True)\n", (18931, 19028), False, 'import spconv\n'), ((19167, 19182), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (19180, 19182), False, 'import pdb\n'), ((19197, 19274), 'spconv.SparseConvTensor', 'spconv.SparseConvTensor', (['voxel_features', 'coors', 'self.sparse_shape', 'batch_size'], {}), '(voxel_features, coors, self.sparse_shape, batch_size)\n', (19220, 19274), False, 'import spconv\n'), ((19902, 19946), 'torch.cat', 'torch.cat', (['(up0e.features, up1e.features)', '(1)'], {}), '((up0e.features, up1e.features), 1)\n', (19911, 19946), False, 'import torch\n'), ((15672, 15718), 'torch.nn.Linear', 'nn.Linear', (['(init_size + 3)', 'init_size'], {'bias': '(True)'}), '(init_size + 3, init_size, bias=True)\n', (15681, 15718), False, 'from torch import nn\n'), ((15730, 15755), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['init_size'], {}), '(init_size)\n', (15744, 15755), False, 'from torch import nn\n'), ((15769, 15778), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15776, 15778), False, 'from torch import nn\n'), ((4679, 4799), 'spconv.SparseConv3d', 'spconv.SparseConv3d', (['out_filters', 'out_filters'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'indice_key': 'indice_key', 'bias': '(False)'}), '(out_filters, out_filters, kernel_size=3, stride=2,\n padding=1, indice_key=indice_key, bias=False)\n', (4698, 4799), False, 'import spconv\n'), ((4955, 5084), 'spconv.SparseConv3d', 'spconv.SparseConv3d', (['out_filters', 
'out_filters'], {'kernel_size': '(3)', 'stride': '(2, 2, 1)', 'padding': '(1)', 'indice_key': 'indice_key', 'bias': '(False)'}), '(out_filters, out_filters, kernel_size=3, stride=(2, 2, \n 1), padding=1, indice_key=indice_key, bias=False)\n', (4974, 5084), False, 'import spconv\n'), ((16699, 16729), 'torch.from_numpy', 'torch.from_numpy', (['xyz[batch_i]'], {}), '(xyz[batch_i])\n', (16715, 16729), False, 'import torch\n')]
|
import numpy as np
from prml.dimreduction.pca import PCA
class BayesianPCA(PCA):
def fit(self, X, iter_max=100, initial="random"):
"""
        Empirical Bayes estimation of PCA parameters
Parameters
----------
X : (sample_size, n_features) ndarray
input data
iter_max : int
maximum number of em steps
Returns
-------
mean : (n_features,) ndarray
            sample mean of the input data
W : (n_features, n_components) ndarray
projection matrix
var : float
variance of observation noise
"""
initial_list = ["random", "eigen"]
self.mean = np.mean(X, axis=0)
self.I = np.eye(self.n_components)
if initial not in initial_list:
print("availabel initializations are {}".format(initial_list))
if initial == "random":
self.W = np.eye(np.size(X, 1), self.n_components)
#self.W = np.random.randn(np.size(X, 1), self.n_components)
self.var = 1.
elif initial == "eigen":
self.eigen(X)
self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
for i in range(iter_max):
W = np.copy(self.W)
stats = self._expectation(X - self.mean)
self._maximization(X - self.mean, *stats)
#self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
#if np.allclose(W, self.W):
# break
self.n_iter = i + 1
self.C = self.W @ self.W.T + self.var * np.eye(np.size(X, 1))
self.Cinv = np.linalg.inv(self.C)
def _maximization(self, X, Ez, Ezz):
self.W = X.T @ Ez @ np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha))
self.var = np.mean(
np.mean(X ** 2, axis=-1)
- 2 * np.mean(Ez @ self.W.T * X, axis=-1)
+ np.trace((Ezz @ self.W.T @ self.W).T) / len(self.mean))
def maximize(self, D, Ez, Ezz):
self.W = D.T.dot(Ez).dot(np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha)))
self.var = np.mean(
np.mean(D ** 2, axis=-1)
- 2 * np.mean(Ez.dot(self.W.T) * D, axis=-1)
+ np.trace(Ezz.dot(self.W.T).dot(self.W).T) / self.ndim)
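# Illustrative sketch (not part of the original module): fitting BayesianPCA to noisy
# low-rank data; it assumes the PCA base class takes n_components in its constructor.
def _example_bayesian_pca():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 2) @ rng.randn(2, 5) + 0.1 * rng.randn(200, 5)  # rank-2 signal + noise
    model = BayesianPCA(n_components=2)
    model.fit(X, iter_max=50, initial="eigen")
    return model.W.shape, model.var  # projection matrix (5, 2) and observation noise variance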
|
[
"numpy.mean",
"numpy.eye",
"numpy.copy",
"numpy.trace",
"numpy.size",
"numpy.diag",
"numpy.sum",
"numpy.linalg.inv"
] |
[((699, 717), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (706, 717), True, 'import numpy as np\n'), ((735, 760), 'numpy.eye', 'np.eye', (['self.n_components'], {}), '(self.n_components)\n', (741, 760), True, 'import numpy as np\n'), ((1650, 1671), 'numpy.linalg.inv', 'np.linalg.inv', (['self.C'], {}), '(self.C)\n', (1663, 1671), True, 'import numpy as np\n'), ((1259, 1274), 'numpy.copy', 'np.copy', (['self.W'], {}), '(self.W)\n', (1266, 1274), True, 'import numpy as np\n'), ((936, 949), 'numpy.size', 'np.size', (['X', '(1)'], {}), '(X, 1)\n', (943, 949), True, 'import numpy as np\n'), ((1165, 1192), 'numpy.sum', 'np.sum', (['(self.W ** 2)'], {'axis': '(0)'}), '(self.W ** 2, axis=0)\n', (1171, 1192), True, 'import numpy as np\n'), ((1615, 1628), 'numpy.size', 'np.size', (['X', '(1)'], {}), '(X, 1)\n', (1622, 1628), True, 'import numpy as np\n'), ((1757, 1776), 'numpy.sum', 'np.sum', (['Ezz'], {'axis': '(0)'}), '(Ezz, axis=0)\n', (1763, 1776), True, 'import numpy as np\n'), ((1851, 1875), 'numpy.mean', 'np.mean', (['(X ** 2)'], {'axis': '(-1)'}), '(X ** 2, axis=-1)\n', (1858, 1875), True, 'import numpy as np\n'), ((1944, 1981), 'numpy.trace', 'np.trace', (['(Ezz @ self.W.T @ self.W).T'], {}), '((Ezz @ self.W.T @ self.W).T)\n', (1952, 1981), True, 'import numpy as np\n'), ((2084, 2103), 'numpy.sum', 'np.sum', (['Ezz'], {'axis': '(0)'}), '(Ezz, axis=0)\n', (2090, 2103), True, 'import numpy as np\n'), ((2179, 2203), 'numpy.mean', 'np.mean', (['(D ** 2)'], {'axis': '(-1)'}), '(D ** 2, axis=-1)\n', (2186, 2203), True, 'import numpy as np\n'), ((1790, 1809), 'numpy.diag', 'np.diag', (['self.alpha'], {}), '(self.alpha)\n', (1797, 1809), True, 'import numpy as np\n'), ((1894, 1929), 'numpy.mean', 'np.mean', (['(Ez @ self.W.T * X)'], {'axis': '(-1)'}), '(Ez @ self.W.T * X, axis=-1)\n', (1901, 1929), True, 'import numpy as np\n'), ((2117, 2136), 'numpy.diag', 'np.diag', (['self.alpha'], {}), '(self.alpha)\n', (2124, 2136), True, 'import numpy as np\n')]
|
""" Testing array utilities
"""
import sys
import numpy as np
from ..arrfuncs import as_native_array, pinv, eigh
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
NATIVE_ORDER = '<' if sys.byteorder == 'little' else '>'
SWAPPED_ORDER = '>' if sys.byteorder == 'little' else '<'
def test_as_native():
arr = np.arange(5) # native
assert_equal(arr.dtype.byteorder, '=')
narr = as_native_array(arr)
assert_true(arr is narr)
sdt = arr.dtype.newbyteorder('s')
barr = arr.astype(sdt)
assert_equal(barr.dtype.byteorder, SWAPPED_ORDER)
narr = as_native_array(barr)
assert_false(barr is narr)
assert_array_equal(barr, narr)
assert_equal(narr.dtype.byteorder, NATIVE_ORDER)
def test_pinv():
arr = np.random.randn(4, 4, 4, 3, 7)
_pinv = pinv(arr)
for i in range(4):
for j in range(4):
for k in range(4):
assert_array_almost_equal(_pinv[i, j, k],
np.linalg.pinv(arr[i, j, k]))
def test_eigh():
for i in range(10):
arr = np.random.randn(7, 7)
evals1, evecs1 = eigh(arr)
evals2, evecs2 = np.linalg.eigh(arr)
assert_array_almost_equal(evals1, evals2)
assert_array_almost_equal(evecs1, evecs2)
arr = np.random.randn(4, 4, 4, 7, 7)
evals, evecs = eigh(arr)
for i in range(4):
for j in range(4):
for k in range(4):
evals_vox, evecs_vox = np.linalg.eigh(arr[i, j, k])
assert_array_almost_equal(evals[i, j, k], evals_vox)
assert_array_almost_equal(evecs[i, j, k], evecs_vox)
|
[
"numpy.testing.assert_array_almost_equal",
"numpy.linalg.pinv",
"numpy.arange",
"nose.tools.assert_true",
"numpy.linalg.eigh",
"nose.tools.assert_equal",
"nose.tools.assert_false",
"numpy.random.randn",
"numpy.testing.assert_array_equal"
] |
[((447, 459), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (456, 459), True, 'import numpy as np\n'), ((474, 512), 'nose.tools.assert_equal', 'assert_equal', (['arr.dtype.byteorder', '"""="""'], {}), "(arr.dtype.byteorder, '=')\n", (486, 512), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((549, 573), 'nose.tools.assert_true', 'assert_true', (['(arr is narr)'], {}), '(arr is narr)\n', (560, 573), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((643, 692), 'nose.tools.assert_equal', 'assert_equal', (['barr.dtype.byteorder', 'SWAPPED_ORDER'], {}), '(barr.dtype.byteorder, SWAPPED_ORDER)\n', (655, 692), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((730, 756), 'nose.tools.assert_false', 'assert_false', (['(barr is narr)'], {}), '(barr is narr)\n', (742, 756), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((761, 791), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['barr', 'narr'], {}), '(barr, narr)\n', (779, 791), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((796, 844), 'nose.tools.assert_equal', 'assert_equal', (['narr.dtype.byteorder', 'NATIVE_ORDER'], {}), '(narr.dtype.byteorder, NATIVE_ORDER)\n', (808, 844), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_raises\n'), ((874, 904), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)', '(4)', '(3)', '(7)'], {}), '(4, 4, 4, 3, 7)\n', (889, 904), True, 'import numpy as np\n'), ((1408, 1438), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)', '(4)', '(7)', '(7)'], {}), '(4, 4, 4, 7, 7)\n', (1423, 1438), True, 'import numpy as np\n'), ((1195, 1216), 'numpy.random.randn', 'np.random.randn', (['(7)', '(7)'], {}), '(7, 7)\n', (1210, 1216), True, 'import numpy as np\n'), ((1277, 1296), 'numpy.linalg.eigh', 'np.linalg.eigh', (['arr'], {}), '(arr)\n', (1291, 1296), True, 'import numpy as np\n'), ((1305, 1346), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evals1', 'evals2'], {}), '(evals1, evals2)\n', (1330, 1346), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1355, 1396), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evecs1', 'evecs2'], {}), '(evecs1, evecs2)\n', (1380, 1396), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1588, 1616), 'numpy.linalg.eigh', 'np.linalg.eigh', (['arr[i, j, k]'], {}), '(arr[i, j, k])\n', (1602, 1616), True, 'import numpy as np\n'), ((1633, 1685), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evals[i, j, k]', 'evals_vox'], {}), '(evals[i, j, k], evals_vox)\n', (1658, 1685), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1702, 1754), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['evecs[i, j, k]', 'evecs_vox'], {}), '(evecs[i, j, k], evecs_vox)\n', (1727, 1754), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1108, 1136), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr[i, j, k]'], {}), '(arr[i, j, k])\n', (1122, 1136), True, 'import numpy as np\n')]
|
"""Define the CSRmatrix class."""
import numpy as np
from scipy.sparse import coo_matrix
from six import iteritems
from openmdao.matrices.coo_matrix import COOMatrix
class CSRMatrix(COOMatrix):
"""
Sparse matrix in Compressed Row Storage format.
"""
def _build(self, num_rows, num_cols):
"""
Allocate the matrix.
Parameters
----------
num_rows : int
number of rows in the matrix.
num_cols : int
number of cols in the matrix.
"""
data, rows, cols = self._build_sparse(num_rows, num_cols)
# get a set of indices that sorts into row major order
srtidxs = np.lexsort((cols, rows))
data = data[srtidxs]
rows = rows[srtidxs]
cols = cols[srtidxs]
# now sort these back into ascending order (our original stacked order)
# so in _update_submat() we can just extract the individual index
# arrays that will map each block into the combined data array.
revidxs = np.argsort(srtidxs)
metadata = self._metadata
for key, (ind1, ind2, idxs, jac_type, factor) in iteritems(metadata):
if idxs is None:
metadata[key] = (revidxs[ind1:ind2], jac_type, factor)
else:
# apply the reverse index to each part of revidxs so that
# we can avoid copying the index array during updates.
metadata[key] = (revidxs[ind1:ind2][np.argsort(idxs)],
jac_type, factor)
# data array for the CSR will be the same as for the COO since
# it was already in sorted order.
coo = coo_matrix((data, (rows, cols)), shape=(num_rows, num_cols))
coo_data_size = coo.data.size
self._matrix = coo.tocsr()
# make sure data size is the same between coo and csr, else indexing is
# messed up
if coo_data_size != self._matrix.data.size:
raise ValueError("CSR matrix data contains duplicate row/col entries. "
"This would break internal indexing.")
|
[
"numpy.argsort",
"numpy.lexsort",
"six.iteritems",
"scipy.sparse.coo_matrix"
] |
[((679, 703), 'numpy.lexsort', 'np.lexsort', (['(cols, rows)'], {}), '((cols, rows))\n', (689, 703), True, 'import numpy as np\n'), ((1037, 1056), 'numpy.argsort', 'np.argsort', (['srtidxs'], {}), '(srtidxs)\n', (1047, 1056), True, 'import numpy as np\n'), ((1149, 1168), 'six.iteritems', 'iteritems', (['metadata'], {}), '(metadata)\n', (1158, 1168), False, 'from six import iteritems\n'), ((1683, 1743), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(data, (rows, cols))'], {'shape': '(num_rows, num_cols)'}), '((data, (rows, cols)), shape=(num_rows, num_cols))\n', (1693, 1743), False, 'from scipy.sparse import coo_matrix\n'), ((1485, 1501), 'numpy.argsort', 'np.argsort', (['idxs'], {}), '(idxs)\n', (1495, 1501), True, 'import numpy as np\n')]
|
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class SimpleMLP(nn.Module):
"""Simple MLP function approximator for Q-Learning."""
def __init__(self, in_dim, out_dim, hidden_units=256, num_hidden_layers=1):
super().__init__()
self.input_layer = nn.Sequential(nn.Linear(in_dim, hidden_units), nn.ReLU())
self.hidden_layers = nn.Sequential(
*[nn.Sequential(nn.Linear(hidden_units, hidden_units), nn.ReLU()) for _ in range(num_hidden_layers - 1)]
)
self.output_layer = nn.Linear(hidden_units, out_dim)
def forward(self, x):
x = self.input_layer(x)
x = self.hidden_layers(x)
return self.output_layer(x)
class NoisyLinear(nn.Module):
"""NoisyLinear Layer"""
def __init__(self, in_dim, out_dim, std_init=0.4):
super(NoisyLinear, self).__init__()
self.in_features = in_dim
self.out_features = out_dim
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.empty(out_dim, in_dim))
self.weight_sigma = nn.Parameter(torch.empty(out_dim, in_dim))
self.register_buffer("weight_epsilon", torch.empty(out_dim, in_dim))
self.bias_mu = nn.Parameter(torch.empty(out_dim))
self.bias_sigma = nn.Parameter(torch.empty(out_dim))
self.register_buffer("bias_epsilon", torch.empty(out_dim))
self.reset_parameters()
self.sample_noise()
def reset_parameters(self):
mu_range = 1.0 / math.sqrt(self.in_features)
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
def _scale_noise(self, size):
x = torch.randn(size)
return x.sign().mul_(x.abs().sqrt_())
def sample_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
weight_eps = epsilon_out.ger(epsilon_in)
bias_eps = epsilon_out
return weight_eps, bias_eps
def forward(self, inp):
if self.training:
weight_eps, bias_eps = self.sample_noise()
return F.linear(
inp,
self.weight_mu + self.weight_sigma * weight_eps,
self.bias_mu + self.bias_sigma * bias_eps,
)
else:
return F.linear(inp, self.weight_mu, self.bias_mu)
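# Illustrative sketch (not part of the original module): NoisyLinear acts like a linear
# layer whose weights are perturbed by learned, factorised Gaussian noise while
# training; in eval mode only the mean parameters weight_mu / bias_mu are used.
def _example_noisy_linear():
    layer = NoisyLinear(4, 2, std_init=0.4)
    x = torch.randn(3, 4)
    layer.train()
    y_noisy = layer(x)   # weight_mu + weight_sigma * eps
    layer.eval()
    y_mean = layer(x)    # weight_mu only
    return y_noisy.shape, y_mean.shape  # both torch.Size([3, 2])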
class ComplexMLP(nn.Module):
"""MLP function approximator for Q-Learning."""
def __init__(
self,
in_dim,
out_dim,
hidden_units=256,
num_hidden_layers=1,
noisy=False,
dueling=False,
sigma_init=0.5,
atoms=1,
):
super().__init__()
self._noisy = noisy
self._dueling = dueling
self._sigma_init = sigma_init
self._in_dim = np.prod(in_dim)
self._hidden_units = hidden_units
if self._dueling:
num_hidden_layers = max(num_hidden_layers - 1, 2)
self._num_hidden_layers = num_hidden_layers
self._out_dim = out_dim
self._atoms = atoms
self.init_networks()
def init_networks(self):
if self._noisy:
self.input_layer = nn.Sequential(
NoisyLinear(self._in_dim, self._hidden_units, self._sigma_init),
nn.ReLU(),
)
self.hidden_layers = nn.Sequential(
*[
nn.Sequential(
NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init),
nn.ReLU(),
)
for _ in range(self._num_hidden_layers - 1)
]
)
else:
self.input_layer = nn.Sequential(nn.Linear(self._in_dim, self._hidden_units), nn.ReLU())
self.hidden_layers = nn.Sequential(
*[
nn.Sequential(nn.Linear(self._hidden_units, self._hidden_units), nn.ReLU())
for _ in range(self._num_hidden_layers - 1)
]
)
if self._dueling:
"""In dueling, we have two heads - one for estimating advantage function and one for
estimating value function. If `noisy` is also true, then each of these layers will
be NoisyLinear()"""
if self._noisy:
self.output_layer_adv = nn.Sequential(
NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init),
nn.ReLU(),
NoisyLinear(
self._hidden_units,
self._out_dim * self._atoms,
self._sigma_init,
),
)
self.output_layer_val = nn.Sequential(
NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init),
nn.ReLU(),
NoisyLinear(
self._hidden_units,
1 * self._atoms,
self._sigma_init,
),
)
else:
self.output_layer_adv = nn.Sequential(
                    nn.Linear(self._hidden_units, self._hidden_units),
nn.ReLU(),
                    nn.Linear(
                        self._hidden_units,
                        self._out_dim * self._atoms,
                    ),
)
self.output_layer_val = nn.Sequential(
                    nn.Linear(self._hidden_units, self._hidden_units),
nn.ReLU(),
                    nn.Linear(
                        self._hidden_units,
                        1 * self._atoms,
                    ),
)
else:
if self._noisy:
self.output_layer = NoisyLinear(self._hidden_units, self._out_dim * self._atoms, self._sigma_init)
else:
self.output_layer = nn.Linear(self._hidden_units, self._out_dim * self._atoms)
def forward(self, x):
x = torch.flatten(x, start_dim=1)
x = self.input_layer(x)
x = self.hidden_layers(x)
if self._dueling:
adv = self.output_layer_adv(x)
val = self.output_layer_val(x)
if len(adv.shape) == 1:
x = val + adv - adv.mean(0)
else:
x = val + adv - adv.mean(1).unsqueeze(1).expand(x.shape[0], self._out_dim)
else:
x = self.output_layer(x)
return x
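# Illustrative sketch (not part of the original module): the dueling head above combines
# a state value V(s) with centred advantages A(s, a), i.e.
# Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), which is what forward() computes.
def _example_dueling_aggregation():
    val = torch.tensor([[2.0]])             # V(s), shape (batch, 1)
    adv = torch.tensor([[1.0, -1.0, 0.0]])  # A(s, a), shape (batch, n_actions)
    return val + adv - adv.mean(1, keepdim=True)  # tensor([[3., 1., 2.]])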
class DistributionalMLP(ComplexMLP):
"""Distributional MLP function approximator for Q-Learning."""
def __init__(
self,
in_dim,
out_dim,
supports,
hidden_units=256,
num_hidden_layers=1,
noisy=True,
dueling=True,
sigma_init=0.5,
atoms=51,
):
super().__init__(
in_dim,
out_dim,
hidden_units,
num_hidden_layers,
noisy,
dueling,
sigma_init,
atoms,
)
self._supports = supports
def forward(self, x):
x = torch.flatten(x, start_dim=1)
x = self.dist(x)
x = torch.sum(x * self._supports, dim=2)
return x
def dist(self, x):
x = self.input_layer(x)
x = self.hidden_layers(x)
if self._dueling:
adv = self.output_layer_adv(x)
adv = adv.view(-1, self._out_dim, self._atoms)
val = self.output_layer_val(x)
val = val.view(-1, 1, self._atoms)
x = val + adv - adv.mean(dim=1, keepdim=True)
else:
x = self.output_layer(x)
x = x.view(-1, self._out_dim, self._atoms)
x = F.softmax(x, dim=-1)
x = x.clamp(min=1e-3)
return x
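# Illustrative sketch (not part of the original module): dist() returns a probability
# mass over the fixed support atoms and forward() reduces it to expected returns via a
# probability-weighted sum over those atoms.
def _example_expected_q_from_atoms():
    supports = torch.tensor([0.0, 5.0, 10.0])                    # atom locations
    probs = torch.tensor([[[0.2, 0.5, 0.3], [0.6, 0.3, 0.1]]])   # (batch, actions, atoms)
    return torch.sum(probs * supports, dim=2)                # tensor([[5.5, 2.5]])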
|
[
"torch.nn.functional.linear",
"numpy.prod",
"torch.nn.functional.softmax",
"torch.nn.ReLU",
"math.sqrt",
"torch.sum",
"torch.nn.Linear",
"torch.empty",
"torch.randn",
"torch.flatten"
] |
[((580, 612), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', 'out_dim'], {}), '(hidden_units, out_dim)\n', (589, 612), False, 'from torch import nn\n'), ((1877, 1894), 'torch.randn', 'torch.randn', (['size'], {}), '(size)\n', (1888, 1894), False, 'import torch\n'), ((3022, 3037), 'numpy.prod', 'np.prod', (['in_dim'], {}), '(in_dim)\n', (3029, 3037), True, 'import numpy as np\n'), ((6396, 6425), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (6409, 6425), False, 'import torch\n'), ((7491, 7520), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (7504, 7520), False, 'import torch\n'), ((7558, 7594), 'torch.sum', 'torch.sum', (['(x * self._supports)'], {'dim': '(2)'}), '(x * self._supports, dim=2)\n', (7567, 7594), False, 'import torch\n'), ((8096, 8116), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (8105, 8116), True, 'import torch.nn.functional as F\n'), ((337, 368), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'hidden_units'], {}), '(in_dim, hidden_units)\n', (346, 368), False, 'from torch import nn\n'), ((370, 379), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (377, 379), False, 'from torch import nn\n'), ((1043, 1071), 'torch.empty', 'torch.empty', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (1054, 1071), False, 'import torch\n'), ((1114, 1142), 'torch.empty', 'torch.empty', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (1125, 1142), False, 'import torch\n'), ((1191, 1219), 'torch.empty', 'torch.empty', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (1202, 1219), False, 'import torch\n'), ((1257, 1277), 'torch.empty', 'torch.empty', (['out_dim'], {}), '(out_dim)\n', (1268, 1277), False, 'import torch\n'), ((1318, 1338), 'torch.empty', 'torch.empty', (['out_dim'], {}), '(out_dim)\n', (1329, 1338), False, 'import torch\n'), ((1385, 1405), 'torch.empty', 'torch.empty', (['out_dim'], {}), '(out_dim)\n', (1396, 1405), False, 'import torch\n'), ((1525, 1552), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (1534, 1552), False, 'import math\n'), ((2331, 2440), 'torch.nn.functional.linear', 'F.linear', (['inp', '(self.weight_mu + self.weight_sigma * weight_eps)', '(self.bias_mu + self.bias_sigma * bias_eps)'], {}), '(inp, self.weight_mu + self.weight_sigma * weight_eps, self.bias_mu +\n self.bias_sigma * bias_eps)\n', (2339, 2440), True, 'import torch.nn.functional as F\n'), ((2533, 2576), 'torch.nn.functional.linear', 'F.linear', (['inp', 'self.weight_mu', 'self.bias_mu'], {}), '(inp, self.weight_mu, self.bias_mu)\n', (2541, 2576), True, 'import torch.nn.functional as F\n'), ((1664, 1691), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (1673, 1691), False, 'import math\n'), ((1800, 1828), 'math.sqrt', 'math.sqrt', (['self.out_features'], {}), '(self.out_features)\n', (1809, 1828), False, 'import math\n'), ((3506, 3515), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3513, 3515), False, 'from torch import nn\n'), ((3941, 3984), 'torch.nn.Linear', 'nn.Linear', (['self._in_dim', 'self._hidden_units'], {}), '(self._in_dim, self._hidden_units)\n', (3950, 3984), False, 'from torch import nn\n'), ((3986, 3995), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3993, 3995), False, 'from torch import nn\n'), ((6297, 6355), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', '(self._out_dim * self._atoms)'], {}), '(self._hidden_units, self._out_dim * self._atoms)\n', (6306, 6355), False, 'from torch 
import nn\n'), ((4703, 4712), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4710, 4712), False, 'from torch import nn\n'), ((5094, 5103), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5101, 5103), False, 'from torch import nn\n'), ((5400, 5467), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', 'self._hidden_units', 'self._sigma_init'], {}), '(self._hidden_units, self._hidden_units, self._sigma_init)\n', (5409, 5467), False, 'from torch import nn\n'), ((5489, 5498), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5496, 5498), False, 'from torch import nn\n'), ((5520, 5596), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', '(self._out_dim * self._atoms)', 'self._sigma_init'], {}), '(self._hidden_units, self._out_dim * self._atoms, self._sigma_init)\n', (5529, 5596), False, 'from torch import nn\n'), ((5787, 5854), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', 'self._hidden_units', 'self._sigma_init'], {}), '(self._hidden_units, self._hidden_units, self._sigma_init)\n', (5796, 5854), False, 'from torch import nn\n'), ((5876, 5885), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5883, 5885), False, 'from torch import nn\n'), ((5907, 5971), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', '(1 * self._atoms)', 'self._sigma_init'], {}), '(self._hidden_units, 1 * self._atoms, self._sigma_init)\n', (5916, 5971), False, 'from torch import nn\n'), ((453, 490), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', 'hidden_units'], {}), '(hidden_units, hidden_units)\n', (462, 490), False, 'from torch import nn\n'), ((492, 501), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (499, 501), False, 'from torch import nn\n'), ((3752, 3761), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3759, 3761), False, 'from torch import nn\n'), ((4098, 4147), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', 'self._hidden_units'], {}), '(self._hidden_units, self._hidden_units)\n', (4107, 4147), False, 'from torch import nn\n'), ((4149, 4158), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4156, 4158), False, 'from torch import nn\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>
# MIT License (https://opensource.org/licenses/MIT)
import logging
import numpy as np
import torch
from parallel_wavegan.layers import Conv1d
from parallel_wavegan.layers import Conv1d1x1
from parallel_wavegan.layers import Conv2d
from parallel_wavegan.layers import ConvInUpsampleNetwork
from parallel_wavegan.layers import UpsampleNetwork
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
def test_conv_initialization():
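    # Biases should be initialized to zero, and Conv2d weights to an averaging kernel (1 / prod(kernel_size)).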
conv = Conv1d(10, 10, 3, bias=True)
np.testing.assert_array_equal(conv.bias.data.numpy(),
np.zeros_like(conv.bias.data.numpy()))
conv1x1 = Conv1d1x1(10, 10, bias=True)
np.testing.assert_array_equal(conv1x1.bias.data.numpy(),
np.zeros_like(conv1x1.bias.data.numpy()))
kernel_size = (10, 10)
conv2d = Conv2d(10, 10, kernel_size, bias=True)
np.testing.assert_array_equal(conv2d.weight.data.numpy(),
np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size))
np.testing.assert_array_equal(conv2d.bias.data.numpy(),
np.zeros_like(conv2d.bias.data.numpy()))
kernel_size = (1, 10)
conv2d = Conv2d(10, 10, kernel_size, bias=True)
np.testing.assert_array_equal(conv2d.weight.data.numpy(),
np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size))
np.testing.assert_array_equal(conv2d.bias.data.numpy(),
np.zeros_like(conv2d.bias.data.numpy()))
def test_upsample():
length = 10
scales = [4, 4]
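    # The overall upsampling factor is the product of the per-layer scales (4 * 4 = 16 here).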
x = torch.randn(1, 10, length)
upsample = UpsampleNetwork(scales)
y = upsample(x)
assert x.size(-1) * np.prod(scales) == y.size(-1)
for aux_context_window in [0, 1, 2, 3]:
conv_upsample = ConvInUpsampleNetwork(scales,
aux_channels=x.size(1),
aux_context_window=aux_context_window)
y = conv_upsample(x)
assert (x.size(-1) - 2 * aux_context_window) * np.prod(scales) == y.size(-1)
|
[
"logging.basicConfig",
"parallel_wavegan.layers.UpsampleNetwork",
"numpy.prod",
"parallel_wavegan.layers.Conv1d1x1",
"parallel_wavegan.layers.Conv1d",
"parallel_wavegan.layers.Conv2d",
"torch.randn"
] |
[((418, 536), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')\n", (437, 536), False, 'import logging\n'), ((582, 610), 'parallel_wavegan.layers.Conv1d', 'Conv1d', (['(10)', '(10)', '(3)'], {'bias': '(True)'}), '(10, 10, 3, bias=True)\n', (588, 610), False, 'from parallel_wavegan.layers import Conv1d\n'), ((756, 784), 'parallel_wavegan.layers.Conv1d1x1', 'Conv1d1x1', (['(10)', '(10)'], {'bias': '(True)'}), '(10, 10, bias=True)\n', (765, 784), False, 'from parallel_wavegan.layers import Conv1d1x1\n'), ((962, 1000), 'parallel_wavegan.layers.Conv2d', 'Conv2d', (['(10)', '(10)', 'kernel_size'], {'bias': '(True)'}), '(10, 10, kernel_size, bias=True)\n', (968, 1000), False, 'from parallel_wavegan.layers import Conv2d\n'), ((1336, 1374), 'parallel_wavegan.layers.Conv2d', 'Conv2d', (['(10)', '(10)', 'kernel_size'], {'bias': '(True)'}), '(10, 10, kernel_size, bias=True)\n', (1342, 1374), False, 'from parallel_wavegan.layers import Conv2d\n'), ((1737, 1763), 'torch.randn', 'torch.randn', (['(1)', '(10)', 'length'], {}), '(1, 10, length)\n', (1748, 1763), False, 'import torch\n'), ((1779, 1802), 'parallel_wavegan.layers.UpsampleNetwork', 'UpsampleNetwork', (['scales'], {}), '(scales)\n', (1794, 1802), False, 'from parallel_wavegan.layers import UpsampleNetwork\n'), ((1140, 1160), 'numpy.prod', 'np.prod', (['kernel_size'], {}), '(kernel_size)\n', (1147, 1160), True, 'import numpy as np\n'), ((1514, 1534), 'numpy.prod', 'np.prod', (['kernel_size'], {}), '(kernel_size)\n', (1521, 1534), True, 'import numpy as np\n'), ((1847, 1862), 'numpy.prod', 'np.prod', (['scales'], {}), '(scales)\n', (1854, 1862), True, 'import numpy as np\n'), ((2215, 2230), 'numpy.prod', 'np.prod', (['scales'], {}), '(scales)\n', (2222, 2230), True, 'import numpy as np\n')]
|
# Copyright 2020 The Tilt Brush Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for 3d printing."""
import os
import re
import sys
import math
import pprint
import shutil
import itertools
import subprocess
from collections import Counter
import numpy
try:
from tiltbrush.tilt import Tilt
except ImportError:
print("You need the Tilt Brush Toolkit (https://github.com/googlevr/tilt-brush-toolkit)")
print("and then put its Python directory in your PYTHONPATH.")
sys.exit(1)
from tbdata.brush_lookup import BrushLookup
# Convert strokes for 3d printing.
# True Don't touch these strokes
# False Remove these strokes from the sketch
# <name> Replace the brush for these strokes
# names can also be guids, which is useful when the name is ambiguous
BRUSH_REPLACEMENTS = [
# Good brushes
('SquarePaper', True),
('ThickGeometry', True),
('Wire', True),
# Brushes that should be replaced
('TaperedMarker', 'ThickGeometry'),
('OilPaint', 'ThickGeometry'),
('Ink', 'ThickGeometry'),
('Marker', 'ThickGeometry'),
('Paper', 'ThickGeometry'),
('FlatDeprecated','ThickGeometry'),
# Questionable
('Highlighter', 'ThickGeometry'),
('Light', 'Wire'),
# Remove particles
('Smoke', None),
('Snow', None),
('Embers', None),
('Stars', None),
# Remove animated
('Fire', None),
# Remove shader-based
('Plasma', None),
('Rainbow', None),
('Streamers', None),
]
# ----------------------------------------------------------------------
# Little utilities
# ----------------------------------------------------------------------
def msg(text):
sys.stdout.write("%-79s\r" % text[:79])
sys.stdout.flush()
def msgln(text):
sys.stdout.write("%-79s\n" % text[:79])
sys.stdout.flush()
def rgb8_to_hsl(rgb):
"""Takes a rgb8 tuple, returns a hsl tuple."""
HUE_MAX = 6
r = rgb[0] / 255.0
g = rgb[1] / 255.0
b = rgb[2] / 255.0
cmin = min(r, g, b)
cmax = max(r, g, b)
delta = cmax - cmin
h = 0
s = 0
  l = (cmax + cmin) / 2
  if delta != 0:
    if l < 0.5:
      s = delta / (cmax + cmin)
    else:
      s = delta / (2 - cmax - cmin)
if r == cmax:
h = (g - b) / delta
elif g == cmax:
h = 2 + (b - r) / delta
elif b == cmax:
h = 4 + (r - g) / delta
return h, s, l
# ----------------------------------------------------------------------
# Brush conversion
# ----------------------------------------------------------------------
def get_replacements_by_guid(replacements_by_name):
"""Returns a lookup table that is by-guid rather than by-name."""
brush_lookup = BrushLookup.get()
def guid_or_name_to_guid(guid_or_name):
if guid_or_name in brush_lookup.guid_to_name:
return guid_or_name
elif guid_or_name in brush_lookup.name_to_guids:
return brush_lookup.get_unique_guid(guid_or_name)
else:
raise LookupError("Not a known brush or brush guid: %r" % guid_or_name)
dct = {}
for before, after in replacements_by_name:
before_guid = guid_or_name_to_guid(before)
if after is True:
after_guid = before_guid
elif after is None:
after_guid = None
else:
after_guid = guid_or_name_to_guid(after)
dct[before_guid] = after_guid
return dct
def convert_brushes(tilt, replacements_by_name, show_removed=False):
"""Convert brushes to 3d-printable versions, or remove their strokes from the tilt."""
replacements = get_replacements_by_guid(replacements_by_name)
brush_lookup = BrushLookup.get()
with tilt.mutable_metadata() as dct:
index_to_guid = dct['BrushIndex']
# First, show us what brushes the tilt file uses
used_guids = Counter()
for stroke in tilt.sketch.strokes:
guid = index_to_guid[stroke.brush_idx]
used_guids[guid] += 1
print("Brushes used:")
for guid, n in sorted(list(used_guids.items()), key=lambda p:-p[1]):
print(" %5d %s" % (n, brush_lookup.guid_to_name.get(guid)))
sys.stdout.flush()
del used_guids
index_to_new_index = {}
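    # Map each old brush index to its replacement index; None marks strokes that should be removed.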
for i, guid in enumerate(index_to_guid):
name = brush_lookup.guid_to_name.get(guid, guid)
try:
new_guid = replacements[guid]
except KeyError:
print("%d: Don't know what to do with brush %s" % (i, name))
index_to_new_index[i] = i
else:
new_name = brush_lookup.guid_to_name.get(new_guid, new_guid)
if new_guid is None:
print("%d: Remove %s" % (i, name))
index_to_new_index[i] = None
else:
if guid == new_guid:
print("%d: Keep %s" % (i, name))
elif name == new_name:
print("%d: Replace %s/%s -> %s/%s" % (i, name, guid, new_name, new_guid))
else:
print("%d: Replace %s -> %s" % (i, name, new_name))
try:
new_idx = index_to_guid.index(new_guid)
except ValueError:
new_idx = len(index_to_guid)
index_to_guid.append(new_guid)
index_to_new_index[i] = new_idx
brush_indices_to_remove = set(i for (i, new_i) in list(index_to_new_index.items()) if new_i is None)
if brush_indices_to_remove:
old_len = len(tilt.sketch.strokes)
if show_removed:
# Render in magenta instead of removing
for stroke in tilt.sketch.strokes:
if stroke.brush_idx in brush_indices_to_remove:
stroke.brush_color = (1, 0, 1, 1)
else:
stroke.brush_color = stroke.brush_color
else:
tilt.sketch.strokes[:] = [s for s in tilt.sketch.strokes if s.brush_idx not in brush_indices_to_remove]
new_len = len(tilt.sketch.strokes)
print("Strokes %d -> %d" % (old_len, new_len))
for stroke in tilt.sketch.strokes:
new_idx = index_to_new_index[stroke.brush_idx]
# Might be none if it's a removed brush
if new_idx is not None:
stroke.brush_idx = new_idx
# ----------------------------------------------------------------------
# Stroke simplification
# ----------------------------------------------------------------------
def calculate_pos_error(cp0, cp1, middle_cps):
if len(middle_cps) == 0:
return 0
strip_length = cp1._dist - cp0._dist
if strip_length <= 0:
return 0
max_pos_error = 0
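  # Measure how far each skipped control point drifts from the straight-line interpolation between cp0 and cp1.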
for i, cp in enumerate(middle_cps):
t = (cp._dist - cp0._dist) / strip_length
    pos_interpolated = (1-t) * cp0._pos + t * cp1._pos
pos_error = numpy.linalg.norm((pos_interpolated - cp._pos))
if pos_error > max_pos_error:
max_pos_error = pos_error
return max_pos_error
def simplify_stroke(stroke, max_error):
# Do greedy optimization of stroke.
REQUIRED_END_CPS = 1 # or 2
keep_cps = []
toss_cps = [] # The current set of candidates to toss
n = len(stroke.controlpoints)
brush_size = stroke.brush_size
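  # Cache positions and cumulative arc length, then greedily drop control points while the positional error stays within tolerance.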
for i, cp in enumerate(stroke.controlpoints):
cp._pos = numpy.array(cp.position)
if i == 0:
cp._dist = 0
else:
prev_cp = stroke.controlpoints[i-1]
cp._dist = prev_cp._dist + numpy.linalg.norm(prev_cp._pos - cp._pos)
if REQUIRED_END_CPS <= i < n - REQUIRED_END_CPS:
pos_error = calculate_pos_error(keep_cps[-1], cp, toss_cps)
keep = (pos_error > max_error * stroke.brush_size)
#print " %3d: %s %f %f" % (i, keep, pos_error, stroke.brush_size * .2)
else:
keep = True
#print " %3d: True (End)" % i
if keep:
keep_cps.append(cp)
toss_cps = []
else:
toss_cps.append(cp)
stroke.controlpoints[:] = keep_cps
def reduce_control_points(tilt, max_error):
# If debug_simplify, the resulting .tilt file shows both the old and the new
before_cp = 0
after_cp = 0
msg("Simplify strokes")
pct = 0
n = len(tilt.sketch.strokes)
for i, stroke in enumerate(tilt.sketch.strokes):
new_pct = (i+1) * 100 / n
if new_pct != pct:
pct = new_pct
removed_pct = (before_cp - after_cp) * 100 / (before_cp+1)
msg("Simplify strokes: %3d%% %5d/%5d Removed %3d%%" % (pct, i, n, removed_pct))
before_cp += len(stroke.controlpoints)
simplify_stroke(stroke, max_error)
after_cp += len(stroke.controlpoints)
msg("Simplify strokes: done")
msgln("Control points: %5d -> %5d (%2d%%)" % (
before_cp, after_cp, after_cp * 100 / before_cp))
# ----------------------------------------------------------------------
# Stray strokes
# ----------------------------------------------------------------------
def remove_stray_strokes(tilt, max_dist=0, replacement_brush_guid=None):
"""Show histograms of control point positions, to help with resizing."""
import numpy as np
from math import sqrt
def iter_pos(tilt):
first_cp = 0
for stroke in tilt.sketch.strokes:
stroke._first_cp = first_cp
first_cp += len(stroke.controlpoints)
for cp in stroke.controlpoints:
yield cp.position
positions = np.array(list(iter_pos(tilt)))
if False:
# Print out x/y/z histograms
histograms = [np.histogram(positions[... , i], bins=30) for i in range(3)]
for irow in range(len(histograms[0][0])+1):
for axis, histogram in enumerate(histograms):
try:
print("%s %3d %6d " % ('xyz'[axis], histogram[1][irow], histogram[0][irow]), end=' ')
except IndexError:
print("%s %3d %6s " % ('xyz'[axis], histogram[1][irow], ''), end=' ')
print()
if max_dist > 0:
# Convert replacement guid -> replacement index
if replacement_brush_guid is None:
replacement_brush_index = None
else:
with tilt.mutable_metadata() as dct:
try:
replacement_brush_index = dct['BrushIndex'].index(replacement_brush_guid)
except ValueError:
dct['BrushIndex'].append(replacement_brush_guid)
replacement_brush_index = dct['BrushIndex'].index(replacement_brush_guid)
# Compute Mahalanobis distance and remove strokes that fall outside
# https://en.wikipedia.org/wiki/Mahalanobis_distance
mean = np.mean(positions, axis=0)
cov = np.cov(positions, rowvar=False)
invcov = np.linalg.inv(cov)
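    # A stroke counts as a stray if any of its control points is more than max_dist away (in Mahalanobis distance) from the mean.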
def mahalanobis_distance(v):
"""Return distance of row vector"""
cv = (v - mean)[np.newaxis]
return sqrt(cv.dot(invcov).dot(cv.T)[0, 0])
def out_of_bounds(stroke):
i0 = stroke._first_cp
i1 = i0 + len(stroke.controlpoints)
dists = np.array(list(map(mahalanobis_distance, positions[i0 : i1])))
return np.any(dists > max_dist)
msg("Finding OOB strokes")
# TODO: figure out how to use np.einsum() and remove all the python-level loops
oob_strokes = [
pair for pair in enumerate(tilt.sketch.strokes)
if out_of_bounds(pair[1])
]
msg("")
if len(oob_strokes):
if replacement_brush_index is not None:
for i, stroke in oob_strokes:
print("Replacing out-of-bounds stroke", i)
stroke.brush_idx = replacement_brush_index
stroke.brush_color = (1,0,1,1)
else:
print("Removing %d strokes" % len(oob_strokes))
remove_indices = set(pair[0] for pair in oob_strokes)
tilt.sketch.strokes[:] = [
        stroke for i, stroke in enumerate(tilt.sketch.strokes)
if i not in remove_indices
]
# ----------------------------------------------------------------------
# Color reduction
# ----------------------------------------------------------------------
def get_most_similar_factors(n):
"""Factorize n into two numbers.
Returns the best pair, in the sense that the numbers are the closest to each other."""
i = int(n**0.5 + 0.5)
while n % i != 0:
i -= 1
  return i, n // i  # integer division keeps the factors as ints (they become image dimensions)
def get_good_factors(n, max_aspect_ratio=None):
"""Factorize n into two integers that are closest to each other.
If max_aspect_ratio is passed, search numbers >= n until
a pair is found whose aspect ratio is <= max_aspect_ratio."""
if max_aspect_ratio is None:
return get_most_similar_factors(n)
for i in itertools.count():
a, b = get_most_similar_factors(n + i)
if float(b)/a <= max_aspect_ratio:
return a, b
def rgbaf_to_rgb8(rgbaf):
"""Convert [r, g, b, a] floats to (r, g, b) bytes."""
return tuple(int(channel * 255) for channel in rgbaf[0:3])
def rgb8_to_rgbaf(rgb8):
"""Convert (r, g, b) bytes to [r, g, b, a] floats."""
lst = [channel / 255.0 for channel in rgb8]
lst.append(1.0)
return lst
def tilt_colors_to_image(tilt, max_aspect_ratio=None, preserve_colors=()):
"""Returns a PIL.Image containing the colors used in the tilt.
The image will have colors in roughly the same proportion as the
control points in the tilt.
preserve_colors is a list of rgb8 colors."""
import numpy as np
from PIL import Image
assert max_aspect_ratio is None or max_aspect_ratio > 0
preserve_colors = set(preserve_colors)
def iter_rgb8_colors(tilt):
for stroke in tilt.sketch.strokes:
yield (rgbaf_to_rgb8(stroke.brush_color), len(stroke.controlpoints))
def by_decreasing_usage(counter_pair):
# Sort function for colors
return -counter_pair[1]
def by_color_similarity(counter_pair):
# Sort function for colors
rgb8, usage = counter_pair
h, s, l = rgb8_to_hsl(rgb8)
return (rgb8 in preserve_colors), l
counter = Counter()
for color, n in iter_rgb8_colors(tilt):
counter[color] += n
most_used_color, amt = max(iter(counter.items()), key=lambda pair: pair[1])
for rgb8 in preserve_colors:
if rgb8 not in counter:
print("Ignoring: #%02x%02x%02x is not in the image" % rgb8)
else:
counter[rgb8] += amt / 2
# Find a "nice" width and height, possibly adjusting the number of texels
num_texels = sum(counter.values())
width, height = get_good_factors(num_texels, max_aspect_ratio)
if width * height != num_texels:
counter[most_used_color] += width * height - num_texels
assert counter[most_used_color] > 0
num_texels = sum(counter.values())
assert width * height == num_texels
# Expand the colors into a 1d array, then turn into an Image
colors_array = np.zeros(shape=(num_texels, 3), dtype='uint8')
i = 0
# The sort used here only matters to humans when they look at the images
colors_and_counts = sorted(iter(counter.items()), key=by_color_similarity)
# colors_and_counts = sorted(counter.iteritems(), key=by_decreasing_usage)
for (color, count) in colors_and_counts:
colors_array[i:i+count] = color
i += count
colors_array.shape = (height, width, 3)
return Image.fromarray(colors_array, mode='RGB')
def get_quantized_image_pillow(im, num_colors):
MAXIMUM_COVERAGE = 1
print("Falling back to old color quantization")
return im.quantize(colors=num_colors, method=MAXIMUM_COVERAGE), 'pillow'
def get_quantized_image_pngquant(im, num_colors):
from PIL import Image
import subprocess
# pngquant errors out if its best solution is below this "quality"
QUALITY_MIN = 0 # never error out
# pngquant stops using colors when "quality" goes above this.
# I have no real feeling for what this number means in practice
QUALITY_MAX = 40
im.save('tmp_pngquant.png')
try:
subprocess.check_call([
'pngquant',
'--nofs', # no dithering
'--force',
'--quality', '%d-%d' % (QUALITY_MIN, QUALITY_MAX),
'-o', 'tmp_pngquant_out.png',
str(num_colors), '--',
'tmp_pngquant.png'
])
imq = Image.open('tmp_pngquant_out.png')
imq.load()
finally:
if os.path.exists('tmp_pngquant.png'):
os.unlink('tmp_pngquant.png')
if os.path.exists('tmp_pngquant_out.png'):
os.unlink('tmp_pngquant_out.png')
return imq, 'pngquant'
def get_quantized_image(im, num_colors):
try:
return get_quantized_image_pngquant(im, num_colors)
except subprocess.CalledProcessError as e:
print("Error running pngquant: %s" % e)
except OSError as e:
print("Missing pngquant: %s" % e)
print("Download pngquant.exe it and put it in your PATH.")
return get_quantized_image_pillow(im, num_colors)
def simplify_colors(tilt, num_colors, preserve_colors):
im = tilt_colors_to_image(tilt, max_aspect_ratio=4, preserve_colors=preserve_colors)
if num_colors < 0:
# Little hack to force use of pillow
imq, method = get_quantized_image_pillow(im, -num_colors)
else:
imq, method = get_quantized_image(im, num_colors)
def iter_rgb8(im):
return zip(im.getdata(0), im.getdata(1), im.getdata(2))
def get_imq_color(ipixel, data=imq.getdata(), palette=imq.getpalette()):
# Look up color in imq, which is awkward because it's palettized
palette_entry = data[ipixel]
r, g, b = palette[palette_entry * 3 : (palette_entry + 1) * 3]
return (r, g, b)
# Create table mapping unquantized rgb8 to quantized rgbaf
old_to_new = {}
idx = 0
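  # The original and quantized images share pixel order, so walk them in parallel to map each old color to its quantized value.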
for (old_color, group) in itertools.groupby(iter_rgb8(im)):
assert old_color not in old_to_new
old_to_new[old_color] = rgb8_to_rgbaf(get_imq_color(idx))
idx += len(list(group))
for stroke in tilt.sketch.strokes:
stroke.brush_color = old_to_new[rgbaf_to_rgb8(stroke.brush_color)]
if True:
import numpy as np
for old8, newf in old_to_new.items():
oldv = np.array(rgb8_to_rgbaf(old8)[0:3])
newv = np.array(newf[0:3])
err = oldv - newv
err = math.sqrt(np.dot(err, err))
if err > .2:
print("High color error: #%02x%02x%02x" % old8)
num_colors = len(set(map(tuple, list(old_to_new.values()))))
base, _ = os.path.splitext(tilt.filename)
im.save('%s_%s.png' % (base, 'orig'))
imq.save('%s_%s_%d.png' % (base, method, num_colors))
# ----------------------------------------------------------------------
# Split export into multiple .obj files
# ----------------------------------------------------------------------
def iter_aggregated_by_color(json_filename):
"""Yields TiltBrushMesh instances, each of a uniform color."""
from tiltbrush.export import iter_meshes, TiltBrushMesh
def by_color(m): return m.c[0]
meshes = iter_meshes(json_filename)
for (color, group) in itertools.groupby(sorted(meshes, key=by_color), key=by_color):
yield TiltBrushMesh.from_meshes(group)
def write_simple_obj(mesh, outf_name):
from io import StringIO
tmpf = StringIO()
for v in mesh.v:
tmpf.write("v %f %f %f\n" % v)
for (t1, t2, t3) in mesh.tri:
t1 += 1; t2 += 1; t3 += 1
tmpf.write("f %d %d %d\n" % (t1, t2, t3))
  with open(outf_name, 'w') as outf:
outf.write(tmpf.getvalue())
def split_json_into_obj(json_filename):
import struct
output_base = os.path.splitext(json_filename)[0].replace('_out', '')
meshes = list(iter_aggregated_by_color(json_filename))
meshes.sort(key=lambda m: len(m.v), reverse=True)
for i, mesh in enumerate(meshes):
# It's the "ignore normals" that does the most collapsing here.
mesh.collapse_verts(ignore=('uv0', 'uv1', 'c', 't', 'n'))
mesh.remove_degenerate()
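    # The mesh color is a packed 32-bit RGBA value; unpack it into individual byte channels.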
(r, g, b, a) = struct.unpack('4B', struct.pack('I', mesh.c[0]))
assert a == 255, (r, g, b, a)
hex_color = '%02x%02x%02x' % (r, g, b)
outf_name = '%s %02d %s.obj' % (output_base, i, hex_color)
write_simple_obj(mesh, outf_name)
msgln("Wrote %s" % outf_name)
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def process_tilt(filename, args):
msg("Load tilt")
tilt = Tilt(filename)
msg("Load strokes")
tilt.sketch.strokes
msg("")
if args.debug:
msg("Clone strokes")
before_strokes = [s.clone() for s in tilt.sketch.strokes]
# Do this before color quantization, because it removes strokes (and their colors)
if args.convert_brushes:
convert_brushes(tilt, BRUSH_REPLACEMENTS)
if args.remove_stray_strokes is not None:
remove_stray_strokes(tilt, args.remove_stray_strokes,
BrushLookup.get().get_unique_guid('Wire'))
if args.pos_error_tolerance > 0:
reduce_control_points(tilt, args.pos_error_tolerance)
if args.simplify_colors is not None:
simplify_colors(tilt, num_colors=args.simplify_colors, preserve_colors=args.preserve_colors)
if args.debug:
final_strokes = []
# interleave them so it renders semi-nicely...
for before, after in itertools.zip_longest(before_strokes, tilt.sketch.strokes):
if before is not None:
for cp in before.controlpoints:
cp.position[1] += 10
final_strokes.append(before)
if after is not None:
final_strokes.append(after)
tilt.sketch.strokes[:] = final_strokes
tilt.write_sketch()
msgln("Wrote %s" % os.path.basename(tilt.filename))
def main():
import argparse
parser = argparse.ArgumentParser(usage='''%(prog)s [ files ]
Process .tilt files to get them ready for 3D printing.
You should generally do the steps in this order:
1. Use --remove-stray-strokes (which actually just colors them magenta).
Manually delete the strokes you don't want to keep.
2. Experiment with different values for --simplify-colors. Use
--preserve-color option to force a color to remain present.
3. Use --convert-brushes and --pos-error-tolerance.
4. Load .tilt files in Tilt Brush, and export to .json
5. Convert from .json -> multiple .obj files
''')
def hex_color(arg):
arg = arg.lower()
m = re.match(r'^#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$', arg)
if m is not None:
return tuple(int(m.group(i), 16) for i in (1, 2, 3))
else:
raise argparse.ArgumentTypeError("Must be exactly hex 6 digits: %r" % arg)
parser.add_argument(
'--debug', action='store_true',
help='For debugging: put both the original and modified strokes in the resulting .tilt file')
parser.add_argument(
'--remove-stray-strokes', metavar='float', type=float, default=None,
help="Replace strokes that are far away from the sketch with magenta wire. Argument is the number of standard deviations; 5.0 is a reasonable starting point.")
parser.add_argument(
'--simplify-colors', type=int, metavar='N',
help='Simplify down to N colors. Use a negative number to try the alternate algorithm.')
parser.add_argument(
'--preserve-color', dest='preserve_colors', type=hex_color, action='append',
default=[],
help='Color to preserve, as a hex string like #ff00ff')
parser.add_argument(
'--convert-brushes', action='store_true',
help='Convert brushes to 3d-printable ones')
parser.add_argument(
'--pos-error-tolerance', type=float, default=0,
help='Allowable positional error when simplifying strokes, as a fraction of stroke width. If 0, do not simplify. .1 to .3 are good values. (default %(default)s)')
parser.add_argument('-o', dest='output_file', help='Name of output file (optional)')
parser.add_argument('files', type=str, nargs='+', help='File(s) to hack')
args = parser.parse_args()
for i, orig_filename in enumerate(args.files):
if orig_filename.endswith('.tilt'):
base, ext = os.path.splitext(orig_filename)
if i == 0 and args.output_file is not None:
working_filename = args.output_file
else:
working_filename = base + '_out' + ext
shutil.copyfile(orig_filename, working_filename)
process_tilt(working_filename, args)
elif orig_filename.endswith('.json'):
split_json_into_obj(orig_filename)
if __name__=='__main__':
main()
|
[
"numpy.array",
"numpy.linalg.norm",
"sys.exit",
"numpy.cov",
"numpy.mean",
"os.path.exists",
"numpy.histogram",
"argparse.ArgumentParser",
"tiltbrush.export.iter_meshes",
"tiltbrush.export.TiltBrushMesh.from_meshes",
"numpy.dot",
"os.unlink",
"io.StringIO",
"sys.stdout.flush",
"tiltbrush.tilt.Tilt",
"os.path.splitext",
"itertools.zip_longest",
"re.match",
"numpy.any",
"struct.pack",
"tbdata.brush_lookup.BrushLookup.get",
"argparse.ArgumentTypeError",
"shutil.copyfile",
"PIL.Image.fromarray",
"PIL.Image.open",
"collections.Counter",
"numpy.zeros",
"itertools.count",
"numpy.linalg.inv",
"os.path.basename",
"sys.stdout.write"
] |
[((2232, 2271), 'sys.stdout.write', 'sys.stdout.write', (["('%-79s\\r' % text[:79])"], {}), "('%-79s\\r' % text[:79])\n", (2248, 2271), False, 'import sys\n'), ((2274, 2292), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2290, 2292), False, 'import sys\n'), ((2314, 2353), 'sys.stdout.write', 'sys.stdout.write', (["('%-79s\\n' % text[:79])"], {}), "('%-79s\\n' % text[:79])\n", (2330, 2353), False, 'import sys\n'), ((2356, 2374), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2372, 2374), False, 'import sys\n'), ((3187, 3204), 'tbdata.brush_lookup.BrushLookup.get', 'BrushLookup.get', ([], {}), '()\n', (3202, 3204), False, 'from tbdata.brush_lookup import BrushLookup\n'), ((4075, 4092), 'tbdata.brush_lookup.BrushLookup.get', 'BrushLookup.get', ([], {}), '()\n', (4090, 4092), False, 'from tbdata.brush_lookup import BrushLookup\n'), ((12463, 12480), 'itertools.count', 'itertools.count', ([], {}), '()\n', (12478, 12480), False, 'import itertools\n'), ((13754, 13763), 'collections.Counter', 'Counter', ([], {}), '()\n', (13761, 13763), False, 'from collections import Counter\n'), ((14553, 14599), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_texels, 3)', 'dtype': '"""uint8"""'}), "(shape=(num_texels, 3), dtype='uint8')\n", (14561, 14599), True, 'import numpy as np\n'), ((14982, 15023), 'PIL.Image.fromarray', 'Image.fromarray', (['colors_array'], {'mode': '"""RGB"""'}), "(colors_array, mode='RGB')\n", (14997, 15023), False, 'from PIL import Image\n'), ((18505, 18531), 'tiltbrush.export.iter_meshes', 'iter_meshes', (['json_filename'], {}), '(json_filename)\n', (18516, 18531), False, 'from tiltbrush.export import iter_meshes, TiltBrushMesh\n'), ((18738, 18748), 'io.StringIO', 'StringIO', ([], {}), '()\n', (18746, 18748), False, 'from io import StringIO\n'), ((19918, 19932), 'tiltbrush.tilt.Tilt', 'Tilt', (['filename'], {}), '(filename)\n', (19922, 19932), False, 'from tiltbrush.tilt import Tilt\n'), ((21194, 21773), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""%(prog)s [ files ]\n\nProcess .tilt files to get them ready for 3D printing.\n\nYou should generally do the steps in this order:\n\n1. Use --remove-stray-strokes (which actually just colors them magenta).\n Manually delete the strokes you don\'t want to keep.\n2. Experiment with different values for --simplify-colors. Use\n --preserve-color option to force a color to remain present.\n3. Use --convert-brushes and --pos-error-tolerance.\n4. Load .tilt files in Tilt Brush, and export to .json\n5. Convert from .json -> multiple .obj files\n"""'}), '(usage=\n """%(prog)s [ files ]\n\nProcess .tilt files to get them ready for 3D printing.\n\nYou should generally do the steps in this order:\n\n1. Use --remove-stray-strokes (which actually just colors them magenta).\n Manually delete the strokes you don\'t want to keep.\n2. Experiment with different values for --simplify-colors. Use\n --preserve-color option to force a color to remain present.\n3. Use --convert-brushes and --pos-error-tolerance.\n4. Load .tilt files in Tilt Brush, and export to .json\n5. 
Convert from .json -> multiple .obj files\n"""\n )\n', (21217, 21773), False, 'import argparse\n'), ((991, 1002), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (999, 1002), False, 'import sys\n'), ((4242, 4251), 'collections.Counter', 'Counter', ([], {}), '()\n', (4249, 4251), False, 'from collections import Counter\n'), ((4535, 4553), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4551, 4553), False, 'import sys\n'), ((6945, 6990), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(pos_interpolated - cp._pos)'], {}), '(pos_interpolated - cp._pos)\n', (6962, 6990), False, 'import numpy\n'), ((7398, 7422), 'numpy.array', 'numpy.array', (['cp.position'], {}), '(cp.position)\n', (7409, 7422), False, 'import numpy\n'), ((10492, 10518), 'numpy.mean', 'np.mean', (['positions'], {'axis': '(0)'}), '(positions, axis=0)\n', (10499, 10518), True, 'import numpy as np\n'), ((10529, 10560), 'numpy.cov', 'np.cov', (['positions'], {'rowvar': '(False)'}), '(positions, rowvar=False)\n', (10535, 10560), True, 'import numpy as np\n'), ((10574, 10592), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (10587, 10592), True, 'import numpy as np\n'), ((15900, 15934), 'PIL.Image.open', 'Image.open', (['"""tmp_pngquant_out.png"""'], {}), "('tmp_pngquant_out.png')\n", (15910, 15934), False, 'from PIL import Image\n'), ((15968, 16002), 'os.path.exists', 'os.path.exists', (['"""tmp_pngquant.png"""'], {}), "('tmp_pngquant.png')\n", (15982, 16002), False, 'import os\n'), ((16047, 16085), 'os.path.exists', 'os.path.exists', (['"""tmp_pngquant_out.png"""'], {}), "('tmp_pngquant_out.png')\n", (16061, 16085), False, 'import os\n'), ((17970, 18001), 'os.path.splitext', 'os.path.splitext', (['tilt.filename'], {}), '(tilt.filename)\n', (17986, 18001), False, 'import os\n'), ((20770, 20828), 'itertools.zip_longest', 'itertools.zip_longest', (['before_strokes', 'tilt.sketch.strokes'], {}), '(before_strokes, tilt.sketch.strokes)\n', (20791, 20828), False, 'import itertools\n'), ((21817, 21877), 're.match', 're.match', (['"""^#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$"""', 'arg'], {}), "('^#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$', arg)\n", (21825, 21877), False, 'import re\n'), ((9488, 9528), 'numpy.histogram', 'np.histogram', (['positions[..., i]'], {'bins': '(30)'}), '(positions[..., i], bins=30)\n', (9500, 9528), True, 'import numpy as np\n'), ((10948, 10972), 'numpy.any', 'np.any', (['(dists > max_dist)'], {}), '(dists > max_dist)\n', (10954, 10972), True, 'import numpy as np\n'), ((16010, 16039), 'os.unlink', 'os.unlink', (['"""tmp_pngquant.png"""'], {}), "('tmp_pngquant.png')\n", (16019, 16039), False, 'import os\n'), ((16093, 16126), 'os.unlink', 'os.unlink', (['"""tmp_pngquant_out.png"""'], {}), "('tmp_pngquant_out.png')\n", (16102, 16126), False, 'import os\n'), ((17731, 17750), 'numpy.array', 'np.array', (['newf[0:3]'], {}), '(newf[0:3])\n', (17739, 17750), True, 'import numpy as np\n'), ((18629, 18661), 'tiltbrush.export.TiltBrushMesh.from_meshes', 'TiltBrushMesh.from_meshes', (['group'], {}), '(group)\n', (18654, 18661), False, 'from tiltbrush.export import iter_meshes, TiltBrushMesh\n'), ((19459, 19486), 'struct.pack', 'struct.pack', (['"""I"""', 'mesh.c[0]'], {}), "('I', mesh.c[0])\n", (19470, 19486), False, 'import struct\n'), ((21118, 21149), 'os.path.basename', 'os.path.basename', (['tilt.filename'], {}), '(tilt.filename)\n', (21134, 21149), False, 'import os\n'), ((21982, 22050), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('Must be exactly hex 6 digits: %r' % arg)"], 
{}), "('Must be exactly hex 6 digits: %r' % arg)\n", (22008, 22050), False, 'import argparse\n'), ((23478, 23509), 'os.path.splitext', 'os.path.splitext', (['orig_filename'], {}), '(orig_filename)\n', (23494, 23509), False, 'import os\n'), ((23669, 23717), 'shutil.copyfile', 'shutil.copyfile', (['orig_filename', 'working_filename'], {}), '(orig_filename, working_filename)\n', (23684, 23717), False, 'import shutil\n'), ((7542, 7583), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(prev_cp._pos - cp._pos)'], {}), '(prev_cp._pos - cp._pos)\n', (7559, 7583), False, 'import numpy\n'), ((17797, 17813), 'numpy.dot', 'np.dot', (['err', 'err'], {}), '(err, err)\n', (17803, 17813), True, 'import numpy as np\n'), ((19059, 19090), 'os.path.splitext', 'os.path.splitext', (['json_filename'], {}), '(json_filename)\n', (19075, 19090), False, 'import os\n'), ((20379, 20396), 'tbdata.brush_lookup.BrushLookup.get', 'BrushLookup.get', ([], {}), '()\n', (20394, 20396), False, 'from tbdata.brush_lookup import BrushLookup\n')]
|
import h5py
import numpy as np
import glob
import os, pdb
import tensorflow as tf
from rllab.envs.base import EnvSpec
from rllab.envs.normalized_env import normalize as normalize_env
import rllab.misc.logger as logger
from sandbox.rocky.tf.algos.trpo import TRPO
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.policies.gaussian_gru_policy import GaussianGRUPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.spaces.discrete import Discrete
from hgail.algos.hgail_impl import Level
from hgail.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from hgail.critic.critic import WassersteinCritic
from hgail.envs.spec_wrapper_env import SpecWrapperEnv
from hgail.envs.vectorized_normalized_env import vectorized_normalized_env
from hgail.misc.datasets import CriticDataset, RecognitionDataset
from hgail.policies.categorical_latent_sampler import CategoricalLatentSampler
from hgail.policies.gaussian_latent_var_gru_policy import GaussianLatentVarGRUPolicy
from hgail.policies.gaussian_latent_var_mlp_policy import GaussianLatentVarMLPPolicy
from hgail.policies.latent_sampler import UniformlyRandomLatentSampler
from hgail.core.models import ObservationActionMLP
from hgail.policies.scheduling import ConstantIntervalScheduler
from hgail.recognition.recognition_model import RecognitionModel
from hgail.samplers.hierarchy_sampler import HierarchySampler
import hgail.misc.utils
from julia_env.julia_env import JuliaEnv
'''
Const
NGSIM_FILENAME_TO_ID = {
'trajdata_i101_trajectories-0750am-0805am.txt': 1,
'trajdata_i101_trajectories-0805am-0820am.txt': 2,
'trajdata_i101_trajectories-0820am-0835am.txt': 3,
'trajdata_i80_trajectories-0400-0415.txt': 4,
'trajdata_i80_trajectories-0500-0515.txt': 5,
'trajdata_i80_trajectories-0515-0530.txt': 6
}'''
NGSIM_FILENAME_TO_ID = {
'trajdata_i101_trajectories-0750am-0805am.txt': 1,
'trajdata_i101-22agents-0750am-0805am.txt' : 1
}
'''
Common
'''
def maybe_mkdir(dirpath):
if not os.path.exists(dirpath):
os.mkdir(dirpath)
def partition_list(lst, n):
sublists = [[] for _ in range(n)]
for i, v in enumerate(lst):
sublists[i % n].append(v)
return sublists
def str2bool(v):
if v.lower() == 'true':
return True
return False
def write_trajectories(filepath, trajs):
np.savez(filepath, trajs=trajs)
def load_trajectories(filepath):
return np.load(filepath)['trajs']
def filename2label(fn):
s = fn.find('-') + 1
e = fn.rfind('_')
return fn[s:e]
def load_trajs_labels(directory, files_to_use=[0,1,2,3,4,5]):
filenames = [
'trajdata_i101_trajectories-0750am-0805am_trajectories.npz',
'trajdata_i101_trajectories-0805am-0820am_trajectories.npz',
'trajdata_i101_trajectories-0820am-0835am_trajectories.npz',
'trajdata_i80_trajectories-0400-0415_trajectories.npz',
'trajdata_i80_trajectories-0500-0515_trajectories.npz',
'trajdata_i80_trajectories-0515-0530_trajectories.npz'
]
filenames = [filenames[i] for i in files_to_use]
labels = [filename2label(fn) for fn in filenames]
filepaths = [os.path.join(directory, fn) for fn in filenames]
trajs = [load_trajectories(fp) for fp in filepaths]
return trajs, labels
'''
Component build functions
'''
'''
This is about as hacky as it gets, but I want to avoid editing the rllab
source code as much as possible, so it will have to do for now.
Add a reset(self, kwargs**) function to the normalizing environment
https://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance
'''
def normalize_env_reset_with_kwargs(self, **kwargs):
ret = self._wrapped_env.reset(**kwargs)
if self._normalize_obs:
return self._apply_normalize_obs(ret)
else:
return ret
def add_kwargs_to_reset(env):
normalize_env = hgail.misc.utils.extract_normalizing_env(env)
if normalize_env is not None:
normalize_env.reset = normalize_env_reset_with_kwargs.__get__(normalize_env)
'''end of hack, back to our regularly scheduled programming'''
# Raunak adding an input argument for multiagent video making
def build_ngsim_env(
args,
exp_dir='/tmp',
alpha=0.001,
vectorize=True,
render_params=None,
videoMaking=False):
basedir = os.path.expanduser('~/.julia/v0.6/NGSIM/data')
filepaths = [os.path.join(basedir, args.ngsim_filename)]
if render_params is None:
render_params = dict(
viz_dir=os.path.join(exp_dir, 'imitate/viz'),
zoom=5.
)
env_params = dict(
trajectory_filepaths=filepaths,
H=args.env_H,
primesteps=args.env_primesteps,
action_repeat=args.env_action_repeat,
terminate_on_collision=False,
terminate_on_off_road=False,
render_params=render_params,
n_envs=args.n_envs,
n_veh=args.n_envs,
remove_ngsim_veh=args.remove_ngsim_veh,
reward=args.env_reward
)
# order matters here because multiagent is a subset of vectorized
# i.e., if you want to run with multiagent = true, then vectorize must
# also be true
if args.env_multiagent:
env_id = 'MultiagentNGSIMEnv'
if videoMaking:
print('RAUNAK BHATTACHARRYA VIDEO MAKER IS ON')
env_id='MultiagentNGSIMEnvVideoMaker'
alpha = alpha * args.n_envs
normalize_wrapper = vectorized_normalized_env
elif vectorize:
env_id = 'VectorizedNGSIMEnv'
alpha = alpha * args.n_envs
normalize_wrapper = vectorized_normalized_env
else:
env_id = 'NGSIMEnv'
normalize_wrapper = normalize_env
print(env_params)
env = JuliaEnv(
env_id=env_id,
env_params=env_params,
using='AutoEnvs'
)
# get low and high values for normalizing _real_ actions
low, high = env.action_space.low, env.action_space.high
env = TfEnv(normalize_wrapper(env, normalize_obs=True, obs_alpha=alpha))
add_kwargs_to_reset(env)
return env, low, high
def build_critic(args, data, env, writer=None):
if args.use_critic_replay_memory:
critic_replay_memory = hgail.misc.utils.KeyValueReplayMemory(maxsize=3 * args.batch_size)
else:
critic_replay_memory = None
critic_dataset = CriticDataset(
data,
replay_memory=critic_replay_memory,
batch_size=args.critic_batch_size,
flat_recurrent=args.policy_recurrent
)
critic_network = ObservationActionMLP(
name='critic',
hidden_layer_dims=args.critic_hidden_layer_dims,
dropout_keep_prob=args.critic_dropout_keep_prob
)
critic = WassersteinCritic(
obs_dim=env.observation_space.flat_dim,
act_dim=env.action_space.flat_dim,
dataset=critic_dataset,
network=critic_network,
gradient_penalty=args.gradient_penalty,
optimizer=tf.train.RMSPropOptimizer(args.critic_learning_rate),
n_train_epochs=args.n_critic_train_epochs,
summary_writer=writer,
grad_norm_rescale=args.critic_grad_rescale,
verbose=2,
debug_nan=True
)
return critic
def build_policy(args, env, latent_sampler=None):
if args.use_infogail:
if latent_sampler is None:
latent_sampler = UniformlyRandomLatentSampler(
scheduler=ConstantIntervalScheduler(k=args.scheduler_k),
name='latent_sampler',
dim=args.latent_dim
)
if args.policy_recurrent:
policy = GaussianLatentVarGRUPolicy(
name="policy",
latent_sampler=latent_sampler,
env_spec=env.spec,
hidden_dim=args.recurrent_hidden_dim,
)
else:
print("GaussianLatentVarMLPPolicy")
policy = GaussianLatentVarMLPPolicy(
name="policy",
latent_sampler=latent_sampler,
env_spec=env.spec,
hidden_sizes=args.policy_mean_hidden_layer_dims,
std_hidden_sizes=args.policy_std_hidden_layer_dims
)
else:
if args.policy_recurrent:
print("GaussianGRUPolicy")
policy = GaussianGRUPolicy(
name="policy",
env_spec=env.spec,
hidden_dim=args.recurrent_hidden_dim,
output_nonlinearity=None,
learn_std=True
)
else:
print("GaussianMLPPolicy")
policy = GaussianMLPPolicy(
name="policy",
env_spec=env.spec,
hidden_sizes=args.policy_mean_hidden_layer_dims,
std_hidden_sizes=args.policy_std_hidden_layer_dims,
adaptive_std=True,
output_nonlinearity=None,
learn_std=True
)
return policy
def build_recognition_model(args, env, writer=None):
if args.use_infogail:
recognition_dataset = RecognitionDataset(
args.batch_size,
flat_recurrent=args.policy_recurrent
)
recognition_network = ObservationActionMLP(
name='recog',
hidden_layer_dims=args.recognition_hidden_layer_dims,
output_dim=args.latent_dim
)
recognition_model = RecognitionModel(
obs_dim=env.observation_space.flat_dim,
act_dim=env.action_space.flat_dim,
dataset=recognition_dataset,
network=recognition_network,
variable_type='categorical',
latent_dim=args.latent_dim,
optimizer=tf.train.AdamOptimizer(args.recognition_learning_rate),
n_train_epochs=args.n_recognition_train_epochs,
summary_writer=writer,
verbose=2
)
else:
recognition_model = None
return recognition_model
def build_baseline(args, env):
return GaussianMLPBaseline(env_spec=env.spec)
def build_reward_handler(args, writer=None):
reward_handler = hgail.misc.utils.RewardHandler(
use_env_rewards=args.reward_handler_use_env_rewards,
max_epochs=args.reward_handler_max_epochs, # epoch at which final scales are used
critic_final_scale=args.reward_handler_critic_final_scale,
recognition_initial_scale=0.,
recognition_final_scale=args.reward_handler_recognition_final_scale,
summary_writer=writer,
normalize_rewards=True,
critic_clip_low=-100,
critic_clip_high=100,
)
return reward_handler
def build_hierarchy(args, env, writer=None):
levels = []
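    # Levels are built top-down: level 1 samples latent codes over a Discrete action space, level 0 runs the low-level policy.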
latent_sampler = UniformlyRandomLatentSampler(
name='base_latent_sampler',
dim=args.latent_dim,
scheduler=ConstantIntervalScheduler(k=args.env_H)
)
for level_idx in [1,0]:
# wrap env in different spec depending on level
if level_idx == 0:
level_env = env
else:
level_env = SpecWrapperEnv(
env,
action_space=Discrete(args.latent_dim),
observation_space=env.observation_space
)
with tf.variable_scope('level_{}'.format(level_idx)):
# recognition_model = build_recognition_model(args, level_env, writer)
recognition_model = None
if level_idx == 0:
policy = build_policy(args, env, latent_sampler=latent_sampler)
else:
scheduler = ConstantIntervalScheduler(k=args.scheduler_k)
policy = latent_sampler = CategoricalLatentSampler(
scheduler=scheduler,
name='latent_sampler',
policy_name='latent_sampler_policy',
dim=args.latent_dim,
env_spec=level_env.spec,
latent_sampler=latent_sampler,
max_n_envs=args.n_envs
)
baseline = build_baseline(args, level_env)
if args.vectorize:
force_batch_sampler = False
if level_idx == 0:
sampler_args = dict(n_envs=args.n_envs)
else:
sampler_args = None
else:
force_batch_sampler = True
sampler_args = None
sampler_cls = None if level_idx == 0 else HierarchySampler
algo = TRPO(
env=level_env,
policy=policy,
baseline=baseline,
batch_size=args.batch_size,
max_path_length=args.max_path_length,
n_itr=args.n_itr,
discount=args.discount,
step_size=args.trpo_step_size,
sampler_cls=sampler_cls,
force_batch_sampler=force_batch_sampler,
sampler_args=sampler_args,
optimizer_args=dict(
max_backtracks=50,
debug_nan=True
)
)
reward_handler = build_reward_handler(args, writer)
level = Level(
depth=level_idx,
algo=algo,
reward_handler=reward_handler,
recognition_model=recognition_model,
start_itr=0,
end_itr=0 if level_idx == 0 else np.inf
)
levels.append(level)
# by convention the order of the levels should be increasing
# but they must be built in the reverse order
# so reverse the list before returning it
return list(reversed(levels))
'''
setup
'''
def latest_snapshot(exp_dir, phase='train'):
snapshot_dir = os.path.join(exp_dir, phase, 'log')
snapshots = glob.glob('{}/itr_*.pkl'.format(snapshot_dir))
latest = sorted(snapshots, reverse=True)[0]
return latest
def set_up_experiment(
exp_name,
phase,
exp_home='../../data/experiments/',
snapshot_gap=5):
maybe_mkdir(exp_home)
exp_dir = os.path.join(exp_home, exp_name)
maybe_mkdir(exp_dir)
phase_dir = os.path.join(exp_dir, phase)
maybe_mkdir(phase_dir)
log_dir = os.path.join(phase_dir, 'log')
maybe_mkdir(log_dir)
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode('gap')
logger.set_snapshot_gap(snapshot_gap)
log_filepath = os.path.join(log_dir, 'log.txt')
logger.add_text_output(log_filepath)
return exp_dir
'''
data utilities
'''
def compute_lengths(arr):
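    # Samples are zero-padded along the time axis; a sample's length is the index of its first all-zero timestep.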
sums = np.sum(np.array(arr), axis=2)
lengths = []
for sample in sums:
zero_idxs = np.where(sample == 0.)[0]
if len(zero_idxs) == 0:
lengths.append(len(sample))
else:
lengths.append(zero_idxs[0])
return np.array(lengths)
def normalize(x, clip_std_multiple=np.inf):
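    # Standardize x and clip values lying more than clip_std_multiple standard deviations from the mean.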
mean = np.mean(x, axis=0, keepdims=True)
x = x - mean
std = np.std(x, axis=0, keepdims=True) + 1e-8
up = std * clip_std_multiple
lb = - std * clip_std_multiple
x = np.clip(x, lb, up)
x = x / std
return x, mean, std
def normalize_range(x, low, high):
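    # Linearly map values from [low, high] to [-1, 1], clipping anything outside that range.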
low = np.array(low)
high = np.array(high)
mean = (high + low) / 2.
half_range = (high - low) / 2.
x = (x - mean) / half_range
x = np.clip(x, -1, 1)
return x
def load_x_feature_names(filepath, ngsim_filename):
print(filepath)
f = h5py.File(filepath, 'r')
xs = []
traj_id = NGSIM_FILENAME_TO_ID[ngsim_filename]
    # in case this needs to allow for multiple files in the future
traj_ids = [traj_id]
for i in traj_ids:
if str(i) in f.keys():
xs.append(f[str(i)])
else:
raise ValueError('invalid key to trajectory data: {}'.format(i))
x = np.concatenate(xs)
feature_names = f.attrs['feature_names']
return x, feature_names
def load_data(
filepath,
act_keys=['accel', 'turn_rate_global'],
ngsim_filename='trajdata_i101_trajectories-0750am-0805am.txt',
debug_size=None,
min_length=50,
normalize_data=True,
shuffle=False,
act_low=-1,
act_high=1,
clip_std_multiple=np.inf):
# loading varies based on dataset type
x, feature_names = load_x_feature_names(filepath, ngsim_filename)
# optionally keep it to a reasonable size
if debug_size is not None:
x = x[:debug_size]
if shuffle:
idxs = np.random.permutation(len(x))
x = x[idxs]
# compute lengths of the samples before anything else b/c this is fragile
lengths = compute_lengths(x)
# flatten the dataset to (n_samples, n_features)
# taking only the valid timesteps from each sample
# i.e., throw out timeseries information
xs = []
for i, l in enumerate(lengths):
# enforce minimum length constraint
if l >= min_length:
xs.append(x[i,:l])
x = np.concatenate(xs)
# split into observations and actions
# redundant because the environment is not able to extract actions
obs = x
act_idxs = [i for (i,n) in enumerate(feature_names) if n in act_keys]
act = x[:, act_idxs]
if normalize_data:
# normalize it all, _no_ test / val split
obs, obs_mean, obs_std = normalize(obs, clip_std_multiple)
# normalize actions to between -1 and 1
act = normalize_range(act, act_low, act_high)
else:
obs_mean = None
obs_std = None
return dict(
observations=obs,
actions=act,
obs_mean=obs_mean,
obs_std=obs_std,
)
|
[
"numpy.clip",
"rllab.misc.logger.add_text_output",
"numpy.array",
"hgail.misc.datasets.RecognitionDataset",
"rllab.misc.logger.set_snapshot_mode",
"rllab.misc.logger.set_snapshot_dir",
"numpy.mean",
"numpy.savez",
"os.path.exists",
"hgail.algos.hgail_impl.Level",
"numpy.where",
"hgail.policies.gaussian_latent_var_gru_policy.GaussianLatentVarGRUPolicy",
"os.mkdir",
"numpy.concatenate",
"sandbox.rocky.tf.policies.gaussian_mlp_policy.GaussianMLPPolicy",
"julia_env.julia_env.JuliaEnv",
"tensorflow.train.AdamOptimizer",
"hgail.core.models.ObservationActionMLP",
"os.path.expanduser",
"hgail.policies.scheduling.ConstantIntervalScheduler",
"rllab.misc.logger.set_snapshot_gap",
"hgail.baselines.gaussian_mlp_baseline.GaussianMLPBaseline",
"h5py.File",
"hgail.policies.gaussian_latent_var_mlp_policy.GaussianLatentVarMLPPolicy",
"numpy.std",
"hgail.policies.categorical_latent_sampler.CategoricalLatentSampler",
"tensorflow.train.RMSPropOptimizer",
"sandbox.rocky.tf.policies.gaussian_gru_policy.GaussianGRUPolicy",
"hgail.misc.datasets.CriticDataset",
"os.path.join",
"numpy.load",
"sandbox.rocky.tf.spaces.discrete.Discrete"
] |
[((2367, 2398), 'numpy.savez', 'np.savez', (['filepath'], {'trajs': 'trajs'}), '(filepath, trajs=trajs)\n', (2375, 2398), True, 'import numpy as np\n'), ((4357, 4403), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.julia/v0.6/NGSIM/data"""'], {}), "('~/.julia/v0.6/NGSIM/data')\n", (4375, 4403), False, 'import os, pdb\n'), ((5753, 5817), 'julia_env.julia_env.JuliaEnv', 'JuliaEnv', ([], {'env_id': 'env_id', 'env_params': 'env_params', 'using': '"""AutoEnvs"""'}), "(env_id=env_id, env_params=env_params, using='AutoEnvs')\n", (5761, 5817), False, 'from julia_env.julia_env import JuliaEnv\n'), ((6354, 6487), 'hgail.misc.datasets.CriticDataset', 'CriticDataset', (['data'], {'replay_memory': 'critic_replay_memory', 'batch_size': 'args.critic_batch_size', 'flat_recurrent': 'args.policy_recurrent'}), '(data, replay_memory=critic_replay_memory, batch_size=args.\n critic_batch_size, flat_recurrent=args.policy_recurrent)\n', (6367, 6487), False, 'from hgail.misc.datasets import CriticDataset, RecognitionDataset\n'), ((6543, 6681), 'hgail.core.models.ObservationActionMLP', 'ObservationActionMLP', ([], {'name': '"""critic"""', 'hidden_layer_dims': 'args.critic_hidden_layer_dims', 'dropout_keep_prob': 'args.critic_dropout_keep_prob'}), "(name='critic', hidden_layer_dims=args.\n critic_hidden_layer_dims, dropout_keep_prob=args.critic_dropout_keep_prob)\n", (6563, 6681), False, 'from hgail.core.models import ObservationActionMLP\n'), ((9986, 10024), 'hgail.baselines.gaussian_mlp_baseline.GaussianMLPBaseline', 'GaussianMLPBaseline', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (10005, 10024), False, 'from hgail.baselines.gaussian_mlp_baseline import GaussianMLPBaseline\n'), ((13741, 13776), 'os.path.join', 'os.path.join', (['exp_dir', 'phase', '"""log"""'], {}), "(exp_dir, phase, 'log')\n", (13753, 13776), False, 'import os, pdb\n'), ((14074, 14106), 'os.path.join', 'os.path.join', (['exp_home', 'exp_name'], {}), '(exp_home, exp_name)\n', (14086, 14106), False, 'import os, pdb\n'), ((14148, 14176), 'os.path.join', 'os.path.join', (['exp_dir', 'phase'], {}), '(exp_dir, phase)\n', (14160, 14176), False, 'import os, pdb\n'), ((14218, 14248), 'os.path.join', 'os.path.join', (['phase_dir', '"""log"""'], {}), "(phase_dir, 'log')\n", (14230, 14248), False, 'import os, pdb\n'), ((14278, 14310), 'rllab.misc.logger.set_snapshot_dir', 'logger.set_snapshot_dir', (['log_dir'], {}), '(log_dir)\n', (14301, 14310), True, 'import rllab.misc.logger as logger\n'), ((14315, 14346), 'rllab.misc.logger.set_snapshot_mode', 'logger.set_snapshot_mode', (['"""gap"""'], {}), "('gap')\n", (14339, 14346), True, 'import rllab.misc.logger as logger\n'), ((14351, 14388), 'rllab.misc.logger.set_snapshot_gap', 'logger.set_snapshot_gap', (['snapshot_gap'], {}), '(snapshot_gap)\n', (14374, 14388), True, 'import rllab.misc.logger as logger\n'), ((14408, 14440), 'os.path.join', 'os.path.join', (['log_dir', '"""log.txt"""'], {}), "(log_dir, 'log.txt')\n", (14420, 14440), False, 'import os, pdb\n'), ((14445, 14481), 'rllab.misc.logger.add_text_output', 'logger.add_text_output', (['log_filepath'], {}), '(log_filepath)\n', (14467, 14481), True, 'import rllab.misc.logger as logger\n'), ((14818, 14835), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (14826, 14835), True, 'import numpy as np\n'), ((14892, 14925), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (14899, 14925), True, 'import numpy as np\n'), ((15069, 15087), 'numpy.clip', 'np.clip', (['x', 'lb', 
'up'], {}), '(x, lb, up)\n', (15076, 15087), True, 'import numpy as np\n'), ((15174, 15187), 'numpy.array', 'np.array', (['low'], {}), '(low)\n', (15182, 15187), True, 'import numpy as np\n'), ((15199, 15213), 'numpy.array', 'np.array', (['high'], {}), '(high)\n', (15207, 15213), True, 'import numpy as np\n'), ((15318, 15335), 'numpy.clip', 'np.clip', (['x', '(-1)', '(1)'], {}), '(x, -1, 1)\n', (15325, 15335), True, 'import numpy as np\n'), ((15430, 15454), 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (15439, 15454), False, 'import h5py\n'), ((15802, 15820), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (15816, 15820), True, 'import numpy as np\n'), ((16959, 16977), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (16973, 16977), True, 'import numpy as np\n'), ((2034, 2057), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (2048, 2057), False, 'import os, pdb\n'), ((2067, 2084), 'os.mkdir', 'os.mkdir', (['dirpath'], {}), '(dirpath)\n', (2075, 2084), False, 'import os, pdb\n'), ((2444, 2461), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (2451, 2461), True, 'import numpy as np\n'), ((3171, 3198), 'os.path.join', 'os.path.join', (['directory', 'fn'], {}), '(directory, fn)\n', (3183, 3198), False, 'import os, pdb\n'), ((4421, 4463), 'os.path.join', 'os.path.join', (['basedir', 'args.ngsim_filename'], {}), '(basedir, args.ngsim_filename)\n', (4433, 4463), False, 'import os, pdb\n'), ((9055, 9128), 'hgail.misc.datasets.RecognitionDataset', 'RecognitionDataset', (['args.batch_size'], {'flat_recurrent': 'args.policy_recurrent'}), '(args.batch_size, flat_recurrent=args.policy_recurrent)\n', (9073, 9128), False, 'from hgail.misc.datasets import CriticDataset, RecognitionDataset\n'), ((9193, 9314), 'hgail.core.models.ObservationActionMLP', 'ObservationActionMLP', ([], {'name': '"""recog"""', 'hidden_layer_dims': 'args.recognition_hidden_layer_dims', 'output_dim': 'args.latent_dim'}), "(name='recog', hidden_layer_dims=args.\n recognition_hidden_layer_dims, output_dim=args.latent_dim)\n", (9213, 9314), False, 'from hgail.core.models import ObservationActionMLP\n'), ((14570, 14583), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (14578, 14583), True, 'import numpy as np\n'), ((14953, 14985), 'numpy.std', 'np.std', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (14959, 14985), True, 'import numpy as np\n'), ((6962, 7014), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['args.critic_learning_rate'], {}), '(args.critic_learning_rate)\n', (6987, 7014), True, 'import tensorflow as tf\n'), ((7604, 7737), 'hgail.policies.gaussian_latent_var_gru_policy.GaussianLatentVarGRUPolicy', 'GaussianLatentVarGRUPolicy', ([], {'name': '"""policy"""', 'latent_sampler': 'latent_sampler', 'env_spec': 'env.spec', 'hidden_dim': 'args.recurrent_hidden_dim'}), "(name='policy', latent_sampler=latent_sampler,\n env_spec=env.spec, hidden_dim=args.recurrent_hidden_dim)\n", (7630, 7737), False, 'from hgail.policies.gaussian_latent_var_gru_policy import GaussianLatentVarGRUPolicy\n'), ((7896, 8096), 'hgail.policies.gaussian_latent_var_mlp_policy.GaussianLatentVarMLPPolicy', 'GaussianLatentVarMLPPolicy', ([], {'name': '"""policy"""', 'latent_sampler': 'latent_sampler', 'env_spec': 'env.spec', 'hidden_sizes': 'args.policy_mean_hidden_layer_dims', 'std_hidden_sizes': 'args.policy_std_hidden_layer_dims'}), "(name='policy', latent_sampler=latent_sampler,\n env_spec=env.spec, 
hidden_sizes=args.policy_mean_hidden_layer_dims,\n std_hidden_sizes=args.policy_std_hidden_layer_dims)\n", (7922, 8096), False, 'from hgail.policies.gaussian_latent_var_mlp_policy import GaussianLatentVarMLPPolicy\n'), ((8287, 8423), 'sandbox.rocky.tf.policies.gaussian_gru_policy.GaussianGRUPolicy', 'GaussianGRUPolicy', ([], {'name': '"""policy"""', 'env_spec': 'env.spec', 'hidden_dim': 'args.recurrent_hidden_dim', 'output_nonlinearity': 'None', 'learn_std': '(True)'}), "(name='policy', env_spec=env.spec, hidden_dim=args.\n recurrent_hidden_dim, output_nonlinearity=None, learn_std=True)\n", (8304, 8423), False, 'from sandbox.rocky.tf.policies.gaussian_gru_policy import GaussianGRUPolicy\n'), ((8587, 8815), 'sandbox.rocky.tf.policies.gaussian_mlp_policy.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'name': '"""policy"""', 'env_spec': 'env.spec', 'hidden_sizes': 'args.policy_mean_hidden_layer_dims', 'std_hidden_sizes': 'args.policy_std_hidden_layer_dims', 'adaptive_std': '(True)', 'output_nonlinearity': 'None', 'learn_std': '(True)'}), "(name='policy', env_spec=env.spec, hidden_sizes=args.\n policy_mean_hidden_layer_dims, std_hidden_sizes=args.\n policy_std_hidden_layer_dims, adaptive_std=True, output_nonlinearity=\n None, learn_std=True)\n", (8604, 8815), False, 'from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy\n'), ((10809, 10848), 'hgail.policies.scheduling.ConstantIntervalScheduler', 'ConstantIntervalScheduler', ([], {'k': 'args.env_H'}), '(k=args.env_H)\n', (10834, 10848), False, 'from hgail.policies.scheduling import ConstantIntervalScheduler\n'), ((13165, 13329), 'hgail.algos.hgail_impl.Level', 'Level', ([], {'depth': 'level_idx', 'algo': 'algo', 'reward_handler': 'reward_handler', 'recognition_model': 'recognition_model', 'start_itr': '(0)', 'end_itr': '(0 if level_idx == 0 else np.inf)'}), '(depth=level_idx, algo=algo, reward_handler=reward_handler,\n recognition_model=recognition_model, start_itr=0, end_itr=0 if \n level_idx == 0 else np.inf)\n', (13170, 13329), False, 'from hgail.algos.hgail_impl import Level\n'), ((14654, 14677), 'numpy.where', 'np.where', (['(sample == 0.0)'], {}), '(sample == 0.0)\n', (14662, 14677), True, 'import numpy as np\n'), ((4545, 4581), 'os.path.join', 'os.path.join', (['exp_dir', '"""imitate/viz"""'], {}), "(exp_dir, 'imitate/viz')\n", (4557, 4581), False, 'import os, pdb\n'), ((9688, 9742), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['args.recognition_learning_rate'], {}), '(args.recognition_learning_rate)\n', (9710, 9742), True, 'import tensorflow as tf\n'), ((11547, 11592), 'hgail.policies.scheduling.ConstantIntervalScheduler', 'ConstantIntervalScheduler', ([], {'k': 'args.scheduler_k'}), '(k=args.scheduler_k)\n', (11572, 11592), False, 'from hgail.policies.scheduling import ConstantIntervalScheduler\n'), ((11635, 11850), 'hgail.policies.categorical_latent_sampler.CategoricalLatentSampler', 'CategoricalLatentSampler', ([], {'scheduler': 'scheduler', 'name': '"""latent_sampler"""', 'policy_name': '"""latent_sampler_policy"""', 'dim': 'args.latent_dim', 'env_spec': 'level_env.spec', 'latent_sampler': 'latent_sampler', 'max_n_envs': 'args.n_envs'}), "(scheduler=scheduler, name='latent_sampler',\n policy_name='latent_sampler_policy', dim=args.latent_dim, env_spec=\n level_env.spec, latent_sampler=latent_sampler, max_n_envs=args.n_envs)\n", (11659, 11850), False, 'from hgail.policies.categorical_latent_sampler import CategoricalLatentSampler\n'), ((7413, 7458), 
'hgail.policies.scheduling.ConstantIntervalScheduler', 'ConstantIntervalScheduler', ([], {'k': 'args.scheduler_k'}), '(k=args.scheduler_k)\n', (7438, 7458), False, 'from hgail.policies.scheduling import ConstantIntervalScheduler\n'), ((11098, 11123), 'sandbox.rocky.tf.spaces.discrete.Discrete', 'Discrete', (['args.latent_dim'], {}), '(args.latent_dim)\n', (11106, 11123), False, 'from sandbox.rocky.tf.spaces.discrete import Discrete\n')]
|
from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import range, zip, map, reduce, filter
from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda
from keras.models import Model
from keras.layers.merge import Add, Concatenate
import tensorflow as tf
from keras import backend as K
from .blocks import unet_block, unet_blocks, gaussian_2d
import re
from ..utils import _raise, backend_channels_last
import numpy as np
def custom_unet(input_shape,
last_activation,
n_depth=2,
n_filter_base=16,
kernel_size=(3,3,3),
n_conv_per_depth=2,
activation="relu",
batch_norm=False,
dropout=0.0,
pool_size=(2,2,2),
n_channel_out=1,
residual=False,
prob_out=False,
long_skip=True,
eps_scale=1e-3):
""" TODO """
if last_activation is None:
raise ValueError("last activation has to be given (e.g. 'sigmoid', 'relu')!")
all((s % 2 == 1 for s in kernel_size)) or _raise(ValueError('kernel size should be odd in all dimensions.'))
channel_axis = -1 if backend_channels_last() else 1
n_dim = len(kernel_size)
# TODO: rewrite with conv_block
conv = Conv2D if n_dim == 2 else Conv3D
input = Input(input_shape, name="input")
unet = unet_block(n_depth, n_filter_base, kernel_size, input_planes=input_shape[-1],
activation=activation, dropout=dropout, batch_norm=batch_norm,
n_conv_per_depth=n_conv_per_depth, pool=pool_size, long_skip=long_skip)(input)
final = conv(n_channel_out, (1,)*n_dim, activation='linear')(unet)
if residual:
if not (n_channel_out == input_shape[-1] if backend_channels_last() else n_channel_out == input_shape[0]):
raise ValueError("number of input and output channels must be the same for a residual net.")
final = Add()([final, input])
final = Activation(activation=last_activation)(final)
if prob_out:
scale = conv(n_channel_out, (1,)*n_dim, activation='softplus')(unet)
scale = Lambda(lambda x: x+np.float32(eps_scale))(scale)
final = Concatenate(axis=channel_axis)([final, scale])
return Model(inputs=input, outputs=final)
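# A minimal usage sketch (illustrative only; the 2D input shape and compile settings are
# assumptions, not taken from this module):
#
#     model = custom_unet((128, 128, 1), last_activation='linear',
#                         n_depth=2, kernel_size=(3, 3), pool_size=(2, 2))
#     model.compile(optimizer='adam', loss='mse')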
def uxnet(input_shape,
n_depth=2,
n_filter_base=16,
kernel_size=(3, 3),
n_conv_per_depth=2,
activation="relu",
last_activation='linear',
batch_norm=False,
dropout=0.0,
pool_size=(2, 2),
residual=True,
odd_to_even=False,
shortcut=None,
shared_idx=[],
prob_out=False,
eps_scale=1e-3):
"""
Multi-body U-Net which learns identity by leaving one plane out in each branch
:param input_shape:
:param n_depth:
:param n_filter_base:
:param kernel_size:
:param n_conv_per_depth:
:param activation:
:param last_activation:
:param batch_norm:
:param dropout:
:param pool_size:
:param prob_out:
:param eps_scale:
:return: Model
"""
# TODO: fill params
# TODO: add odd-to-even mode
# Define vars
channel_axis = -1 if backend_channels_last() else 1
n_planes = input_shape[channel_axis]
if n_planes % 2 != 0 and odd_to_even:
raise ValueError('Odd-to-even mode does not support uneven number of planes')
n_dim = len(kernel_size)
conv = Conv2D if n_dim == 2 else Conv3D
# Define functional model
input = Input(shape=input_shape, name='input_main')
# TODO test new implementation and remove old
# Split planes (preserve channel)
input_x = [Lambda(lambda x: x[..., i:i+1], output_shape=(None, None, 1))(input) for i in range(n_planes)]
# We can train either in odd-to-even mode or in LOO mode
if odd_to_even:
# In this mode we stack together odd and even planes, train the net to predict even from odd and vice versa
# input_x_out = [Concatenate(axis=-1)(input_x[j::2]) for j in range(2)]
input_x_out = [Concatenate(axis=-1)(input_x[j::2]) for j in range(1, -1, -1)]
else:
# Concatenate planes back in leave-one-out way
input_x_out = [Concatenate(axis=-1)([plane for i, plane in enumerate(input_x) if i != j]) for j in range(n_planes)]
# if odd_to_even:
# input_x_out = [Lambda(lambda x: x[..., j::2],
# output_shape=(None, None, n_planes // 2),
# name='{}_planes'.format('even' if j == 0 else 'odd'))(input)
# for j in range(1, -1, -1)]
# else:
# # input_x_out = [Lambda(lambda x: x[..., tf.convert_to_tensor([i for i in range(n_planes) if i != j], dtype=tf.int32)],
# # output_shape=(None, None, n_planes-1),
# # name='leave_{}_plane_out'.format(j))(input)
# # for j in range(n_planes)]
#
# input_x_out = [Lambda(lambda x: K.concatenate([x[..., :j], x[..., (j+1):]], axis=-1),
# output_shape=(None, None, n_planes - 1),
# name='leave_{}_plane_out'.format(j))(input)
# for j in range(n_planes)]
# U-Net parameters depend on mode (odd-to-even or LOO)
n_blocks = 2 if odd_to_even else n_planes
input_planes = n_planes // 2 if odd_to_even else n_planes-1
output_planes = n_planes // 2 if odd_to_even else 1
# Create U-Net blocks (by number of planes)
unet_x = unet_blocks(n_blocks=n_blocks, input_planes=input_planes, output_planes=output_planes,
n_depth=n_depth, n_filter_base=n_filter_base, kernel_size=kernel_size,
activation=activation, dropout=dropout, batch_norm=batch_norm,
n_conv_per_depth=n_conv_per_depth, pool=pool_size, shared_idx=shared_idx)
unet_x = [unet(inp_out) for unet, inp_out in zip(unet_x, input_x_out)]
# Version without weight sharing:
# unet_x = [unet_block(n_depth, n_filter_base, kernel_size,
# activation=activation, dropout=dropout, batch_norm=batch_norm,
# n_conv_per_depth=n_conv_per_depth, pool=pool_size,
# prefix='out_{}_'.format(i))(inp_out) for i, inp_out in enumerate(input_x_out)]
# TODO: rewritten for sharing -- remove commented below
# Convolve n_filter_base to 1 as each U-Net predicts a single plane
# unet_x = [conv(1, (1,) * n_dim, activation=activation)(unet) for unet in unet_x]
if residual:
if odd_to_even:
# For residual U-Net sum up output for odd planes with even planes and vice versa
unet_x = [Add()([unet, inp]) for unet, inp in zip(unet_x, input_x[::-1])]
else:
# For residual U-Net sum up output with its neighbor (next for the first plane, previous for the rest
unet_x = [Add()([unet, inp]) for unet, inp in zip(unet_x, [input_x[1]]+input_x[:-1])]
# Concatenate outputs of blocks, should receive (None, None, None, n_planes)
# TODO assert to check shape?
if odd_to_even:
# Split even and odd, assemble them together in the correct order
# TODO tests
unet_even = [Lambda(lambda x: x[..., i:i+1],
output_shape=(None, None, 1),
name='even_{}'.format(i))(unet_x[0]) for i in range(n_planes // 2)]
unet_odd = [Lambda(lambda x: x[..., i:i+1],
output_shape=(None, None, 1),
name='odd_{}'.format(i))(unet_x[1]) for i in range(n_planes // 2)]
unet_x = list(np.array(list(zip(unet_even, unet_odd))).flatten())
unet = Concatenate(axis=-1)(unet_x)
if shortcut is not None:
# We can create a shortcut without long skip connection to prevent noise memorization
if shortcut == 'unet':
shortcut_block = unet_block(long_skip=False, input_planes=n_planes,
n_depth=n_depth, n_filter_base=n_filter_base, kernel_size=kernel_size,
activation=activation, dropout=dropout, batch_norm=batch_norm,
n_conv_per_depth=n_conv_per_depth, pool=pool_size)(input)
shortcut_block = conv(n_planes, (1,) * n_dim, activation='linear', name='shortcut_final_conv')(shortcut_block)
# Or a simple gaussian blur block
elif shortcut == 'gaussian':
shortcut_block = gaussian_2d(n_planes, k=13, s=7)(input)
else:
raise ValueError('Shortcut should be either unet or gaussian')
# TODO add or concatenate?
unet = Add()([unet, shortcut_block])
# unet = Concatenate(axis=-1)([unet, shortcut_unet])
# Final activation layer
final = Activation(activation=last_activation)(unet)
if prob_out:
scale = conv(n_planes, (1,)*n_dim, activation='softplus')(unet)
scale = Lambda(lambda x: x+np.float32(eps_scale))(scale)
final = Concatenate(axis=channel_axis)([final, scale])
return Model(inputs=input, outputs=final)
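# A minimal usage sketch for the leave-one-out mode (illustrative only; the 8-plane input
# shape and compile settings are assumptions, not taken from this module):
#
#     model = uxnet(input_shape=(128, 128, 8), n_depth=2, kernel_size=(3, 3),
#                   residual=True, odd_to_even=False)
#     model.compile(optimizer='adam', loss='mse')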
def common_unet(n_dim=2, n_depth=1, kern_size=3, n_first=16, n_channel_out=1,
residual=True, prob_out=False, long_skip=True, last_activation='linear'):
"""
Construct a common CARE neural net based on U-Net [1]_ and residual learning [2]_
to be used for image restoration/enhancement.
Parameters
----------
n_dim : int
number of image dimensions (2 or 3)
n_depth : int
number of resolution levels of U-Net architecture
kern_size : int
size of convolution filter in all image dimensions
n_first : int
number of convolution filters for first U-Net resolution level (value is doubled after each downsampling operation)
n_channel_out : int
number of channels of the predicted output image
residual : bool
if True, model will internally predict the residual w.r.t. the input (typically better)
requires number of input and output image channels to be equal
prob_out : bool
standard regression (False) or probabilistic prediction (True)
if True, model will predict two values for each input pixel (mean and positive scale value)
last_activation : str
name of activation function for the final output layer
Returns
-------
function
Function to construct the network, which takes as argument the shape of the input image
Example
-------
>>> model = common_unet(2, 1,3,16, 1, True, False)(input_shape)
References
----------
.. [1] <NAME>, <NAME>, <NAME>x, *U-Net: Convolutional Networks for Biomedical Image Segmentation*, MICCAI 2015
.. [2] <NAME>, <NAME>, <NAME>, <NAME>. *Deep Residual Learning for Image Recognition*, CVPR 2016
"""
def _build_this(input_shape):
return custom_unet(input_shape, last_activation, n_depth, n_first, (kern_size,)*n_dim, pool_size=(2,)*n_dim,
n_channel_out=n_channel_out, residual=residual, prob_out=prob_out, long_skip=long_skip)
return _build_this
def common_uxnet(n_dim=2, n_depth=1, kern_size=3, n_first=16,
residual=True, prob_out=False, last_activation='linear',
shared_idx=[], odd_to_even=False, shortcut=None):
def _build_this(input_shape):
return uxnet(input_shape=input_shape, last_activation=last_activation, n_depth=n_depth, n_filter_base=n_first,
kernel_size=(kern_size,)*n_dim, pool_size=(2,)*n_dim,
residual=residual, prob_out=prob_out,
shared_idx=shared_idx, odd_to_even=odd_to_even, shortcut=shortcut)
return _build_this
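# A minimal usage sketch (illustrative only; the input shape is an assumption):
#
#     build = common_uxnet(n_dim=2, n_depth=2)
#     model = build((128, 128, 8))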
modelname = re.compile(r"^(?P<model>resunet|unet)(?P<n_dim>\d)(?P<prob_out>p)?_(?P<n_depth>\d+)_(?P<kern_size>\d+)_(?P<n_first>\d+)(_(?P<n_channel_out>\d+)out)?(_(?P<last_activation>.+)-last)?$")
def common_unet_by_name(model):
r"""Shorthand notation for equivalent use of :func:`common_unet`.
Parameters
----------
model : str
define model to be created via string, which is parsed as a regular expression:
`^(?P<model>resunet|unet)(?P<n_dim>\d)(?P<prob_out>p)?_(?P<n_depth>\d+)_(?P<kern_size>\d+)_(?P<n_first>\d+)(_(?P<n_channel_out>\d+)out)?(_(?P<last_activation>.+)-last)?$`
Returns
-------
function
Calls :func:`common_unet` with the respective parameters.
Raises
------
ValueError
If argument `model` is not a valid string according to the regular expression.
Example
-------
>>> model = common_unet_by_name('resunet2_1_3_16_1out')(input_shape)
>>> # equivalent to: model = common_unet(2, 1,3,16, 1, True, False)(input_shape)
Todo
----
Backslashes in docstring for regexp not rendered correctly.
"""
m = modelname.fullmatch(model)
if m is None:
raise ValueError("model name '%s' unknown, must follow pattern '%s'" % (model, modelname.pattern))
# from pprint import pprint
# pprint(m.groupdict())
options = {k:int(m.group(k)) for k in ['n_depth','n_first','kern_size']}
options['prob_out'] = m.group('prob_out') is not None
options['residual'] = {'unet': False, 'resunet': True}[m.group('model')]
options['n_dim'] = int(m.group('n_dim'))
options['n_channel_out'] = 1 if m.group('n_channel_out') is None else int(m.group('n_channel_out'))
if m.group('last_activation') is not None:
options['last_activation'] = m.group('last_activation')
return common_unet(**options)
def receptive_field_unet(n_depth, kern_size, pool_size=2, n_dim=2, img_size=1024):
"""Receptive field for U-Net model (pre/post for each dimension)."""
x = np.zeros((1,)+(img_size,)*n_dim+(1,))
mid = tuple([s//2 for s in x.shape[1:-1]])
x[(slice(None),) + mid + (slice(None),)] = 1
model = custom_unet (
x.shape[1:],
n_depth=n_depth, kernel_size=[kern_size]*n_dim, pool_size=[pool_size]*n_dim,
n_filter_base=8, activation='linear', last_activation='linear',
)
y = model.predict(x)[0,...,0]
y0 = model.predict(0*x)[0,...,0]
ind = np.where(np.abs(y-y0)>0)
return [(m-np.min(i), np.max(i)-m) for (m, i) in zip(mid, ind)]
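# Example (illustrative): receptive_field_unet(2, 3) returns one (pre, post) extent pair per
# spatial dimension, e.g. [(pre_y, post_y), (pre_x, post_x)] for a 2D model.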
|
[
"keras.layers.merge.Concatenate",
"numpy.abs",
"six.moves.range",
"numpy.float32",
"re.compile",
"keras.layers.merge.Add",
"keras.layers.Lambda",
"numpy.max",
"keras.layers.Input",
"numpy.zeros",
"keras.layers.Activation",
"keras.models.Model",
"numpy.min",
"six.moves.zip"
] |
[((11943, 12140), 're.compile', 're.compile', (['"""^(?P<model>resunet|unet)(?P<n_dim>\\\\d)(?P<prob_out>p)?_(?P<n_depth>\\\\d+)_(?P<kern_size>\\\\d+)_(?P<n_first>\\\\d+)(_(?P<n_channel_out>\\\\d+)out)?(_(?P<last_activation>.+)-last)?$"""'], {}), "(\n '^(?P<model>resunet|unet)(?P<n_dim>\\\\d)(?P<prob_out>p)?_(?P<n_depth>\\\\d+)_(?P<kern_size>\\\\d+)_(?P<n_first>\\\\d+)(_(?P<n_channel_out>\\\\d+)out)?(_(?P<last_activation>.+)-last)?$'\n )\n", (11953, 12140), False, 'import re\n'), ((1399, 1431), 'keras.layers.Input', 'Input', (['input_shape'], {'name': '"""input"""'}), "(input_shape, name='input')\n", (1404, 1431), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((2347, 2381), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'final'}), '(inputs=input, outputs=final)\n', (2352, 2381), False, 'from keras.models import Model\n'), ((3636, 3679), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""input_main"""'}), "(shape=input_shape, name='input_main')\n", (3641, 3679), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((9270, 9304), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'final'}), '(inputs=input, outputs=final)\n', (9275, 9304), False, 'from keras.models import Model\n'), ((13941, 13984), 'numpy.zeros', 'np.zeros', (['((1,) + (img_size,) * n_dim + (1,))'], {}), '((1,) + (img_size,) * n_dim + (1,))\n', (13949, 13984), True, 'import numpy as np\n'), ((2066, 2104), 'keras.layers.Activation', 'Activation', ([], {'activation': 'last_activation'}), '(activation=last_activation)\n', (2076, 2104), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((7874, 7894), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (7885, 7894), False, 'from keras.layers.merge import Add, Concatenate\n'), ((8995, 9033), 'keras.layers.Activation', 'Activation', ([], {'activation': 'last_activation'}), '(activation=last_activation)\n', (9005, 9033), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((2032, 2037), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (2035, 2037), False, 'from keras.layers.merge import Add, Concatenate\n'), ((2288, 2318), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (2299, 2318), False, 'from keras.layers.merge import Add, Concatenate\n'), ((3784, 3847), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x[..., i:i + 1])'], {'output_shape': '(None, None, 1)'}), '(lambda x: x[..., i:i + 1], output_shape=(None, None, 1))\n', (3790, 3847), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((3862, 3877), 'six.moves.range', 'range', (['n_planes'], {}), '(n_planes)\n', (3867, 3877), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((6074, 6098), 'six.moves.zip', 'zip', (['unet_x', 'input_x_out'], {}), '(unet_x, input_x_out)\n', (6077, 6098), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((8860, 8865), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (8863, 8865), False, 'from keras.layers.merge import Add, Concatenate\n'), ((9211, 9241), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (9222, 9241), False, 'from keras.layers.merge import Add, Concatenate\n'), ((14376, 14390), 'numpy.abs', 'np.abs', (['(y - y0)'], {}), '(y - y0)\n', (14382, 14390), True, 'import numpy as np\n'), ((14445, 14458), 
'six.moves.zip', 'zip', (['mid', 'ind'], {}), '(mid, ind)\n', (14448, 14458), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((4180, 4200), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4191, 4200), False, 'from keras.layers.merge import Add, Concatenate\n'), ((4225, 4241), 'six.moves.range', 'range', (['(1)', '(-1)', '(-1)'], {}), '(1, -1, -1)\n', (4230, 4241), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((4331, 4351), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4342, 4351), False, 'from keras.layers.merge import Add, Concatenate\n'), ((4415, 4430), 'six.moves.range', 'range', (['n_planes'], {}), '(n_planes)\n', (4420, 4430), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((7562, 7582), 'six.moves.range', 'range', (['(n_planes // 2)'], {}), '(n_planes // 2)\n', (7567, 7582), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((7765, 7785), 'six.moves.range', 'range', (['(n_planes // 2)'], {}), '(n_planes // 2)\n', (7770, 7785), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((14407, 14416), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (14413, 14416), True, 'import numpy as np\n'), ((14418, 14427), 'numpy.max', 'np.max', (['i'], {}), '(i)\n', (14424, 14427), True, 'import numpy as np\n'), ((6855, 6860), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (6858, 6860), False, 'from keras.layers.merge import Add, Concatenate\n'), ((6891, 6917), 'six.moves.zip', 'zip', (['unet_x', 'input_x[::-1]'], {}), '(unet_x, input_x[::-1])\n', (6894, 6917), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((7069, 7074), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (7072, 7074), False, 'from keras.layers.merge import Add, Concatenate\n'), ((7105, 7145), 'six.moves.zip', 'zip', (['unet_x', '([input_x[1]] + input_x[:-1])'], {}), '(unet_x, [input_x[1]] + input_x[:-1])\n', (7108, 7145), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((2242, 2263), 'numpy.float32', 'np.float32', (['eps_scale'], {}), '(eps_scale)\n', (2252, 2263), True, 'import numpy as np\n'), ((9165, 9186), 'numpy.float32', 'np.float32', (['eps_scale'], {}), '(eps_scale)\n', (9175, 9186), True, 'import numpy as np\n'), ((7824, 7848), 'six.moves.zip', 'zip', (['unet_even', 'unet_odd'], {}), '(unet_even, unet_odd)\n', (7827, 7848), False, 'from six.moves import range, zip, map, reduce, filter\n')]
|
r"""Train a neural network to predict feedback for a program string."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import random
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
from .models import ProgramRNN
from .utils import AverageMeter, save_checkpoint, merge_args_with_dict
from .datasets import load_dataset
from .config import default_hyperparams
from .rubric_utils.load_params import get_label_params, get_max_seq_len
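# Example invocation (illustrative; the exact module path is an assumption -- the relative
# imports above require running this file as part of its package, e.g. with `python -m`):
#
#     python -m <package>.train annotated 3 ./out --cuda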
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, help='annotated|synthetic')
parser.add_argument('problem_id', type=int, help='1|2|3|4|5|6|7|8')
parser.add_argument('out_dir', type=str, help='where to save outputs')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
merge_args_with_dict(args, default_hyperparams)
device = torch.device('cuda' if args.cuda else 'cpu')
args.max_seq_len = get_max_seq_len(args.problem_id)
label_dim, _, _, _, _ = get_label_params(args.problem_id)
# reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if not os.path.isdir(args.out_dir):
os.makedirs(args.out_dir)
train_dataset = load_dataset( args.dataset, args.problem_id, 'train', vocab=None,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
val_dataset = load_dataset( args.dataset, args.problem_id, 'val', vocab=train_dataset.vocab,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
test_dataset = load_dataset(args.dataset, args.problem_id, 'test', vocab=train_dataset.vocab,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
model = ProgramRNN( args.z_dim, label_dim, train_dataset.vocab_size, embedding_dim=args.embedding_dim,
hidden_dim=args.hidden_dim, num_layers=args.num_layers)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def train(epoch):
model.train()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
for batch_idx, (seq, length, label, _) in enumerate(train_loader):
assert label is not None
batch_size = len(seq)
seq = seq.to(device)
length = length.to(device)
label = label.to(device)
optimizer.zero_grad()
label_out = model(seq, length)
loss = F.binary_cross_entropy(label_out, label)
loss.backward()
loss_meter.update(loss.item(), batch_size)
optimizer.step()
            acc = np.mean(torch.round(label_out).detach().cpu().numpy() == label.detach().cpu().numpy())
acc_meter.update(acc, batch_size)
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.4f}'.format(
epoch, batch_idx * batch_size, len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss_meter.avg,
acc_meter.avg))
print('====> Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format(
epoch, loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
def test(epoch, loader, name='Test'):
model.eval()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
with torch.no_grad():
with tqdm(total=len(loader)) as pbar:
for (seq, length, label, _) in loader:
assert label is not None
batch_size = len(seq)
seq = seq.to(device)
length = length.to(device)
label = label.to(device)
label_out = model(seq, length)
loss = F.binary_cross_entropy(label_out, label)
loss_meter.update(loss.item(), batch_size)
acc = np.mean(torch.round(label_out.cpu()).numpy() == label.cpu().numpy())
acc_meter.update(acc, batch_size)
pbar.update()
print('====> {} Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format(
name, epoch, loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
    best_loss = sys.maxsize
track_train_loss = np.zeros(args.epochs)
track_val_loss = np.zeros(args.epochs)
track_test_loss = np.zeros(args.epochs)
track_train_acc = np.zeros(args.epochs)
track_val_acc = np.zeros(args.epochs)
track_test_acc = np.zeros(args.epochs)
    for epoch in range(1, args.epochs + 1):
train_loss, train_acc = train(epoch)
val_loss, val_acc = test(epoch, val_loader, name='Val')
test_loss, test_acc = test(epoch, test_loader, name='Test')
track_train_loss[epoch - 1] = train_loss
track_val_loss[epoch - 1] = val_loss
track_test_loss[epoch - 1] = test_loss
track_train_acc[epoch - 1] = train_acc
track_val_acc[epoch - 1] = val_acc
track_test_acc[epoch - 1] = test_acc
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
save_checkpoint({
'state_dict': model.state_dict(),
'cmd_line_args': args,
'vocab': train_dataset.vocab,
}, is_best, folder=args.out_dir)
np.save(os.path.join(args.out_dir, 'train_loss.npy'), track_train_loss)
np.save(os.path.join(args.out_dir, 'val_loss.npy'), track_val_loss)
np.save(os.path.join(args.out_dir, 'test_loss.npy'), track_test_loss)
np.save(os.path.join(args.out_dir, 'train_acc.npy'), track_train_acc)
np.save(os.path.join(args.out_dir, 'val_acc.npy'), track_val_acc)
np.save(os.path.join(args.out_dir, 'test_acc.npy'), track_test_acc)
|
[
"torch.manual_seed",
"argparse.ArgumentParser",
"os.makedirs",
"torch.nn.functional.binary_cross_entropy",
"os.path.join",
"numpy.zeros",
"torch.cuda.is_available",
"os.path.isdir",
"numpy.random.seed",
"torch.round",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.device"
] |
[((677, 702), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (700, 702), False, 'import argparse\n'), ((1216, 1260), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (1228, 1260), False, 'import torch\n'), ((1407, 1435), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1424, 1435), False, 'import torch\n'), ((1440, 1465), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1454, 1465), True, 'import numpy as np\n'), ((2104, 2176), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True)\n', (2119, 2176), True, 'import torch.utils.data as data\n'), ((2194, 2265), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=args.batch_size, shuffle=False)\n', (2209, 2265), True, 'import torch.utils.data as data\n'), ((2284, 2356), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batch_size, shuffle=False)\n', (2299, 2356), True, 'import torch.utils.data as data\n'), ((5012, 5033), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5020, 5033), True, 'import numpy as np\n'), ((5055, 5076), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5063, 5076), True, 'import numpy as np\n'), ((5099, 5120), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5107, 5120), True, 'import numpy as np\n'), ((5143, 5164), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5151, 5164), True, 'import numpy as np\n'), ((5185, 5206), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5193, 5206), True, 'import numpy as np\n'), ((5228, 5249), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5236, 5249), True, 'import numpy as np\n'), ((1125, 1150), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1148, 1150), False, 'import torch\n'), ((1478, 1505), 'os.path.isdir', 'os.path.isdir', (['args.out_dir'], {}), '(args.out_dir)\n', (1491, 1505), False, 'import os\n'), ((1515, 1540), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (1526, 1540), False, 'import os\n'), ((3104, 3144), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['label_out', 'label'], {}), '(label_out, label)\n', (3126, 3144), True, 'import torch.nn.functional as F\n'), ((4064, 4079), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4077, 4079), False, 'import torch\n'), ((6059, 6103), 'os.path.join', 'os.path.join', (['args.out_dir', '"""train_loss.npy"""'], {}), "(args.out_dir, 'train_loss.npy')\n", (6071, 6103), False, 'import os\n'), ((6139, 6181), 'os.path.join', 'os.path.join', (['args.out_dir', '"""val_loss.npy"""'], {}), "(args.out_dir, 'val_loss.npy')\n", (6151, 6181), False, 'import os\n'), ((6215, 6258), 'os.path.join', 'os.path.join', (['args.out_dir', '"""test_loss.npy"""'], {}), "(args.out_dir, 'test_loss.npy')\n", (6227, 6258), False, 'import os\n'), ((6293, 6336), 'os.path.join', 'os.path.join', (['args.out_dir', '"""train_acc.npy"""'], {}), "(args.out_dir, 'train_acc.npy')\n", (6305, 6336), False, 'import os\n'), ((6371, 6412), 'os.path.join', 'os.path.join', (['args.out_dir', 
'"""val_acc.npy"""'], {}), "(args.out_dir, 'val_acc.npy')\n", (6383, 6412), False, 'import os\n'), ((6445, 6487), 'os.path.join', 'os.path.join', (['args.out_dir', '"""test_acc.npy"""'], {}), "(args.out_dir, 'test_acc.npy')\n", (6457, 6487), False, 'import os\n'), ((4485, 4525), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['label_out', 'label'], {}), '(label_out, label)\n', (4507, 4525), True, 'import torch.nn.functional as F\n'), ((3285, 3307), 'torch.round', 'torch.round', (['label_out'], {}), '(label_out)\n', (3296, 3307), False, 'import torch\n')]
|
"""
Sparse Poisson Recovery (SPoRe) module for solving the Multiple Measurement Vector
problem with Poisson signals (MMVP) by batch stochastic gradient ascent and
Monte Carlo integration
Authors: <NAME>, <NAME>
Reference:
[1] <NAME>, <NAME>, <NAME>, and <NAME>, "Extreme Compressed
Sensing of Poisson Rates from Multiple Measurements," Mar. 2021.
arXiv ID:
"""
from abc import ABC, abstractmethod
import numpy as np
import time
import pdb
from .mmv_models import FwdModelGroup, SPoReFwdModelGroup
class SPoRe(object):
def __init__(self, N, fwdmodel, sampler, batch_size=100, step_size=1e-1,
min_lambda=1e-3, pyx_min=0, grad_scale=5e-2, conv_rel=1e-2, conv_window=500,
patience = 3000, step_cut = 0.1, max_cut = 5, max_iter=int(1e4)):
"""
Parameters
----------
N: int
Dimension of signals
fwdmodel : object
instance of a mmv_models.FwdModel class. Object should contain any necessary
model-specific parameters as attributes
sampler : object
instance of a spore.Sampler class that has a .sample method returning S samples
of signals X from a probability distribution (N, S, :)
batch_size: int
Number of columns of Y to randomly draw and evaluate for each iteration
step_size: float
initial learning rate for stochastic gradient ascent
min_lambda: float
Lower bound on individual entries of lambda. \epsilon in [1]
pyx_min: float (default 0, i.e. no effect)
A batch element y_b is only included in analysis if max(p(y_b|x_s))
among sampled x's (x_s) is greater than this value. Prevents steps
            in the direction of junk measurements (e.g. a corrupted signal) OR
if samples are not good for the y_b
[1] used 0 for all experiments
grad_scale: float
Maximum l2-norm of gradient step that can be taken. Any step larger
is rescaled to have this l2-norm
conv_rel: float (0,1)
Fractional change in the average of lambda estimate in two conv_windows,
below which iteration stops
conv_window: int
            Number of iterations over which to evaluate moving averages. Nonoverlapping windows
            are compared; e.g. if conv_window = 500, the average over iterations [i-999, i-500]
            is compared with the average over iterations [i-499, i].
patience: int
Number of iterations to wait for improvement in log likelihood before
cutting step size
step_cut: float (0, 1)
Fraction to cut step size by if patience exceeded
max_cut: int
Maximum number of times step size can be cut by step_cut before
quitting
max_iter: int
Maximum iteration budget. SPoRe terminates regardless of convergence status
"""
self.N = N
if isinstance(fwdmodel, FwdModelGroup):
self.fwdmodel_group = fwdmodel
else:
self.fwdmodel_group = FwdModelGroup([fwdmodel])
self.sampler = sampler
self.batch_size = batch_size
self.step_size = step_size
self.min_lambda = min_lambda
self.pyx_min = pyx_min
self.grad_scale = grad_scale
self.conv_rel = conv_rel
self.conv_window = conv_window
self.patience = patience
self.step_cut = step_cut
self.max_cut = max_cut
self.max_iter = max_iter
def recover(self, Y, S, lam0=None, randinit_offset=1e-1, seed=None, verbose=True):
"""Recover poisson rate parameters given
Parameters
----------
Y : array_like
Observations.
Shape ``(M, D)``.
S : int
Number of samples to draw for each Y.
lam0: array_like
Initial value for estimated lambda. If None, lam0 = randinit_offset
            Shape: ``(N,)``
randinit_offset: float
Random initializations (if lam0 not provided) are drawn.
Offset sets a minimum value for any particular entry of lambda0
seed: int or None
Initial seed for before iterations begin
verbose: boolean
If True, prints some information every <self.conv_window> iterations
Returns
-------
lam_S : numpy array
Recovered estimate of lambda
Shape ``(N,)``
includeCheck: numpy array
Indices of observations that never influenced a gradient step. These
observations can be considered 'unexplained' by the recovered lambda.
Can be indicative of a corrupted measurement.
Not used in [1]
lamHistory: numpy array
History of lambda estimates at each iteration
Shape ``(N, iters)`` (for iters evaluated until convergence)
llHistory: numpy array
History of median log-likelihood estimates at each iteration
Shape ``(iters,)``
"""
if isinstance(self.fwdmodel_group, SPoReFwdModelGroup):
fwdmodel = self.fwdmodel_group
else:
_, D = Y.shape
group_indices = None
fwdmodel = SPoReFwdModelGroup(self.fwdmodel_group, group_indices)
M, D = np.shape(Y)
np.random.seed(seed)
lamHistory = np.zeros((self.N, self.max_iter))
llHistory = np.zeros((self.max_iter))
if lam0 is None:
lam0 = np.ones(self.N)*randinit_offset
lamHat = lam0
# Remaining false elements at convergence => unexplained measurements. Not used in [1]
includeCheck = np.zeros(D) > np.ones(D)
refIter = 0
bestIter = 0
stepTemp = self.step_size
numCut = 0
t0 = time.time()
stepIter = []
# Batch gradient ascent
for i in range(self.max_iter):
# Get batch elements and sample for each
batchInds = np.random.choice(D, self.batch_size)
Y_batch = Y[:,batchInds]
self.sampler._lam = lamHat
X_sample = self.sampler.sample(Y_batch, S)
pyx = fwdmodel.py_x_batch(Y_batch[:, None, :], X_sample, batchInds) # (S, B) array
# Don't eval batch elements whose p(y|x) is too low for all samples. In [1] (self.pyx_min=0)
batchInclude = np.max(pyx, axis=0) > self.pyx_min
includeCheck[batchInds[batchInclude]] = True
pyx = pyx[:, batchInclude]
if np.shape(X_sample)[2] > 1:
X_sample = X_sample[:,:,batchInclude]
pqRatio = self.sampler.pq_ratio(X_sample)
probsAgg = pyx * pqRatio # (S, B) array, aggregate value of pdf computations
# Evaluate loss and gradient
llHistory[i] = self.log_likelihood(probsAgg)
grad = self.gradient(X_sample, lamHat, probsAgg)
step = stepTemp * grad
# Necessary to make more robust against numerical issue described in [1]
if not np.all(grad==np.zeros(self.N)): # at least some sampled X informs a gradient step
stepIter.append(i) # track when steps are taken
if np.any( (lamHat+step) >self.min_lambda): #if at least one index is stepped meaningfully
# Rescale according to the indices still in question
normCheck = np.linalg.norm(step[ (lamHat+step) >self.min_lambda])
if normCheck > self.grad_scale :
step = (self.grad_scale / normCheck) * step
else: # step is likely too big, period.
if np.linalg.norm(step) > self.grad_scale : # Rescale based on whole step vector
step = (self.grad_scale / np.linalg.norm(step)) * step
#if steps have been taken at least 1/2 the time, recent conv_window worth of iterations likely to have been taken
# hypothesize that steps may not be taken occasionally at first as lamHat is a bad estimate, but will be taken with increasing regularity
enoughSteps = np.sum(np.array(stepIter) > (i - self.conv_window*2)) > self.conv_window
lamHat += step
lamHat[lamHat < self.min_lambda] = self.min_lambda
lamHistory[:, i] = lamHat
# Check convergence
if (i+1) >= (self.conv_window*2):
lam1 = np.mean(lamHistory[:, (i-2*self.conv_window+1):(i-self.conv_window+1)], axis=1) # e.g [:, 0:500] if conv_window is 500
lam2 = np.mean(lamHistory[:, (i-self.conv_window+1):(i+1)], axis=1) # e.g. [:, 500:] if i is 999, conv_window is 500
pctChange = np.linalg.norm(lam2 - lam1, ord=1) / np.linalg.norm(lam1, ord=1)
if pctChange < self.conv_rel and enoughSteps:
break
# Cut learning rate (if necessary)
if llHistory[i] >= llHistory[bestIter] or np.isnan(llHistory[bestIter]):
bestIter = i
refIter = i
if i - refIter >= self.patience and enoughSteps:
stepTemp = self.step_cut * stepTemp
refIter = i
numCut += 1
if verbose is True:
print('Step size cut ' + str(numCut) + ' times')
if numCut >= self.max_cut:
break
# Report:
if verbose is True and (i+1)>=(self.conv_window*2) and (i+1) % self.conv_window == 0:
print('Iteration #: ' + str(i+1) + '; l1-norm change: ' + str(pctChange) + \
'; recovery time: ' + str(round(time.time()-t0, 2)) + ' seconds')
# average over last conv_window iterations' values
lamHat = np.mean(lamHistory[:, (i-self.conv_window+1):(i+1)], axis=1)
return lamHat, includeCheck, lamHistory, llHistory
def log_likelihood(self, p_agg):
r"""Compute log-likelihood and return the ~average (median/B).
Median used because of high variability of individual batch draws.
Outlier resistance important if using log-likelihood to inform convergence
Parameters
----------
p_agg: array_like
element-wise product of p(y|x) (an (S,B,) array) and
pqRatio (an (S,B) array or an (S,) array if sample_same=True)
Explicitly: p_agg for any element is p(y_b|x_s) * p(x_s|\lamHat) / Q(x_s)
where Q is the sampling function
Shape: (S, B,)
Returns
-------
ll: average log likelihood of p(y_b|\lambda)
"""
S, B = np.shape(p_agg)
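        # Concretely, the value returned below is
        #     ll = median_b( log( (1/S) * sum_s p_agg[s, b] ) ) / B
        # i.e. a Monte Carlo estimate of log p(y_b | lambda) per batch element, summarized by its median.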
likelihood = (1/S) * np.sum(p_agg, axis=0) # of all batch elements
ll = np.median(np.log(likelihood)) / B
return ll
def gradient(self, X_s, lamHat, p_agg):
"""
Compute MC gradients based on pre-computed measurement/sampling likelihoods
p(y|x), Q(x_s) (p_agg) and Poisson likelihoods (samples X_s, current estimate lamHat)
Parameters
----------
X_s : array_like
Sampled X's
Shape (N, S, B) or (N, S, 1)
lamHat : array_like
current estimate of lambda. Shape (N,)
p_agg : see log_likelihood()
Returns
-------
grad: array_like
batch gradient
Shape: (N,)
"""
_, _, sameSamples = np.shape(X_s) #same samples over each iteration
S, B = np.shape(p_agg)
grad = np.zeros((self.N,))
#Note - it's ok if grad = 0 if all sumChecks fail - equates to waiting
#until next iter
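        # Monte Carlo estimator computed below (cf. [1]), one term per batch element b:
        #     grad_n ~= (1/B) * sum_b [ sum_s (x[n,s,b]/lamHat[n]) * p_agg[s,b] / sum_s p_agg[s,b] - 1 ]
        # Batch elements whose p_agg column sums to zero are masked out and contribute nothing.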
sums = np.sum(p_agg, axis=0)
sumCheck = sums !=0
if np.size(sumCheck) != 0: #else just return zero vector
if sameSamples == 1:
xOverL = X_s[:,:,0] / lamHat[:, None] #(N, S)
grad = np.sum((xOverL @ p_agg[:, sumCheck]) / sums[sumCheck] - 1 , axis=1)
else:
xOverL = X_s / lamHat[:, None, None] #(N, S, B)
numer = np.einsum('ij...,j...->i...', xOverL[:,:,sumCheck], p_agg[:,sumCheck])
grad = np.sum((numer / sums) - 1, axis=1)
grad = grad/B
return grad
class Sampler(ABC):
@abstractmethod
def sample(self, Y, S, seed=None):
"""Generate samples of X for each column of Y
Parameters
----------
Y : array_like
Observations to sample according to. This array must have
shape ``(M, B)``.
S : int
Number of samples to draw for each Y.
seed: Random seed for drawing
Returns
-------
X : (N, S, B) or (N, S, 1) ndarray
S Samples of X for each of B columns of Y. Last dimension is 1 if
same samples apply to all batch elements
"""
pass
@abstractmethod
def pq_ratio(self, X):
"""
Get the ratio of probability densities of input X
P(X|self._lam)/Q(X) element-wise
Where P(X|self._lam) is the Poisson probability of each entry in X
Q(X) is the sampler's probability of drawing that X
Parameters
----------
X : array_like
N-dimensional Vectors within range of Sampler.sample(), stacked in columns of array
Shape: ``(N, S, B)`` or ``(N, S, 1)``
Returns
-------
ratio : array_like
Probability densities Q(x) for all X
Shape: ``(S, B)``
"""
pass
class PoissonSampler(Sampler):
def __init__(self, lam, sample_same=True, seed=None):
"""
As used in [1]: Q(x) = P(x|lamHat)
Parameters
----------
lam : array_like (float)
Poisson rates from which to draw
Shape: ``(N,)``
sample_same : bool
Whether to use the same X samples for each column of Y.
"""
self._lam = lam
self._sample_same = sample_same
self._generator = np.random.default_rng(seed)
def sample(self, Y, S):
N, = self._lam.shape
_, B = Y.shape
if self._sample_same:
X = self._generator.poisson(self._lam[:, None, None], (N, S, 1))
else:
X = self._generator.poisson(self._lam[:, None, None], (N, S, B))
return X
def pq_ratio(self, X):
_, S, B = np.shape(X)
#With Poisson sampler - always sampling according to the current lambda value in the sampler
ratio = np.ones((S,B))
return ratio
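# A minimal end-to-end sketch (illustrative only; N, Y and the forward model are assumptions --
# any mmv_models.FwdModel instance accepted by SPoRe's constructor would do):
#
#     sampler = PoissonSampler(lam=np.ones(N) * 0.1, sample_same=True)
#     spore = SPoRe(N, fwdmodel, sampler, batch_size=100, step_size=1e-1)
#     lam_hat, include_check, lam_history, ll_history = spore.recover(Y, S=1000)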
|
[
"numpy.mean",
"numpy.random.default_rng",
"numpy.ones",
"numpy.random.choice",
"numpy.size",
"numpy.log",
"numpy.any",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.isnan",
"numpy.random.seed",
"numpy.einsum",
"numpy.linalg.norm",
"numpy.shape",
"time.time"
] |
[((5468, 5479), 'numpy.shape', 'np.shape', (['Y'], {}), '(Y)\n', (5476, 5479), True, 'import numpy as np\n'), ((5488, 5508), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5502, 5508), True, 'import numpy as np\n'), ((5530, 5563), 'numpy.zeros', 'np.zeros', (['(self.N, self.max_iter)'], {}), '((self.N, self.max_iter))\n', (5538, 5563), True, 'import numpy as np\n'), ((5584, 5607), 'numpy.zeros', 'np.zeros', (['self.max_iter'], {}), '(self.max_iter)\n', (5592, 5607), True, 'import numpy as np\n'), ((5996, 6007), 'time.time', 'time.time', ([], {}), '()\n', (6005, 6007), False, 'import time\n'), ((10405, 10467), 'numpy.mean', 'np.mean', (['lamHistory[:, i - self.conv_window + 1:i + 1]'], {'axis': '(1)'}), '(lamHistory[:, i - self.conv_window + 1:i + 1], axis=1)\n', (10412, 10467), True, 'import numpy as np\n'), ((11291, 11306), 'numpy.shape', 'np.shape', (['p_agg'], {}), '(p_agg)\n', (11299, 11306), True, 'import numpy as np\n'), ((12112, 12125), 'numpy.shape', 'np.shape', (['X_s'], {}), '(X_s)\n', (12120, 12125), True, 'import numpy as np\n'), ((12175, 12190), 'numpy.shape', 'np.shape', (['p_agg'], {}), '(p_agg)\n', (12183, 12190), True, 'import numpy as np\n'), ((12206, 12225), 'numpy.zeros', 'np.zeros', (['(self.N,)'], {}), '((self.N,))\n', (12214, 12225), True, 'import numpy as np\n'), ((12355, 12376), 'numpy.sum', 'np.sum', (['p_agg'], {'axis': '(0)'}), '(p_agg, axis=0)\n', (12361, 12376), True, 'import numpy as np\n'), ((14891, 14918), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (14912, 14918), True, 'import numpy as np\n'), ((15285, 15296), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (15293, 15296), True, 'import numpy as np\n'), ((15427, 15442), 'numpy.ones', 'np.ones', (['(S, B)'], {}), '((S, B))\n', (15434, 15442), True, 'import numpy as np\n'), ((5844, 5855), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (5852, 5855), True, 'import numpy as np\n'), ((5859, 5869), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (5866, 5869), True, 'import numpy as np\n'), ((6215, 6251), 'numpy.random.choice', 'np.random.choice', (['D', 'self.batch_size'], {}), '(D, self.batch_size)\n', (6231, 6251), True, 'import numpy as np\n'), ((11336, 11357), 'numpy.sum', 'np.sum', (['p_agg'], {'axis': '(0)'}), '(p_agg, axis=0)\n', (11342, 11357), True, 'import numpy as np\n'), ((12433, 12450), 'numpy.size', 'np.size', (['sumCheck'], {}), '(sumCheck)\n', (12440, 12450), True, 'import numpy as np\n'), ((5671, 5686), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (5678, 5686), True, 'import numpy as np\n'), ((6633, 6652), 'numpy.max', 'np.max', (['pyx'], {'axis': '(0)'}), '(pyx, axis=0)\n', (6639, 6652), True, 'import numpy as np\n'), ((7510, 7549), 'numpy.any', 'np.any', (['(lamHat + step > self.min_lambda)'], {}), '(lamHat + step > self.min_lambda)\n', (7516, 7549), True, 'import numpy as np\n'), ((8899, 8989), 'numpy.mean', 'np.mean', (['lamHistory[:, i - 2 * self.conv_window + 1:i - self.conv_window + 1]'], {'axis': '(1)'}), '(lamHistory[:, i - 2 * self.conv_window + 1:i - self.conv_window + 1\n ], axis=1)\n', (8906, 8989), True, 'import numpy as np\n'), ((9041, 9103), 'numpy.mean', 'np.mean', (['lamHistory[:, i - self.conv_window + 1:i + 1]'], {'axis': '(1)'}), '(lamHistory[:, i - self.conv_window + 1:i + 1], axis=1)\n', (9048, 9103), True, 'import numpy as np\n'), ((9536, 9565), 'numpy.isnan', 'np.isnan', (['llHistory[bestIter]'], {}), '(llHistory[bestIter])\n', (9544, 9565), True, 'import numpy as np\n'), ((11413, 11431), 
'numpy.log', 'np.log', (['likelihood'], {}), '(likelihood)\n', (11419, 11431), True, 'import numpy as np\n'), ((12632, 12696), 'numpy.sum', 'np.sum', (['(xOverL @ p_agg[:, sumCheck] / sums[sumCheck] - 1)'], {'axis': '(1)'}), '(xOverL @ p_agg[:, sumCheck] / sums[sumCheck] - 1, axis=1)\n', (12638, 12696), True, 'import numpy as np\n'), ((12823, 12896), 'numpy.einsum', 'np.einsum', (['"""ij...,j...->i..."""', 'xOverL[:, :, sumCheck]', 'p_agg[:, sumCheck]'], {}), "('ij...,j...->i...', xOverL[:, :, sumCheck], p_agg[:, sumCheck])\n", (12832, 12896), True, 'import numpy as np\n'), ((12917, 12949), 'numpy.sum', 'np.sum', (['(numer / sums - 1)'], {'axis': '(1)'}), '(numer / sums - 1, axis=1)\n', (12923, 12949), True, 'import numpy as np\n'), ((6797, 6815), 'numpy.shape', 'np.shape', (['X_sample'], {}), '(X_sample)\n', (6805, 6815), True, 'import numpy as np\n'), ((7766, 7819), 'numpy.linalg.norm', 'np.linalg.norm', (['step[lamHat + step > self.min_lambda]'], {}), '(step[lamHat + step > self.min_lambda])\n', (7780, 7819), True, 'import numpy as np\n'), ((9179, 9213), 'numpy.linalg.norm', 'np.linalg.norm', (['(lam2 - lam1)'], {'ord': '(1)'}), '(lam2 - lam1, ord=1)\n', (9193, 9213), True, 'import numpy as np\n'), ((9216, 9243), 'numpy.linalg.norm', 'np.linalg.norm', (['lam1'], {'ord': '(1)'}), '(lam1, ord=1)\n', (9230, 9243), True, 'import numpy as np\n'), ((7353, 7369), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (7361, 7369), True, 'import numpy as np\n'), ((8022, 8042), 'numpy.linalg.norm', 'np.linalg.norm', (['step'], {}), '(step)\n', (8036, 8042), True, 'import numpy as np\n'), ((8564, 8582), 'numpy.array', 'np.array', (['stepIter'], {}), '(stepIter)\n', (8572, 8582), True, 'import numpy as np\n'), ((8150, 8170), 'numpy.linalg.norm', 'np.linalg.norm', (['step'], {}), '(step)\n', (8164, 8170), True, 'import numpy as np\n'), ((10249, 10260), 'time.time', 'time.time', ([], {}), '()\n', (10258, 10260), False, 'import time\n')]
|
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# pw_classes.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from mediapack import from_yaml
from mediapack import Air, PEM, EqFluidJCA
from pyPLANES.utils.io import initialisation_out_files_plain
from pyPLANES.core.calculus import PwCalculus
from pyPLANES.core.multilayer import MultiLayer
from pyPLANES.pw.pw_layers import FluidLayer
from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking
Air = Air()
# def initialise_PW_solver(L, b):
# nb_PW = 0
# dofs = []
# for _layer in L:
# if _layer.medium.MODEL == "fluid":
# dofs.append(nb_PW+np.arange(2))
# nb_PW += 2
# elif _layer.medium.MODEL == "pem":
# dofs.append(nb_PW+np.arange(6))
# nb_PW += 6
# elif _layer.medium.MODEL == "elastic":
# dofs.append(nb_PW+np.arange(4))
# nb_PW += 4
# interface = []
# for i_l, _layer in enumerate(L[:-1]):
# interface.append((L[i_l].medium.MODEL, L[i_l+1].medium.MODEL))
# return nb_PW, interface, dofs
class PwProblem(PwCalculus, MultiLayer):
"""
Plane Wave Problem
"""
def __init__(self, **kwargs):
PwCalculus.__init__(self, **kwargs)
termination = kwargs.get("termination","rigid")
        self.method = kwargs.get("method","global")
MultiLayer.__init__(self, **kwargs)
self.kx, self.ky, self.k = None, None, None
self.shift_plot = kwargs.get("shift_pw", 0.)
self.plot = kwargs.get("plot_results", [False]*6)
self.result = {}
self.outfiles_directory = False
if self.method == "global":
self.layers.insert(0,FluidLayer(Air,1.e-2))
if self.layers[1].medium.MEDIUM_TYPE == "fluid":
self.interfaces.append(FluidFluidInterface(self.layers[0],self.layers[1]))
self.nb_PW = 0
for _layer in self.layers:
if _layer.medium.MODEL == "fluid":
_layer.dofs = self.nb_PW+np.arange(2)
self.nb_PW += 2
elif _layer.medium.MODEL == "pem":
_layer.dofs = self.nb_PW+np.arange(6)
self.nb_PW += 6
elif _layer.medium.MODEL == "elastic":
_layer.dofs = self.nb_PW+np.arange(4)
self.nb_PW += 4
def update_frequency(self, f):
PwCalculus.update_frequency(self, f)
MultiLayer.update_frequency(self, f, self.k, self.kx)
def create_linear_system(self, f):
self.A = np.zeros((self.nb_PW-1, self.nb_PW), dtype=complex)
i_eq = 0
# Loop on the interfaces
for _int in self.interfaces:
if self.method == "global":
i_eq = _int.update_M_global(self.A, i_eq)
# for i_inter, _inter in enumerate(self.interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = self.interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_fluid_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_fluid_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = self.interface_pem_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_pem_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_pem_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "elastic":
# if _inter[1] == "fluid":
# i_eq = self.interface_elastic_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_elastic_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_elastic_elastic(i_eq, i_inter, Layers, dofs, M)
# if self.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = self.interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1] )
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = self.interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif Layers[-1].medium.MODEL == "elastic":
# i_eq = self.interface_elastic_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif self.backing == "transmission":
# i_eq = self.semi_infinite_medium(M, i_eq, Layers[-1], dofs[-1] )
self.F = -self.A[:, 0]*np.exp(1j*self.ky*self.layers[0].d) # - is for transposition, exponential term is for the phase shift
self.A = np.delete(self.A, 0, axis=1)
# print(self.A)
X = LA.solve(self.A, self.F)
# print(X)
# R_pyPLANES_PW = X[0]
# if self.backing == "transmission":
# T_pyPLANES_PW = X[-2]
# else:
# T_pyPLANES_PW = 0.
# X = np.delete(X, 0)
# del(dofs[0])
# for i, _ld in enumerate(dofs):
# dofs[i] -= 2
# if self.plot:
# self.plot_sol_PW(X, dofs)
# out["R"] = R_pyPLANES_PW
# out["T"] = T_pyPLANES_PW
# return out
# class Solver_PW(PwCalculus):
# def __init__(self, **kwargs):
# PwCalculus.__init__(self, **kwargs)
# ml = kwargs.get("ml")
# termination = kwargs.get("termination")
# self.layers = []
# for _l in ml:
# if _l[0] == "Air":
# mat = Air
# else:
# mat = from_yaml(_l[0]+".yaml")
# d = _l[1]
# self.layers.append(Layer(mat,d))
# if termination in ["trans", "transmission","Transmission"]:
# self.backing = "Transmission"
# else:
# self.backing = backing.rigid
# self.kx, self.ky, self.k = None, None, None
# self.shift_plot = kwargs.get("shift_pw", 0.)
# self.plot = kwargs.get("plot_results", [False]*6)
# self.result = {}
# self.outfiles_directory = False
# initialisation_out_files_plain(self)
# def write_out_files(self, out):
# self.out_file.write("{:.12e}\t".format(self.current_frequency))
# abs = 1-np.abs(out["R"])**2
# self.out_file.write("{:.12e}\t".format(abs))
# self.out_file.write("\n")
# def interface_fluid_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_rigid(self, M, ieq, L, d):
# SV, k_y = fluid_SV(self.kx, self.k, L.medium.K)
# M[ieq, d[0]] = SV[0, 0]*np.exp(-1j*k_y*L.thickness)
# M[ieq, d[1]] = SV[0, 1]
# ieq += 1
# return ieq
# def semi_infinite_medium(self, M, ieq, L, d):
# M[ieq, d[1]] = 1.
# ieq += 1
# return ieq
# def interface_pem_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium, self.kx)
# for _i in range(6):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+0][4]] = SV_1[_i, 4]
# M[ieq, d[iinter+0][5]] = SV_1[_i, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[_i, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[_i, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[4, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[4, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[4, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[4, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[4, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[4, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = SV_2[3, 2]
# M[ieq, d[iinter+1][3]] = SV_2[3, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[3, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[3, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[0, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = SV_2[1, 2]
# M[ieq, d[iinter+1][3]] = SV_2[1, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[1, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[1, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = (SV_2[3, 0]-SV_2[4, 0])
# M[ieq, d[iinter+1][1]] = (SV_2[3, 1]-SV_2[4, 1])
# M[ieq, d[iinter+1][2]] = (SV_2[3, 2]-SV_2[4, 2])
# M[ieq, d[iinter+1][3]] = (SV_2[3, 3]-SV_2[4, 3])*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = (SV_2[3, 4]-SV_2[4, 4])*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = (SV_2[3, 5]-SV_2[4, 5])*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[3, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[3, 3]
# M[ieq, d[iinter+1][0]] = SV_2[5, 0]
# M[ieq, d[iinter+1][1]] = SV_2[5, 1]
# M[ieq, d[iinter+1][2]] = SV_2[5, 2]
# M[ieq, d[iinter+1][3]] = SV_2[5, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[5, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[5, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium,self.kx)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[1, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[1, 3]
# M[ieq, d[iinter+0][4]] = SV_1[1, 4]
# M[ieq, d[iinter+0][5]] = SV_1[1, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = (SV_1[3, 0]-SV_1[4, 0])*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = (SV_1[3, 1]-SV_1[4, 1])*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = (SV_1[3, 2]-SV_1[4, 2])*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = (SV_1[3, 3]-SV_1[4, 3])
# M[ieq, d[iinter+0][4]] = (SV_1[3, 4]-SV_1[4, 4])
# M[ieq, d[iinter+0][5]] = (SV_1[3, 5]-SV_1[4, 5])
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[5, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[5, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[5, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[5, 3]
# M[ieq, d[iinter+0][4]] = SV_1[5, 4]
# M[ieq, d[iinter+0][5]] = SV_1[5, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[3, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[3, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# for _i in range(4):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium, self.kx, self.omega)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[4, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[4, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[4, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[4, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[4, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[4, 5]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[3, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[3, 3]
# M[ieq, d[iinter+0][4]] = SV_1[3, 4]
# M[ieq, d[iinter+0][5]] = SV_1[3, 5]
# ieq += 1
# return ieq
# def interface_elastic_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium, self.kx, self.omega)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# ieq += 1
# return ieq
# def interface_elastic_rigid(self, M, ieq, L, d):
# SV, k_y = elastic_SV(L.medium,self.kx, self.omega)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]
# M[ieq, d[3]] = SV[1, 3]
# ieq += 1
# M[ieq, d[0]] = SV[3, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[3, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[3, 2]
# M[ieq, d[3]] = SV[3, 3]
# ieq += 1
# return ieq
# def interface_pem_rigid(self, M, ieq, L, d):
# SV, k_y = PEM_SV(L.medium, self.kx)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[1, 3]
# M[ieq, d[4]] = SV[1, 4]
# M[ieq, d[5]] = SV[1, 5]
# ieq += 1
# M[ieq, d[0]] = SV[2, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[2, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[2, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[2, 3]
# M[ieq, d[4]] = SV[2, 4]
# M[ieq, d[5]] = SV[2, 5]
# ieq += 1
# M[ieq, d[0]] = SV[5, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[5, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[5, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[5, 3]
# M[ieq, d[4]] = SV[5, 4]
# M[ieq, d[5]] = SV[5, 5]
# ieq += 1
# return ieq
# def plot_sol_PW(self, X, dofs):
# x_start = self.shift_plot
# for _l, _layer in enumerate(self.layers):
# x_f = np.linspace(0, _layer.thickness,200)
# x_b = x_f-_layer.thickness
# if _layer.medium.MODEL == "fluid":
# SV, k_y = fluid_SV(self.kx, self.k, _layer.medium.K)
# pr = SV[1, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# pr += SV[1, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# ut = SV[0, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# ut += SV[0, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# # plt.figure(5)
# # plt.plot(x_start+x_f,np.abs(ut),'b')
# # plt.plot(x_start+x_f,np.imag(ut),'k')
# if _layer.medium.MODEL == "pem":
# SV, k_y = PEM_SV(_layer.medium, self.kx)
# ux, uy, pr, ut = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(3):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# uy += SV[5, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[5, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# pr += SV[4, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr += SV[4, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# ut += SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ut += SV[2, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# if _layer.medium.MODEL == "elastic":
# SV, k_y = elastic_SV(_layer.medium, self.kx, self.omega)
# ux, uy, pr, sig = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(2):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# uy += SV[3, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[3, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# pr -= SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr -= SV[2, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# sig -= SV[0, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# sig -= SV[0, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# # if self.plot[2]:
# # plt.figure(2)
# # plt.plot(x_start+x_f, np.abs(pr), 'r')
# # plt.plot(x_start+x_f, np.imag(pr), 'm')
# # plt.title("Sigma_yy")
# # if self.plot[2]:
# # plt.figure(3)
# # plt.plot(x_start+x_f, np.abs(sig), 'r')
# # plt.plot(x_start+x_f, np.imag(sig), 'm')
# # plt.title("Sigma_xy")
# x_start += _layer.thickness
# def PEM_SV(mat,ky):
# ''' S={0:\hat{\sigma}_{xy}, 1:u_y^s, 2:u_y^t, 3:\hat{\sigma}_{yy}, 4:p, 5:u_x^s}'''
# kx_1 = np.sqrt(mat.delta_1**2-ky**2)
# kx_2 = np.sqrt(mat.delta_2**2-ky**2)
# kx_3 = np.sqrt(mat.delta_3**2-ky**2)
# kx = np.array([kx_1, kx_2, kx_3])
# delta = np.array([mat.delta_1, mat.delta_2, mat.delta_3])
# alpha_1 = -1j*mat.A_hat*mat.delta_1**2-1j*2*mat.N*kx[0]**2
# alpha_2 = -1j*mat.A_hat*mat.delta_2**2-1j*2*mat.N*kx[1]**2
# alpha_3 = -2*1j*mat.N*kx[2]*ky
# SV = np.zeros((6,6), dtype=complex)
# SV[0:6, 0] = np.array([-2*1j*mat.N*kx[0]*ky, kx[0], mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 3] = np.array([ 2*1j*mat.N*kx[0]*ky,-kx[0],-mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 1] = np.array([-2*1j*mat.N*kx[1]*ky, kx[1], mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 4] = np.array([ 2*1j*mat.N*kx[1]*ky,-kx[1],-mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 2] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, alpha_3, 0., -kx[2]])
# SV[0:6, 5] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, -alpha_3, 0., kx[2]])
# return SV, kx
# def elastic_SV(mat,ky, omega):
# ''' S={0:\sigma_{xy}, 1: u_y, 2 \sigma_{yy}, 3 u_x}'''
# P_mat = mat.lambda_ + 2.*mat.mu
# delta_p = omega*np.sqrt(mat.rho/P_mat)
# delta_s = omega*np.sqrt(mat.rho/mat.mu)
# kx_p = np.sqrt(delta_p**2-ky**2)
# kx_s = np.sqrt(delta_s**2-ky**2)
# kx = np.array([kx_p, kx_s])
# alpha_p = -1j*mat.lambda_*delta_p**2 - 2j*mat.mu*kx[0]**2
# alpha_s = 2j*mat.mu*kx[1]*ky
# SV = np.zeros((4, 4), dtype=np.complex)
# SV[0:4, 0] = np.array([-2.*1j*mat.mu*kx[0]*ky, kx[0], alpha_p, ky])
# SV[0:4, 2] = np.array([ 2.*1j*mat.mu*kx[0]*ky, -kx[0], alpha_p, ky])
# SV[0:4, 1] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky,-alpha_s, -kx[1]])
# SV[0:4, 3] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky, alpha_s, kx[1]])
# return SV, kx
# def fluid_SV(kx, k, K):
# ''' S={0:u_y , 1:p}'''
# ky = np.sqrt(k**2-kx**2)
# SV = np.zeros((2, 2), dtype=complex)
# SV[0, 0:2] = np.array([ky/(1j*K*k**2), -ky/(1j*K*k**2)])
# SV[1, 0:2] = np.array([1, 1])
# return SV, ky
# def resolution_PW_imposed_displacement(S, p):
# # print("k={}".format(p.k))
# Layers = S.layers.copy()
# n, interfaces, dofs = initialise_PW_solver(Layers, S.backing)
# M = np.zeros((n, n), dtype=complex)
# i_eq = 0
# # Loop on the layers
# for i_inter, _inter in enumerate(interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_fluid_pem(i_eq, i_inter, Layers, dofs, M, p)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = interface_pem_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_pem_pem(i_eq, i_inter, Layers, dofs, M, p)
# if S.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# if Layers[0].medium.MODEL == "fluid":
# F = np.zeros(n, dtype=complex)
# SV, k_y = fluid_SV(p.kx, p.k, Layers[0].medium.K)
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]*np.exp(-1j*k_y*Layers[0].thickness)
# F[i_eq] = 1.
# elif Layers[0].medium.MODEL == "pem":
# SV, k_y = PEM_SV(Layers[0].medium, p.kx)
# M[i_eq, dofs[0][0]] = SV[2, 0]
# M[i_eq, dofs[0][1]] = SV[2, 1]
# M[i_eq, dofs[0][2]] = SV[2, 2]
# M[i_eq, dofs[0][3]] = SV[2, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[2, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[2, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# F = np.zeros(n, dtype=complex)
# F[i_eq] = 1.
# i_eq +=1
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]
# M[i_eq, dofs[0][2]] = SV[0, 2]
# M[i_eq, dofs[0][3]] = SV[0, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[0, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[0, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# i_eq += 1
# M[i_eq, dofs[0][0]] = SV[3, 0]
# M[i_eq, dofs[0][1]] = SV[3, 1]
# M[i_eq, dofs[0][2]] = SV[3, 2]
# M[i_eq, dofs[0][3]] = SV[3, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[3, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[3, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# X = LA.solve(M, F)
# # print("|R pyPLANES_PW| = {}".format(np.abs(X[0])))
# print("R pyPLANES_PW = {}".format(X[0]))
# plot_sol_PW(S, X, dofs, p)
|
[
"pyPLANES.core.multilayer.MultiLayer.__init__",
"numpy.linalg.solve",
"pyPLANES.core.multilayer.MultiLayer.update_frequency",
"numpy.delete",
"pyPLANES.core.calculus.PwCalculus.update_frequency",
"mediapack.Air",
"numpy.exp",
"numpy.zeros",
"pyPLANES.core.calculus.PwCalculus.__init__",
"pyPLANES.pw.pw_layers.FluidLayer",
"pyPLANES.pw.pw_interfaces.FluidFluidInterface",
"numpy.arange"
] |
[((1311, 1316), 'mediapack.Air', 'Air', ([], {}), '()\n', (1314, 1316), False, 'from mediapack import Air, PEM, EqFluidJCA\n'), ((2063, 2098), 'pyPLANES.core.calculus.PwCalculus.__init__', 'PwCalculus.__init__', (['self'], {}), '(self, **kwargs)\n', (2082, 2098), False, 'from pyPLANES.core.calculus import PwCalculus\n'), ((2221, 2256), 'pyPLANES.core.multilayer.MultiLayer.__init__', 'MultiLayer.__init__', (['self'], {}), '(self, **kwargs)\n', (2240, 2256), False, 'from pyPLANES.core.multilayer import MultiLayer\n'), ((3281, 3317), 'pyPLANES.core.calculus.PwCalculus.update_frequency', 'PwCalculus.update_frequency', (['self', 'f'], {}), '(self, f)\n', (3308, 3317), False, 'from pyPLANES.core.calculus import PwCalculus\n'), ((3326, 3379), 'pyPLANES.core.multilayer.MultiLayer.update_frequency', 'MultiLayer.update_frequency', (['self', 'f', 'self.k', 'self.kx'], {}), '(self, f, self.k, self.kx)\n', (3353, 3379), False, 'from pyPLANES.core.multilayer import MultiLayer\n'), ((3438, 3491), 'numpy.zeros', 'np.zeros', (['(self.nb_PW - 1, self.nb_PW)'], {'dtype': 'complex'}), '((self.nb_PW - 1, self.nb_PW), dtype=complex)\n', (3446, 3491), True, 'import numpy as np\n'), ((5782, 5810), 'numpy.delete', 'np.delete', (['self.A', '(0)'], {'axis': '(1)'}), '(self.A, 0, axis=1)\n', (5791, 5810), True, 'import numpy as np\n'), ((5847, 5871), 'numpy.linalg.solve', 'LA.solve', (['self.A', 'self.F'], {}), '(self.A, self.F)\n', (5855, 5871), True, 'import numpy.linalg as LA\n'), ((5663, 5704), 'numpy.exp', 'np.exp', (['(1.0j * self.ky * self.layers[0].d)'], {}), '(1.0j * self.ky * self.layers[0].d)\n', (5669, 5704), True, 'import numpy as np\n'), ((2556, 2577), 'pyPLANES.pw.pw_layers.FluidLayer', 'FluidLayer', (['Air', '(0.01)'], {}), '(Air, 0.01)\n', (2566, 2577), False, 'from pyPLANES.pw.pw_layers import FluidLayer\n'), ((2679, 2730), 'pyPLANES.pw.pw_interfaces.FluidFluidInterface', 'FluidFluidInterface', (['self.layers[0]', 'self.layers[1]'], {}), '(self.layers[0], self.layers[1])\n', (2698, 2730), False, 'from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking\n'), ((2893, 2905), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (2902, 2905), True, 'import numpy as np\n'), ((3038, 3050), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (3047, 3050), True, 'import numpy as np\n'), ((3187, 3199), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (3196, 3199), True, 'import numpy as np\n')]
|
from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
import numpy as np
import unittest
class testPerfForesightConsumerType(unittest.TestCase):
def setUp(self):
self.agent = PerfForesightConsumerType()
self.agent_infinite = PerfForesightConsumerType(cycles=0)
PF_dictionary = {
'CRRA' : 2.5,
'DiscFac' : 0.96,
'Rfree' : 1.03,
'LivPrb' : [0.98],
'PermGroFac' : [1.01],
'T_cycle' : 1,
'cycles' : 0,
'AgentCount' : 10000
}
self.agent_alt = PerfForesightConsumerType(
**PF_dictionary)
def test_default_solution(self):
self.agent.solve()
c = self.agent.solution[0].cFunc
self.assertEqual(c.x_list[0], -0.9805825242718447)
self.assertEqual(c.x_list[1], 0.01941747572815533)
self.assertEqual(c.y_list[0], 0)
self.assertEqual(c.y_list[1], 0.511321002804608)
self.assertEqual(c.decay_extrap, False)
def test_another_solution(self):
self.agent_alt.DiscFac = 0.90
self.agent_alt.solve()
self.assertAlmostEqual(
self.agent_alt.solution[0].cFunc(10).tolist(),
3.9750093524820787)
def test_checkConditions(self):
self.agent_infinite.checkConditions()
self.assertTrue(self.agent_infinite.AIC)
self.assertTrue(self.agent_infinite.GICPF)
self.assertTrue(self.agent_infinite.RIC)
self.assertTrue(self.agent_infinite.FHWC)
def test_simulation(self):
self.agent_infinite.solve()
# Create parameter values necessary for simulation
SimulationParams = {
"AgentCount" : 10000, # Number of agents of this type
"T_sim" : 120, # Number of periods to simulate
"aNrmInitMean" : -6.0, # Mean of log initial assets
"aNrmInitStd" : 1.0, # Standard deviation of log initial assets
"pLvlInitMean" : 0.0, # Mean of log initial permanent income
"pLvlInitStd" : 0.0, # Standard deviation of log initial permanent income
"PermGroFacAgg" : 1.0, # Aggregate permanent income growth factor
"T_age" : None, # Age after which simulated agents are automatically killed
}
self.agent_infinite(**SimulationParams) # This implicitly uses the assignParameters method of AgentType
# Create PFexample object
self.agent_infinite.track_vars = ['mNrmNow']
self.agent_infinite.initializeSim()
self.agent_infinite.simulate()
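        # The mean of simulated market resources across agents should match the recorded values at t=40 and t=100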
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[40],
-23.008063500363942
)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[100],
-27.164608851546927
)
## Try now with the manipulation at time step 80
self.agent_infinite.initializeSim()
self.agent_infinite.simulate(80)
self.agent_infinite.aNrmNow += -5. # Adjust all simulated consumers' assets downward by 5
self.agent_infinite.simulate(40)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[40],
-23.008063500363942
)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[100],
-29.140261331951606
)
|
[
"numpy.mean",
"HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType"
] |
[((214, 241), 'HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType', 'PerfForesightConsumerType', ([], {}), '()\n', (239, 241), False, 'from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType\n'), ((272, 307), 'HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType', 'PerfForesightConsumerType', ([], {'cycles': '(0)'}), '(cycles=0)\n', (297, 307), False, 'from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType\n'), ((606, 648), 'HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType', 'PerfForesightConsumerType', ([], {}), '(**PF_dictionary)\n', (631, 648), False, 'from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType\n'), ((2666, 2715), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (2673, 2715), True, 'import numpy as np\n'), ((2807, 2856), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (2814, 2856), True, 'import numpy as np\n'), ((3232, 3281), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (3239, 3281), True, 'import numpy as np\n'), ((3373, 3422), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (3380, 3422), True, 'import numpy as np\n')]
|
from PIL import Image
from math import sqrt
import numpy as np
import time
import matplotlib.backends.backend_tkagg
import matplotlib.pyplot as plt
class Point:
x: float
y: float
f: float
h: float
g: float
def __init__(self, x, y, f):
self.x = x
self.y = y
self.f = f
self.g = 0
self.h = 0
self.parent = None
def equal(self, other):
if self.x == other.x and self.y == other.y:
return True
class Output:
result_image: Image
total_time: float
n_elements: int
max_elements: int
def __init__(self, result_image, total_time, n_elements, max_elements):
self.result_image = result_image
self.total_time = total_time
self.n_elements = n_elements
self.max_elements = max_elements
self.name = None
def plot_times(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.total_time, other1.total_time, other2.total_time, other3.total_time])
fig.suptitle("Toplam Zamanlar")
fname = image_name.split('.')
plt.savefig(fname[0] + "times.png")
plt.show()
def plot_n_elements(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.n_elements, other1.n_elements, other2.n_elements, other3.n_elements])
fig.suptitle("Stack'ten Çekilen Toplam Eleman Sayısı")
fname = image_name.split('.')
plt.savefig(fname[0] + "n_elements.png")
plt.show()
def plot_max_elements(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.max_elements, other1.max_elements, other2.max_elements, other3.max_elements])
fig.suptitle("Stack'te Bulunan Maksimum Eleman Sayısı")
fname = image_name.split('.')
plt.savefig(fname[0] + "max_elements.png")
plt.show()
def distance(point, x, y):
return sqrt((point.x - x)**2 + (point.y - y)**2)
def insert_in_heap(heap, top, point):
heap.append(point)
i = top
parent = (i - 1)/2
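    # Sift the new point up while its f value is smaller than its parent's, restoring the min-heap property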
while i >= 1 and heap[int(i)].f < heap[int(parent)].f:
heap[int(i)], heap[int(parent)] = heap[int(parent)], heap[int(i)] # swap
i = parent
parent = (i - 1) / 2
return
def calculate_weight(x, y, liste, top, point, visited, index1, index2):
if visited[int(x)][int(y)] == 0:
r, g, b = image.getpixel((x, y))
if x == end.x and y == end.y:
print("Path found.")
        if r == 0:
r = 1
new_point = Point(x, y, 0)
new_point.parent = point
new_point.h = distance(end, x, y) * (256 - r)
new_point.g = 0
if index1 == 1: # a_star
new_point.g = new_point.parent.g + 256 - r
new_point.f = new_point.h + new_point.g # bfs'de g = 0
if index2 == 0: # stack
liste.append(new_point)
else: # heap
insert_in_heap(liste, top, new_point)
top += 1
visited[int(x)][int(y)] = 1
return top
def add_neighbours(point, liste, top, visited, index1, index2):
# print(point.x, point.y)
if (point.x == width - 1 and point.y == height - 1) or (point.x == 0 and point.y == 0) or \
(point.x == 0 and point.y == height - 1) or (point.x == width - 1 and point.y == 0):
# print("first if")
if point.x == width - 1 and point.y == height - 1:
constx = -1
consty = -1
elif point.x == 0 and point.y == 0:
constx = 1
consty = 1
elif point.x == width - 1 and point.y == 0:
constx = 1
consty = -1
else:
constx = -1
consty = 1
top = calculate_weight(point.x + constx, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + consty, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + constx, point.y + consty, liste, top, point, visited, index1, index2)
elif point.x == 0 or point.x == width - 1:
# print("nd if")
top = calculate_weight(point.x, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2)
if point.x == 0:
const = 1
else:
const = -1
top = calculate_weight(point.x + const, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + const, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + const, point.y, liste, top, point, visited, index1, index2)
elif point.y == 0 or point.y == height - 1:
# print("3rd if")
top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2)
if point.y == 0:
const = 1
else:
const = -1
top = calculate_weight(point.x - 1, point.y + const, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y + const, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + const, liste, top, point, visited, index1, index2)
else:
# print("4th if")
top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x - 1, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x - 1, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y - 1, liste, top, point, visited, index1, index2)
return top
def paint(point):
yol = []
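    # Walk back through parent links from the goal to the start, colouring the path green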
while not point.equal(start):
yol.append(point)
image.putpixel((int(point.x), int(point.y)), (60, 255, 0))
point = point.parent
end_time = time.time()
# image.show()
'''print("--------------YOL------------------")
for i in range(len(yol)):
print("x: {}, y:{}, distance:{}".format(yol[i].x, yol[i].y, yol[i].f))
print("------------------------------------")'''
return image, (end_time - start_time)
def bfs_and_a_star_with_stack(index):
stack = []
top = 0
found = False
point = None
stack.append(start)
visited = np.zeros((width, height))
visited[int(start.x)][int(start.y)] = 1
j = 0
max_element = 0
while stack and not found:
point = stack.pop(top)
# print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f))
top -= 1
if point.equal(end):
found = True
else:
top = add_neighbours(point, stack, top, visited, index, 0)
stack.sort(key=lambda point: point.f, reverse=True)
if len(stack) > max_element:
max_element = len(stack)
j += 1
if found:
result_image, total_time = paint(point)
# print("Stackten çekilen eleman sayısı: ", j)
# print("Stackteki maksimum eleman sayısı: ", max_element)
return result_image, total_time, j, max_element
def find_smallest_child(heap, i, top):
if 2 * i + 2 < top: # has two child
if heap[2*i + 1].f < heap[2*i + 2].f:
return 2*i + 1
else:
return 2*i + 2
elif 2*i + 1 < top: # has one child
return 2*i + 1
else: # has no child
return 0
def remove_min(heap, top):
if top == 0:
return None
min_point = heap[0]
top -= 1
heap[0] = heap[top]
del heap[top]
i = 0
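    # Sift the element moved to the root down, swapping with its smaller child until the heap property holds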
index = find_smallest_child(heap, i, top)
while index != 0 and heap[i].f > heap[index].f:
heap[i], heap[index] = heap[index], heap[i]
i = index
index = find_smallest_child(heap, i, top)
return min_point, top
def bfs_and_a_star_with_heap(index):
heap = []
found = False
yol = []
point = None
heap.append(start)
visited = np.zeros((width, height))
visited[int(start.x)][int(start.y)] = 1
j = 0
top = 1
max_element = 0
while heap and not found:
point, top = remove_min(heap, top)
# print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f))
if point.equal(end):
found = True
else:
top = add_neighbours(point, heap, top, visited, index, 1)
if len(heap) > max_element:
max_element = len(heap)
j += 1
if found:
result_image, total_time = paint(point)
else:
return
return result_image, total_time, j, max_element
if __name__ == "__main__":
print("UYARI: Seçilecek görüntü exe dosyası ile aynı klasörde olmalıdır.")
image_name = input("Algoritmanın üzerinde çalışacağı görüntünün ismini giriniz (Örnek input: image.png): ")
print(image_name)
print("-------------------Algoritmalar------------------")
print("1- Best First Search with Stack")
print("2- Best First Search with Heap")
print("3- A* with Stack")
print("4- A* with Heap")
print("5- Analiz (tüm algoritmaların çalışmalarını ve kıyaslamalarını gör)")
alg = input("Algoritmayı ve veri yapısının numarasını seçiniz (Örnek input: 1): ")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
print("Görüntünün genişliği: {}, yüksekliği: {}".format(width, height))
print("NOT: Başlangıç ve bitiş noktasının koordinatları genişlik ve uzunluktan küçük olmalıdır.")
sx, sy = input("Başlangıç noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 350 100): ").split()
ex, ey = input("Bitiş noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 200 700): ").split()
start = Point(int(sx), int(sy), -1)
start.parent = -1
end = Point(int(ex), int(ey), -1)
start_time = time.time()
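    # Dispatch to the chosen combination of algorithm (BFS / A*) and data structure (stack / heap)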
if int(alg) == 1:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0)
elif int(alg) == 2:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0)
elif int(alg) == 3:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1)
elif int(alg) == 4:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1)
elif int(alg) == 5:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0)
output1 = Output(result_image, total_time, n_elements, max_elements)
print(n_elements, total_time, max_elements)
output1.name = "BFS with Stack"
print("1/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0)
output2 = Output(result_image, total_time, n_elements, max_elements)
print(n_elements, total_time, max_elements)
output2.name = "BFS with Heap"
print("2/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1)
output3 = Output(result_image, total_time, n_elements, max_elements)
output3.name = "A* with Stack"
print(n_elements, total_time, max_elements)
print("3/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1)
output4 = Output(result_image, total_time, n_elements, max_elements)
output4.name = "A* with Heap"
print("4/4")
output1.plot_times(output2, output3, output4)
output1.plot_max_elements(output2, output3, output4)
output1.plot_n_elements(output2, output3, output4)
print("Bastırılan görüntüler sırasıyla BFS stack, BFS heap, A* stack ve A* heap şeklindedir.")
fname = image_name.split('.')
output1.result_image.show()
output1.result_image.save(fname[0] + "BFS_stack.png")
output2.result_image.show()
output2.result_image.save(fname[0] + "BFS_heap.png")
output3.result_image.show()
output3.result_image.save(fname[0] + "A_star_stack.png")
output4.result_image.show()
output4.result_image.save(fname[0] + "A_star_heap.png")
exit(0)
else:
print("Algoritma numarası hatalı girildi, tekrar deneyin.")
exit(0)
print("Stackten çekilen eleman sayısı: ", n_elements)
print("Stackteki maksimum eleman sayısı: ", max_elements)
print("Toplam süre: ", total_time)
result_image.show()
|
[
"PIL.Image.open",
"matplotlib.pyplot.savefig",
"math.sqrt",
"numpy.zeros",
"time.time",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((2189, 2234), 'math.sqrt', 'sqrt', (['((point.x - x) ** 2 + (point.y - y) ** 2)'], {}), '((point.x - x) ** 2 + (point.y - y) ** 2)\n', (2193, 2234), False, 'from math import sqrt\n'), ((6777, 6788), 'time.time', 'time.time', ([], {}), '()\n', (6786, 6788), False, 'import time\n'), ((7223, 7248), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (7231, 7248), True, 'import numpy as np\n'), ((8920, 8945), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (8928, 8945), True, 'import numpy as np\n'), ((10232, 10254), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (10242, 10254), False, 'from PIL import Image\n'), ((10876, 10887), 'time.time', 'time.time', ([], {}), '()\n', (10885, 10887), False, 'import time\n'), ((962, 976), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (974, 976), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1261), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname[0] + 'times.png')"], {}), "(fname[0] + 'times.png')\n", (1237, 1261), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1279, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1373), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1371, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1646, 1686), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname[0] + 'n_elements.png')"], {}), "(fname[0] + 'n_elements.png')\n", (1657, 1686), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1704, 1706), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1800), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1798, 1800), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2124), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname[0] + 'max_elements.png')"], {}), "(fname[0] + 'max_elements.png')\n", (2093, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2142, 2144), True, 'import matplotlib.pyplot as plt\n'), ((11681, 11703), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (11691, 11703), False, 'from PIL import Image\n'), ((11802, 11813), 'time.time', 'time.time', ([], {}), '()\n', (11811, 11813), False, 'import time\n'), ((12116, 12138), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (12126, 12138), False, 'from PIL import Image\n'), ((12237, 12248), 'time.time', 'time.time', ([], {}), '()\n', (12246, 12248), False, 'import time\n'), ((12552, 12574), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (12562, 12574), False, 'from PIL import Image\n'), ((12673, 12684), 'time.time', 'time.time', ([], {}), '()\n', (12682, 12684), False, 'import time\n')]
|
#Author <NAME>
import time
import rnnoise
import numpy as np
def time_rnnoise(rounds=1000):
a = rnnoise.RNNoise()
timer = 0.0
st = time.time()
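    # First loop: measure only the cost of generating the random 960-byte input frames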
for i in range(rounds):
inp = np.random.bytes(960)
timer = (time.time() - st)
print(timer)
st = time.time()
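    # Second loop: generation plus denoising; the generation-only time measured above is subtracted below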
for i in range(rounds):
inp = np.random.bytes(960)
va,out = a.process_frame(inp)
time_taken_per_frame = ((time.time()-st)-timer) /rounds
print("time taken for one frame - " + str(time_taken_per_frame ))
print("time in a frame - " +str(480.0/48000.0))
print(str((480.0/48000.0)/time_taken_per_frame )+"X faster than real")
a.destroy()
time_rnnoise()
|
[
"rnnoise.RNNoise",
"time.time",
"numpy.random.bytes"
] |
[((97, 114), 'rnnoise.RNNoise', 'rnnoise.RNNoise', ([], {}), '()\n', (112, 114), False, 'import rnnoise\n'), ((134, 145), 'time.time', 'time.time', ([], {}), '()\n', (143, 145), False, 'import time\n'), ((248, 259), 'time.time', 'time.time', ([], {}), '()\n', (257, 259), False, 'import time\n'), ((179, 199), 'numpy.random.bytes', 'np.random.bytes', (['(960)'], {}), '(960)\n', (194, 199), True, 'import numpy as np\n'), ((210, 221), 'time.time', 'time.time', ([], {}), '()\n', (219, 221), False, 'import time\n'), ((293, 313), 'numpy.random.bytes', 'np.random.bytes', (['(960)'], {}), '(960)\n', (308, 313), True, 'import numpy as np\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n')]
|
# Copyright (c) 2009-2020, quasardb SAS. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of quasardb nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY QUASARDB AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from builtins import range as xrange, int
import os
from socket import gethostname
import sys
import inspect
import traceback
import random
import time
import datetime
import locale
import numpy as np
import quasardb
STOCK_COLUMN = "stock_id"
OPEN_COLUMN = "open"
CLOSE_COLUMN = "close"
HIGH_COLUMN = "high"
LOW_COLUMN = "low"
VOLUME_COLUMN = "volume"
def time_execution(str, f, *args):
print(" - ", str, end='')
start_time = time.time()
res = f(*args)
end_time = time.time()
print(" [duration: {}s]".format(end_time - start_time))
return res
def gen_ts_name():
return "test.{}.{}.{}".format(gethostname(), os.getpid(), random.randint(0, 100000))
def create_ts(q, name):
ts = q.ts(name)
ts.create([quasardb.ColumnInfo(quasardb.ColumnType.Int64, STOCK_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, OPEN_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, CLOSE_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, HIGH_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, LOW_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Int64, VOLUME_COLUMN)])
return ts
def create_many_ts(q, names):
return [create_ts(q, x) for x in names]
def generate_prices(price_count):
return np.random.uniform(-100.0, 100.0, price_count)
def generate_points(points_count):
start_time = np.datetime64('2017-01-01', 'ns')
dates = np.array([(start_time + np.timedelta64(i, 'm')) for i in range(points_count)]).astype('datetime64[ns]')
stock_ids = np.random.randint(1, 25, size=points_count)
prices = np.array([generate_prices(60) for i in range(points_count)]).astype('double')
volumes = np.random.randint(0, 10000, points_count)
return (dates, stock_ids, prices, volumes)
def batch_ts_columns(ts_name, prealloc_size):
return (quasardb.BatchColumnInfo(ts_name, STOCK_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, OPEN_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, CLOSE_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, HIGH_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, LOW_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, VOLUME_COLUMN, prealloc_size))
def calculate_minute_bar(prices):
# Takes all prices for a single minute, and calculate OHLC
return (prices[0], prices[-1], np.amax(prices), np.amin(prices))
def bulk_insert(q, ts_names, dates, stock_ids, prices, volumes):
# We generate a flattened list of columns for each timeseries; for example,
# for 2 columns for 4 timeseries each, we have 8 columns.
columns = [column for nested in (batch_ts_columns(ts_name, len(dates))
for ts_name in ts_names)
for column in nested]
batch_inserter = q.ts_batch(columns)
for i in range(len(stock_ids)):
        # We use the known layout of the columns (6 for each timeseries: stock_id,
        # open, close, high, low and volume) to set the values.
for j in range(0, len(ts_names) * 6, 6):
(o, c, h, l) = calculate_minute_bar(prices[i])
batch_inserter.start_row(dates[i])
batch_inserter.set_int64(j, stock_ids[i]) # set stock_id
batch_inserter.set_double(j + 1, o) # open
batch_inserter.set_double(j + 2, c) # close
batch_inserter.set_double(j + 3, h) # high
batch_inserter.set_double(j + 4, l) # low
            batch_inserter.set_int64(j + 5, volumes[i]) # volume
batch_inserter.push()
def make_it_so(q, points_count):
ts_names = [gen_ts_name(), gen_ts_name()]
ts = time_execution("Creating a time series with names {}".format(ts_names), create_many_ts, q, ts_names)
(dates, stock_ids, prices, volumes) = time_execution("Generating {:,} points".format(points_count), generate_points, points_count)
time_execution("Inserting {:,} points into timeseries with names {}".format(points_count, ts_names), bulk_insert, q, ts_names, dates, stock_ids, prices, volumes)
return (ts_names, dates, np.unique(stock_ids))
def main(quasardb_uri, points_count):
print("Connecting to: ", quasardb_uri)
q = quasardb.Cluster(uri=quasardb_uri)
print(" *** Inserting {:,} into {}".format(points_count, quasardb_uri))
make_it_so(q, points_count)
if __name__ == "__main__":
try:
if len(sys.argv) != 3:
print("usage: ", sys.argv[0], " quasardb_uri points_count")
sys.exit(1)
main(sys.argv[1], int(sys.argv[2]))
except Exception as ex: # pylint: disable=W0703
print("An error ocurred:", str(ex))
traceback.print_exc()
|
[
"numpy.amax",
"numpy.amin",
"numpy.unique",
"sys.exit",
"quasardb.ColumnInfo",
"quasardb.Cluster",
"numpy.random.randint",
"numpy.random.uniform",
"quasardb.BatchColumnInfo",
"os.getpid",
"numpy.datetime64",
"numpy.timedelta64",
"builtins.int",
"socket.gethostname",
"traceback.print_exc",
"time.time",
"random.randint"
] |
[((2039, 2050), 'time.time', 'time.time', ([], {}), '()\n', (2048, 2050), False, 'import time\n'), ((2085, 2096), 'time.time', 'time.time', ([], {}), '()\n', (2094, 2096), False, 'import time\n'), ((2928, 2973), 'numpy.random.uniform', 'np.random.uniform', (['(-100.0)', '(100.0)', 'price_count'], {}), '(-100.0, 100.0, price_count)\n', (2945, 2973), True, 'import numpy as np\n'), ((3027, 3060), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-01"""', '"""ns"""'], {}), "('2017-01-01', 'ns')\n", (3040, 3060), True, 'import numpy as np\n'), ((3194, 3237), 'numpy.random.randint', 'np.random.randint', (['(1)', '(25)'], {'size': 'points_count'}), '(1, 25, size=points_count)\n', (3211, 3237), True, 'import numpy as np\n'), ((3343, 3384), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)', 'points_count'], {}), '(0, 10000, points_count)\n', (3360, 3384), True, 'import numpy as np\n'), ((5873, 5907), 'quasardb.Cluster', 'quasardb.Cluster', ([], {'uri': 'quasardb_uri'}), '(uri=quasardb_uri)\n', (5889, 5907), False, 'import quasardb\n'), ((2228, 2241), 'socket.gethostname', 'gethostname', ([], {}), '()\n', (2239, 2241), False, 'from socket import gethostname\n'), ((2243, 2254), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2252, 2254), False, 'import os\n'), ((2256, 2281), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (2270, 2281), False, 'import random\n'), ((3492, 3554), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'STOCK_COLUMN', 'prealloc_size'], {}), '(ts_name, STOCK_COLUMN, prealloc_size)\n', (3516, 3554), False, 'import quasardb\n'), ((3568, 3629), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'OPEN_COLUMN', 'prealloc_size'], {}), '(ts_name, OPEN_COLUMN, prealloc_size)\n', (3592, 3629), False, 'import quasardb\n'), ((3643, 3705), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'CLOSE_COLUMN', 'prealloc_size'], {}), '(ts_name, CLOSE_COLUMN, prealloc_size)\n', (3667, 3705), False, 'import quasardb\n'), ((3719, 3780), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'HIGH_COLUMN', 'prealloc_size'], {}), '(ts_name, HIGH_COLUMN, prealloc_size)\n', (3743, 3780), False, 'import quasardb\n'), ((3794, 3854), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'LOW_COLUMN', 'prealloc_size'], {}), '(ts_name, LOW_COLUMN, prealloc_size)\n', (3818, 3854), False, 'import quasardb\n'), ((3868, 3931), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'VOLUME_COLUMN', 'prealloc_size'], {}), '(ts_name, VOLUME_COLUMN, prealloc_size)\n', (3892, 3931), False, 'import quasardb\n'), ((4066, 4081), 'numpy.amax', 'np.amax', (['prices'], {}), '(prices)\n', (4073, 4081), True, 'import numpy as np\n'), ((4083, 4098), 'numpy.amin', 'np.amin', (['prices'], {}), '(prices)\n', (4090, 4098), True, 'import numpy as np\n'), ((5761, 5781), 'numpy.unique', 'np.unique', (['stock_ids'], {}), '(stock_ids)\n', (5770, 5781), True, 'import numpy as np\n'), ((2343, 2403), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Int64', 'STOCK_COLUMN'], {}), '(quasardb.ColumnType.Int64, STOCK_COLUMN)\n', (2362, 2403), False, 'import quasardb\n'), ((2420, 2480), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'OPEN_COLUMN'], {}), '(quasardb.ColumnType.Double, OPEN_COLUMN)\n', (2439, 2480), False, 'import quasardb\n'), ((2497, 2558), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'CLOSE_COLUMN'], {}), '(quasardb.ColumnType.Double, CLOSE_COLUMN)\n', (2516, 2558), False, 'import quasardb\n'), ((2575, 2635), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'HIGH_COLUMN'], {}), '(quasardb.ColumnType.Double, HIGH_COLUMN)\n', (2594, 2635), False, 'import quasardb\n'), ((2652, 2711), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'LOW_COLUMN'], {}), '(quasardb.ColumnType.Double, LOW_COLUMN)\n', (2671, 2711), False, 'import quasardb\n'), ((2728, 2789), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Int64', 'VOLUME_COLUMN'], {}), '(quasardb.ColumnType.Int64, VOLUME_COLUMN)\n', (2747, 2789), False, 'import quasardb\n'), ((6170, 6181), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6178, 6181), False, 'import sys\n'), ((6209, 6225), 'builtins.int', 'int', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (6212, 6225), False, 'from builtins import range as xrange, int\n'), ((6333, 6354), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6352, 6354), False, 'import traceback\n'), ((3098, 3120), 'numpy.timedelta64', 'np.timedelta64', (['i', '"""m"""'], {}), "(i, 'm')\n", (3112, 3120), True, 'import numpy as np\n')]
|
import numpy as np
import ROOT
from dummy_distributions import dummy_pt_eta
counts, test_in1, test_in2 = dummy_pt_eta()
f = ROOT.TFile.Open("samples/testSF2d.root")
sf = f.Get("scalefactors_Tight_Electron")
xmin, xmax = sf.GetXaxis().GetXmin(), sf.GetXaxis().GetXmax()
ymin, ymax = sf.GetYaxis().GetXmin(), sf.GetYaxis().GetXmax()
test_out = np.empty_like(test_in1)
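# Clamp (eta, pt) to the histogram axis ranges, then read the per-bin scale factor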
for i, (eta, pt) in enumerate(zip(test_in1, test_in2)):
if xmax <= eta:
eta = xmax - 1.0e-5
elif eta < xmin:
eta = xmin
if ymax <= pt:
pt = ymax - 1.0e-5
elif pt < ymin:
pt = ymin
ib = sf.FindBin(eta, pt)
test_out[i] = sf.GetBinContent(ib)
print(repr(test_out))
|
[
"numpy.empty_like",
"dummy_distributions.dummy_pt_eta",
"ROOT.TFile.Open"
] |
[((107, 121), 'dummy_distributions.dummy_pt_eta', 'dummy_pt_eta', ([], {}), '()\n', (119, 121), False, 'from dummy_distributions import dummy_pt_eta\n'), ((127, 167), 'ROOT.TFile.Open', 'ROOT.TFile.Open', (['"""samples/testSF2d.root"""'], {}), "('samples/testSF2d.root')\n", (142, 167), False, 'import ROOT\n'), ((347, 370), 'numpy.empty_like', 'np.empty_like', (['test_in1'], {}), '(test_in1)\n', (360, 370), True, 'import numpy as np\n')]
|
"""
Totally untested file. Will be removed in subsequent commits
"""
import tensorflow as tf
import matplotlib.image as mpimg
import numpy as np
from math import ceil, floor
import os
IMAGE_SIZE = 720
def central_scale_images(X_imgs, scales):
# Various settings needed for Tensorflow operation
boxes = np.zeros((len(scales), 4), dtype = np.float32)
for index, scale in enumerate(scales):
x1 = y1 = 0.5 - 0.5 * scale # To scale centrally
x2 = y2 = 0.5 + 0.5 * scale
boxes[index] = np.array([y1, x1, y2, x2], dtype = np.float32)
box_ind = np.zeros((len(scales)), dtype = np.int32)
crop_size = np.array([IMAGE_SIZE, IMAGE_SIZE], dtype = np.int32)
X_scale_data = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (1, IMAGE_SIZE, IMAGE_SIZE, 3))
# Define Tensorflow operation for all scales but only one base image at a time
tf_img = tf.image.crop_and_resize(X, boxes, box_ind, crop_size)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img_data in X_imgs:
batch_img = np.expand_dims(img_data, axis = 0)
scaled_imgs = sess.run(tf_img, feed_dict = {X: batch_img})
X_scale_data.extend(scaled_imgs)
X_scale_data = np.array(X_scale_data, dtype = np.float32)
return X_scale_data
def get_translate_parameters(index):
if index == 0: # Translate left 20 percent
offset = np.array([0.0, 0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = 0
w_end = int(ceil(0.8 * IMAGE_SIZE))
h_start = 0
h_end = IMAGE_SIZE
elif index == 1: # Translate right 20 percent
offset = np.array([0.0, -0.2], dtype = np.float32)
size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32)
w_start = int(floor((1 - 0.8) * IMAGE_SIZE))
w_end = IMAGE_SIZE
h_start = 0
h_end = IMAGE_SIZE
elif index == 2: # Translate top 20 percent
offset = np.array([0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = 0
h_end = int(ceil(0.8 * IMAGE_SIZE))
else: # Translate bottom 20 percent
offset = np.array([-0.2, 0.0], dtype = np.float32)
size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32)
w_start = 0
w_end = IMAGE_SIZE
h_start = int(floor((1 - 0.8) * IMAGE_SIZE))
h_end = IMAGE_SIZE
return offset, size, w_start, w_end, h_start, h_end
def translate_images(X_imgs):
offsets = np.zeros((len(X_imgs), 2), dtype = np.float32)
n_translations = 4
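    # One translation per direction (left, right, up, down); see get_translate_parameters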
X_translated_arr = []
tf.reset_default_graph()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(n_translations):
X_translated = np.zeros((len(X_imgs), IMAGE_SIZE, IMAGE_SIZE, 3),
dtype = np.float32)
X_translated.fill(0.0) # Filling background color
base_offset, size, w_start, w_end, h_start, h_end = get_translate_parameters(i)
offsets[:, :] = base_offset
glimpses = tf.image.extract_glimpse(X_imgs, size, offsets)
glimpses = sess.run(glimpses)
X_translated[:, h_start: h_start + size[0], \
w_start: w_start + size[1], :] = glimpses
X_translated_arr.extend(X_translated)
X_translated_arr = np.array(X_translated_arr, dtype = np.float32)
return X_translated_arr
def rotate_images(X_imgs):
X_rotate = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
k = tf.placeholder(tf.int32)
tf_img = tf.image.rot90(X, k = k)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
for i in range(3): # Rotation at 90, 180 and 270 degrees
rotated_img = sess.run(tf_img, feed_dict = {X: img, k: i + 1})
X_rotate.append(rotated_img)
X_rotate = np.array(X_rotate, dtype = np.float32)
return X_rotate
def flip_images(X_imgs):
X_flip = []
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
tf_img1 = tf.image.flip_left_right(X)
tf_img2 = tf.image.flip_up_down(X)
tf_img3 = tf.image.transpose_image(X)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for img in X_imgs:
flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3], feed_dict = {X: img})
X_flip.extend(flipped_imgs)
X_flip = np.array(X_flip, dtype = np.float32)
return X_flip
# Produce each image at scaling of 90%, 75% and 60% of original image.
# os.listdir only returns file names, so load the image arrays first (mpimg is imported above for this)
image_dir = "/home/pallab/gestures-cnn/images/resized/"
X_imgs = np.array([mpimg.imread(os.path.join(image_dir, fname)) for fname in os.listdir(image_dir)])
scaled_imgs = central_scale_images(X_imgs, [0.90, 0.75, 0.60])
translated_imgs = translate_images(X_imgs)
rotated_imgs = rotate_images(X_imgs)
flipped_images = flip_images(X_imgs)
|
[
"tensorflow.image.transpose_image",
"os.listdir",
"tensorflow.reset_default_graph",
"tensorflow.image.rot90",
"math.ceil",
"math.floor",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.image.flip_up_down",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.image.crop_and_resize",
"numpy.expand_dims",
"tensorflow.image.extract_glimpse",
"tensorflow.image.flip_left_right"
] |
[((4951, 5006), 'os.listdir', 'os.listdir', (['"""/home/pallab/gestures-cnn/images/resized/"""'], {}), "('/home/pallab/gestures-cnn/images/resized/')\n", (4961, 5006), False, 'import os\n'), ((637, 687), 'numpy.array', 'np.array', (['[IMAGE_SIZE, IMAGE_SIZE]'], {'dtype': 'np.int32'}), '([IMAGE_SIZE, IMAGE_SIZE], dtype=np.int32)\n', (645, 687), True, 'import numpy as np\n'), ((721, 745), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (743, 745), True, 'import tensorflow as tf\n'), ((754, 818), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1, IMAGE_SIZE, IMAGE_SIZE, 3)'}), '(tf.float32, shape=(1, IMAGE_SIZE, IMAGE_SIZE, 3))\n', (768, 818), True, 'import tensorflow as tf\n'), ((917, 971), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['X', 'boxes', 'box_ind', 'crop_size'], {}), '(X, boxes, box_ind, crop_size)\n', (941, 971), True, 'import tensorflow as tf\n'), ((1295, 1335), 'numpy.array', 'np.array', (['X_scale_data'], {'dtype': 'np.float32'}), '(X_scale_data, dtype=np.float32)\n', (1303, 1335), True, 'import numpy as np\n'), ((2863, 2887), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2885, 2887), True, 'import tensorflow as tf\n'), ((3616, 3660), 'numpy.array', 'np.array', (['X_translated_arr'], {'dtype': 'np.float32'}), '(X_translated_arr, dtype=np.float32)\n', (3624, 3660), True, 'import numpy as np\n'), ((3741, 3765), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3763, 3765), True, 'import tensorflow as tf\n'), ((3774, 3835), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(IMAGE_SIZE, IMAGE_SIZE, 3)'}), '(tf.float32, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))\n', (3788, 3835), True, 'import tensorflow as tf\n'), ((3846, 3870), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (3860, 3870), True, 'import tensorflow as tf\n'), ((3884, 3906), 'tensorflow.image.rot90', 'tf.image.rot90', (['X'], {'k': 'k'}), '(X, k=k)\n', (3898, 3906), True, 'import tensorflow as tf\n'), ((4237, 4273), 'numpy.array', 'np.array', (['X_rotate'], {'dtype': 'np.float32'}), '(X_rotate, dtype=np.float32)\n', (4245, 4273), True, 'import numpy as np\n'), ((4343, 4367), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4365, 4367), True, 'import tensorflow as tf\n'), ((4376, 4437), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(IMAGE_SIZE, IMAGE_SIZE, 3)'}), '(tf.float32, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))\n', (4390, 4437), True, 'import tensorflow as tf\n'), ((4454, 4481), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['X'], {}), '(X)\n', (4478, 4481), True, 'import tensorflow as tf\n'), ((4496, 4520), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['X'], {}), '(X)\n', (4517, 4520), True, 'import tensorflow as tf\n'), ((4535, 4562), 'tensorflow.image.transpose_image', 'tf.image.transpose_image', (['X'], {}), '(X)\n', (4559, 4562), True, 'import tensorflow as tf\n'), ((4813, 4847), 'numpy.array', 'np.array', (['X_flip'], {'dtype': 'np.float32'}), '(X_flip, dtype=np.float32)\n', (4821, 4847), True, 'import numpy as np\n'), ((518, 562), 'numpy.array', 'np.array', (['[y1, x1, y2, x2]'], {'dtype': 'np.float32'}), '([y1, x1, y2, x2], dtype=np.float32)\n', (526, 562), True, 'import numpy as np\n'), ((981, 993), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (991, 993), True, 'import tensorflow as tf\n'), ((1495, 1533), 'numpy.array', 'np.array', 
(['[0.0, 0.2]'], {'dtype': 'np.float32'}), '([0.0, 0.2], dtype=np.float32)\n', (1503, 1533), True, 'import numpy as np\n'), ((2897, 2909), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2907, 2909), True, 'import tensorflow as tf\n'), ((3918, 3930), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3928, 3930), True, 'import tensorflow as tf\n'), ((4572, 4584), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4582, 4584), True, 'import tensorflow as tf\n'), ((1020, 1053), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1051, 1053), True, 'import tensorflow as tf\n'), ((1120, 1152), 'numpy.expand_dims', 'np.expand_dims', (['img_data'], {'axis': '(0)'}), '(img_data, axis=0)\n', (1134, 1152), True, 'import numpy as np\n'), ((1656, 1678), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (1660, 1678), False, 'from math import ceil, floor\n'), ((1794, 1833), 'numpy.array', 'np.array', (['[0.0, -0.2]'], {'dtype': 'np.float32'}), '([0.0, -0.2], dtype=np.float32)\n', (1802, 1833), True, 'import numpy as np\n'), ((2936, 2969), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2967, 2969), True, 'import tensorflow as tf\n'), ((3336, 3383), 'tensorflow.image.extract_glimpse', 'tf.image.extract_glimpse', (['X_imgs', 'size', 'offsets'], {}), '(X_imgs, size, offsets)\n', (3360, 3383), True, 'import tensorflow as tf\n'), ((3957, 3990), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3988, 3990), True, 'import tensorflow as tf\n'), ((4611, 4644), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4642, 4644), True, 'import tensorflow as tf\n'), ((1573, 1595), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (1577, 1595), False, 'from math import ceil, floor\n'), ((1938, 1967), 'math.floor', 'floor', (['((1 - 0.8) * IMAGE_SIZE)'], {}), '((1 - 0.8) * IMAGE_SIZE)\n', (1943, 1967), False, 'from math import ceil, floor\n'), ((2108, 2146), 'numpy.array', 'np.array', (['[0.2, 0.0]'], {'dtype': 'np.float32'}), '([0.2, 0.0], dtype=np.float32)\n', (2116, 2146), True, 'import numpy as np\n'), ((2398, 2437), 'numpy.array', 'np.array', (['[-0.2, 0.0]'], {'dtype': 'np.float32'}), '([-0.2, 0.0], dtype=np.float32)\n', (2406, 2437), True, 'import numpy as np\n'), ((1873, 1895), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (1877, 1895), False, 'from math import ceil, floor\n'), ((2316, 2338), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (2320, 2338), False, 'from math import ceil, floor\n'), ((2589, 2618), 'math.floor', 'floor', (['((1 - 0.8) * IMAGE_SIZE)'], {}), '((1 - 0.8) * IMAGE_SIZE)\n', (2594, 2618), False, 'from math import ceil, floor\n'), ((2174, 2196), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (2178, 2196), False, 'from math import ceil, floor\n'), ((2465, 2487), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (2469, 2487), False, 'from math import ceil, floor\n')]
|
import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum
class LamostToolsTestCase(unittest.TestCase):
def test_wavelength_solution(self):
wavelength_solution()
wavelength_solution(dr=5)
self.assertRaises(ValueError, wavelength_solution, dr=1)
def test_norm(self):
pseudo_continuum(np.ones(3909), np.ones(3909))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"astroNN.lamost.wavelength_solution",
"numpy.ones"
] |
[((432, 447), 'unittest.main', 'unittest.main', ([], {}), '()\n', (445, 447), False, 'import unittest\n'), ((197, 218), 'astroNN.lamost.wavelength_solution', 'wavelength_solution', ([], {}), '()\n', (216, 218), False, 'from astroNN.lamost import wavelength_solution, pseudo_continuum\n'), ((227, 252), 'astroNN.lamost.wavelength_solution', 'wavelength_solution', ([], {'dr': '(5)'}), '(dr=5)\n', (246, 252), False, 'from astroNN.lamost import wavelength_solution, pseudo_continuum\n'), ((369, 382), 'numpy.ones', 'np.ones', (['(3909)'], {}), '(3909)\n', (376, 382), True, 'import numpy as np\n'), ((384, 397), 'numpy.ones', 'np.ones', (['(3909)'], {}), '(3909)\n', (391, 397), True, 'import numpy as np\n')]
|
from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import JSONParser
import numpy as np
import json
import os
from .utils.spectrogram_utils import SpectrogramUtils
from .utils.feature_extraction_utils import FeatureExtractionUtils
from .utils.classification_utils import ClassificationUtils
from .utils.file_utils import FileUtils
from .utils.dir_utils import DirUtils
from .constants.headers import headers_data, headers_clusters, headers_clusters_no_display
file_utils = FileUtils()
dir_utils = DirUtils()
@api_view(['GET'])
@parser_classes((JSONParser,))
def get_species(request):
species = os.listdir('clusters/model/')
species_data = []
for specie in species:
with open('clusters/model/' + specie, 'r') as infile:
data = json.load(infile)
species_data.append(data)
return HttpResponse(json.dumps(species_data, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_clusters(request):
if request.method == 'POST':
data = request.data
directory = data['dir']
files = data['files']
features, segs, metadata = file_utils.process_files(
directory, files)
classification_utils = ClassificationUtils()
ex_level = 1
it_num = 5
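        # Append metadata column 6 (cast to float) to the acoustic features before normalising and running the LAMDA classifier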
data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
mad = 'binomial'
gad = '3pi'
datanorm, mininums, maximums = classification_utils.norm(data)
recon, mean_class, std_class = classification_utils.lamda(
ex_level, it_num, datanorm, mad, gad)
representive_calls = file_utils.get_representative_calls(
recon, datanorm, metadata)
keys_results = [header['label'] for header in headers_data]
keys_clusters = [header['label'] for header in headers_clusters]
keys_clusters_no_display = [header['label']
for header in headers_clusters_no_display]
data_results = []
for i, value in enumerate(metadata):
values = [value[0], str(recon[i]), *
(value[1:].tolist()), datanorm[i]]
zipbObj = zip(keys_results, values)
data_results.append(dict(zipbObj))
data_clusters = []
for i, value in enumerate(representive_calls):
zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
data_clusters.append(dict(zipbObj))
response = {
'results': {
'headers': headers_data,
'data': data_results,
'model': {
'features': datanorm.tolist(),
'min_values': mininums.tolist(),
'max_values': maximums.tolist(),
'metadata': metadata.tolist()
}
},
'clusters': {
'headers': headers_clusters,
'data': data_clusters
}
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def get_segment_in_image(request):
if request.method == 'POST':
data = request.data
spectrogram_utils = SpectrogramUtils()
filename = spectrogram_utils.get_segment_in_image(data['dir'],
data['filename'], 1, float(data['start']) - 0.5, float(data['end']) + 0.5, float(data['min_freq']) - 200, float(data['max_freq']) + 200)
response = {
'url': filename
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def save_cluster(request):
if request.method == 'POST':
data = request.data
features = np.array(data['model']['features'])
min_values = data['model']['min_values']
max_values = data['model']['max_values']
metadata = np.array(data['model']['metadata'])
indices = np.array(data['selected'])
audio_path, image_path, metadata_representative = file_utils.save_representative_call(
data['name'], features[indices], metadata[indices])
model = {
'name': data['name'],
'metadata': metadata_representative.tolist(),
'mean_values': np.mean(features[indices], axis=0).tolist(),
'std_values': np.std(features[indices], axis=0).tolist(),
'min_values': min_values,
'max_values': max_values,
'image_path': image_path,
'audio_path': audio_path
}
dir_utils.create_dir('clusters/model/')
with open('clusters/model/' + data['name'], 'w') as outfile:
json.dump(model, outfile)
return HttpResponse(json.dumps(model, separators=(',', ':')))
@api_view(['GET', 'POST'])
@parser_classes((JSONParser,))
def search_clusters(request):
if request.method == 'POST':
data = request.data
directory = data['dir']
files = data['files']
species = data['species']
features, segs, metadata = file_utils.process_files(
directory, files)
classification_utils = ClassificationUtils()
ex_level = 1
it_num = 5
data = np.hstack((features, metadata[:, 6].astype(float)[:, None]))
mad = 'binomial'
gad = '3pi'
num_datos, num_feat = data.shape
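        # Row 0 (mean 0.5, std 0.25) is the default "non-identified" (NIC) class; each saved species model contributes its own mean/std row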
mean_class = 0.5 * np.ones((1, num_feat))
std_class = 0.25 * np.ones((1, num_feat))
min_values = np.empty((0, num_feat))
max_values = np.empty((0, num_feat))
for specie in species:
with open('clusters/model/' + specie, 'r') as infile:
model = json.load(infile)
mean_class = np.vstack(
(mean_class, np.array(model['mean_values'])))
std_class = np.vstack(
(std_class, np.array(model['std_values'])))
min_values = np.vstack(
(min_values, np.array(model['min_values'])))
max_values = np.vstack(
(max_values, np.array(model['max_values'])))
general_min_values = np.min(min_values, axis=0)
general_max_values = np.max(max_values, axis=0)
datanorm, mininums, maximums = classification_utils.norm(
data, general_min_values, general_max_values)
recon = classification_utils.predict_lamda(
ex_level, datanorm, mad, gad, mean_class, std_class)
representive_calls = file_utils.get_representative_calls(
recon, datanorm, metadata)
keys_results = [header['label'] for header in headers_data]
keys_clusters = [header['label'] for header in headers_clusters]
keys_clusters_no_display = [header['label']
for header in headers_clusters_no_display]
data_results = []
for i, value in enumerate(metadata):
species_name = species[recon[i] - 1] if recon[i] > 0 else 'NIC'
values = [value[0], species_name, *
(value[1:].tolist()), datanorm[i]]
zipbObj = zip(keys_results, values)
data_results.append(dict(zipbObj))
data_clusters = []
for i, value in enumerate(representive_calls):
value[0] = species[i - 1] if i > 0 else 'NIC'
zipbObj = zip(keys_clusters + keys_clusters_no_display, value)
data_clusters.append(dict(zipbObj))
response = {
'results': {
'headers': headers_data,
'data': data_results,
'model': {
'features': datanorm.tolist(),
'min_values': mininums.tolist(),
'max_values': maximums.tolist(),
'metadata': metadata.tolist()
}
},
'clusters': {
'headers': headers_clusters,
'data': data_clusters
}
}
return HttpResponse(json.dumps(response, separators=(',', ':')))
|
[
"numpy.mean",
"os.listdir",
"numpy.ones",
"numpy.std",
"json.dumps",
"numpy.max",
"numpy.array",
"numpy.empty",
"rest_framework.decorators.parser_classes",
"numpy.min",
"json.load",
"rest_framework.decorators.api_view",
"json.dump"
] |
[((626, 643), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (634, 643), False, 'from rest_framework.decorators import api_view\n'), ((645, 674), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (659, 674), False, 'from rest_framework.decorators import parser_classes\n'), ((1007, 1032), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (1015, 1032), False, 'from rest_framework.decorators import api_view\n'), ((1034, 1063), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (1048, 1063), False, 'from rest_framework.decorators import parser_classes\n'), ((3171, 3196), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (3179, 3196), False, 'from rest_framework.decorators import api_view\n'), ((3198, 3227), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (3212, 3227), False, 'from rest_framework.decorators import parser_classes\n'), ((3774, 3799), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (3782, 3799), False, 'from rest_framework.decorators import api_view\n'), ((3801, 3830), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (3815, 3830), False, 'from rest_framework.decorators import parser_classes\n'), ((4978, 5003), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (4986, 5003), False, 'from rest_framework.decorators import api_view\n'), ((5005, 5034), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (5019, 5034), False, 'from rest_framework.decorators import parser_classes\n'), ((715, 744), 'os.listdir', 'os.listdir', (['"""clusters/model/"""'], {}), "('clusters/model/')\n", (725, 744), False, 'import os\n'), ((955, 1002), 'json.dumps', 'json.dumps', (['species_data'], {'separators': "(',', ':')"}), "(species_data, separators=(',', ':'))\n", (965, 1002), False, 'import json\n'), ((3939, 3974), 'numpy.array', 'np.array', (["data['model']['features']"], {}), "(data['model']['features'])\n", (3947, 3974), True, 'import numpy as np\n'), ((4092, 4127), 'numpy.array', 'np.array', (["data['model']['metadata']"], {}), "(data['model']['metadata'])\n", (4100, 4127), True, 'import numpy as np\n'), ((4147, 4173), 'numpy.array', 'np.array', (["data['selected']"], {}), "(data['selected'])\n", (4155, 4173), True, 'import numpy as np\n'), ((5692, 5715), 'numpy.empty', 'np.empty', (['(0, num_feat)'], {}), '((0, num_feat))\n', (5700, 5715), True, 'import numpy as np\n'), ((5737, 5760), 'numpy.empty', 'np.empty', (['(0, num_feat)'], {}), '((0, num_feat))\n', (5745, 5760), True, 'import numpy as np\n'), ((6349, 6375), 'numpy.min', 'np.min', (['min_values'], {'axis': '(0)'}), '(min_values, axis=0)\n', (6355, 6375), True, 'import numpy as np\n'), ((6405, 6431), 'numpy.max', 'np.max', (['max_values'], {'axis': '(0)'}), '(max_values, axis=0)\n', (6411, 6431), True, 'import numpy as np\n'), ((875, 892), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (884, 892), False, 'import json\n'), ((3123, 3166), 'json.dumps', 'json.dumps', (['response'], {'separators': "(',', ':')"}), "(response, separators=(',', ':'))\n", (3133, 3166), False, 'import json\n'), ((3726, 3769), 'json.dumps', 
'json.dumps', (['response'], {'separators': "(',', ':')"}), "(response, separators=(',', ':'))\n", (3736, 3769), False, 'import json\n'), ((4878, 4903), 'json.dump', 'json.dump', (['model', 'outfile'], {}), '(model, outfile)\n', (4887, 4903), False, 'import json\n'), ((4933, 4973), 'json.dumps', 'json.dumps', (['model'], {'separators': "(',', ':')"}), "(model, separators=(',', ':'))\n", (4943, 4973), False, 'import json\n'), ((5598, 5620), 'numpy.ones', 'np.ones', (['(1, num_feat)'], {}), '((1, num_feat))\n', (5605, 5620), True, 'import numpy as np\n'), ((5648, 5670), 'numpy.ones', 'np.ones', (['(1, num_feat)'], {}), '((1, num_feat))\n', (5655, 5670), True, 'import numpy as np\n'), ((8220, 8263), 'json.dumps', 'json.dumps', (['response'], {'separators': "(',', ':')"}), "(response, separators=(',', ':'))\n", (8230, 8263), False, 'import json\n'), ((5882, 5899), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (5891, 5899), False, 'import json\n'), ((4472, 4506), 'numpy.mean', 'np.mean', (['features[indices]'], {'axis': '(0)'}), '(features[indices], axis=0)\n', (4479, 4506), True, 'import numpy as np\n'), ((4543, 4576), 'numpy.std', 'np.std', (['features[indices]'], {'axis': '(0)'}), '(features[indices], axis=0)\n', (4549, 4576), True, 'import numpy as np\n'), ((5973, 6003), 'numpy.array', 'np.array', (["model['mean_values']"], {}), "(model['mean_values'])\n", (5981, 6003), True, 'import numpy as np\n'), ((6077, 6106), 'numpy.array', 'np.array', (["model['std_values']"], {}), "(model['std_values'])\n", (6085, 6106), True, 'import numpy as np\n'), ((6182, 6211), 'numpy.array', 'np.array', (["model['min_values']"], {}), "(model['min_values'])\n", (6190, 6211), True, 'import numpy as np\n'), ((6287, 6316), 'numpy.array', 'np.array', (["model['max_values']"], {}), "(model['max_values'])\n", (6295, 6316), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
#
# base.py
"""
Base functionality.
"""
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Based on cyberpandas
# https://github.com/ContinuumIO/cyberpandas
# Copyright (c) 2018, Anaconda, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# _isstringslice based on awkward-array
# https://github.com/scikit-hep/awkward-array
# Copyright (c) 2018-2019, <NAME>
# Licensed under the BSD 3-Clause License
#
# stdlib
from abc import abstractmethod
from numbers import Real
from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload
# 3rd party
import numpy # type: ignore
from domdf_python_tools.doctools import prettify_docstrings
from pandas.core.arrays import ExtensionArray # type: ignore
from pandas.core.dtypes.base import ExtensionDtype # type: ignore
from pandas.core.dtypes.generic import ABCExtensionArray # type: ignore
from typing_extensions import Literal, Protocol
__all__ = ["NumPyBackedExtensionArrayMixin"]
class NumPyBackedExtensionArrayMixin(ExtensionArray):
"""
Mixin for pandas extension backed by a numpy array.
"""
_dtype: Type[ExtensionDtype]
@property
def dtype(self):
"""
The dtype for this extension array, :class:`~.CelsiusType`.
"""
return self._dtype
@classmethod
def _from_sequence(cls, scalars: Iterable, dtype=None, copy: bool = False):
"""
Construct a new ExtensionArray from a sequence of scalars.
:param scalars: Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
:param dtype: Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
:type dtype: dtype, optional
:param copy: If True, copy the underlying data.
"""
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values: numpy.ndarray, original: ExtensionArray):
"""
Reconstruct an ExtensionArray after factorization.
:param values: An integer ndarray with the factorized values.
:param original: The original ExtensionArray that factorize was called on.
.. seealso::
:meth:`pandas.pandas.api.extensions.ExtensionArray.factorize`
"""
return cls(values)
@property
def shape(self) -> Tuple[int]:
"""
Return a tuple of the array dimensions.
"""
return len(self.data),
def __len__(self) -> int:
"""
Returns the length of this array.
"""
return len(self.data)
def setitem(self, indexer, value):
"""
Set the 'value' inplace.
"""
		# I think having a method separate from __setitem__ is good
# since we have to return here, but __setitem__ doesn't.
self[indexer] = value
return self
@property
def nbytes(self) -> int:
"""
The number of bytes needed to store this object in memory.
"""
return self._itemsize * len(self)
def _formatting_values(self):
return numpy.array(self._format_values(), dtype="object")
def copy(self, deep: bool = False) -> ABCExtensionArray:
"""
Return a copy of the array.
		:param deep: Accepted for API compatibility; the underlying data is always copied.
		:return: A new array backed by a copy of the data.
"""
return type(self)(self.data.copy())
@classmethod
def _concat_same_type(cls, to_concat: Sequence[ABCExtensionArray]) -> ABCExtensionArray:
"""
Concatenate multiple arrays.
:param to_concat: sequence of this type
"""
return cls(numpy.concatenate([array.data for array in to_concat]))
def tolist(self) -> List:
"""
Convert the array to a Python list.
"""
return self.data.tolist()
def argsort(
self,
ascending: bool = True,
kind: Union[Literal["quicksort"], Literal["mergesort"], Literal["heapsort"]] = "quicksort",
*args,
**kwargs,
) -> numpy.ndarray:
r"""
Return the indices that would sort this array.
:param ascending: Whether the indices should result in an ascending
or descending sort.
:param kind: {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
\*args and \*\*kwargs are passed through to :func:`numpy.argsort`.
:return: Array of indices that sort ``self``. If NaN values are contained,
NaN values are placed at the end.
.. seealso::
:class:`numpy.argsort`: Sorting implementation used internally.
"""
return self.data.argsort()
def unique(self) -> ExtensionArray: # noqa: D102
# https://github.com/pandas-dev/pandas/pull/19869
_, indices = numpy.unique(self.data, return_index=True)
data = self.data.take(numpy.sort(indices))
return self._from_ndarray(data)
_A = TypeVar("_A")
class BaseArray(numpy.lib.mixins.NDArrayOperatorsMixin, NumPyBackedExtensionArrayMixin):
ndim: int = 1
data: numpy.ndarray
@classmethod
def _from_ndarray(cls: _A, data: numpy.ndarray, copy: bool = False) -> _A:
"""
Zero-copy construction of a BaseArray from an ndarray.
:param data: This should have CelsiusType._record_type dtype
:param copy: Whether to copy the data.
:return:
"""
if copy:
data = data.copy()
new = cls([]) # type: ignore
new.data = data
return new
@property
def na_value(self):
"""
The missing value.
**Example:**
.. code-block::
>>> BaseArray([]).na_value
numpy.nan
"""
return self.dtype.na_value
def take(self, indices, allow_fill: bool = False, fill_value=None):
# Can't use pandas' take yet
# 1. axis
# 2. I don't know how to do the reshaping correctly.
indices = numpy.asarray(indices, dtype="int")
if allow_fill and fill_value is None:
fill_value = self.na_value
elif allow_fill and not isinstance(fill_value, tuple):
if not numpy.isnan(fill_value):
fill_value = int(fill_value)
if allow_fill:
mask = (indices == -1)
if not len(self):
if not (indices == -1).all():
msg = "Invalid take for empty array. Must be all -1."
raise IndexError(msg)
else:
					# all NA take from an empty array
took = (
numpy.full(
(len(indices), 2),
fill_value,
dtype=">u8",
).reshape(-1).astype(self.dtype._record_type)
)
return self._from_ndarray(took)
if (indices < -1).any():
msg = "Invalid value in 'indicies'. Must be all >= -1 for 'allow_fill=True'"
raise ValueError(msg)
took = self.data.take(indices)
if allow_fill:
took[mask] = fill_value
return self._from_ndarray(took)
def __repr__(self) -> str:
formatted = self._format_values()
return f"{self.__class__.__name__}({formatted!r})"
def isna(self):
"""
Indicator for whether each element is missing.
"""
if numpy.isnan(self.na_value):
return numpy.isnan(self.data)
else:
return self.data == self.na_value
# From https://github.com/scikit-hep/awkward-array/blob/2bbdb68d7a4fff2eeaed81eb76195e59232e8c13/awkward/array/base.py#L611
def _isstringslice(self, where):
if isinstance(where, str):
return True
elif isinstance(where, bytes):
raise TypeError("column selection must be str, not bytes, in Python 3")
elif isinstance(where, tuple):
return False
elif (
isinstance(where, (numpy.ndarray, self.__class__))
			and issubclass(where.dtype.type, (str, numpy.str_))  # numpy.str alias removed in NumPy 1.24
):
return True
elif isinstance(where, (numpy.ndarray, self.__class__)) and issubclass(
			where.dtype.type, (object, numpy.object_)
		) and not issubclass(where.dtype.type, (bool, numpy.bool_)):
return len(where) > 0 and all(isinstance(x, str) for x in where)
elif isinstance(where, (numpy.ndarray, self.__class__)):
return False
try:
assert len(where) > 0
assert all(isinstance(x, str) for x in where)
except (TypeError, AssertionError):
return False
else:
return True
def __delitem__(self, where):
if isinstance(where, str):
del self.data[where]
elif self._isstringslice(where):
for x in where:
del self.data[x]
else:
raise TypeError(f"invalid index for removing column from Table: {where}")
@property
@abstractmethod
def _parser(self):
raise NotImplementedError
def append(self, value) -> None:
"""
Append a value to this BaseArray.
:param value:
"""
self.data = numpy.append(self.data, self._parser(value).data)
def __setitem__(self, key, value):
value = self._parser(value).data
self.data[key] = value
class _SupportsIndex(Protocol):
def __index__(self) -> int:
...
_F = TypeVar("_F", bound="UserFloat")
@prettify_docstrings
class UserFloat(Real):
"""
Class that simulates a float.
:param value: Values to initialise the :class:`~domdf_python_tools.bases.UserFloat` with.
.. versionadded:: 1.6.0
"""
def __init__(self, value: Union[SupportsFloat, _SupportsIndex, str, bytes, bytearray] = 0.0):
self._value = (float(value), )
def as_integer_ratio(self) -> Tuple[int, int]:
return float(self).as_integer_ratio()
def hex(self) -> str: # noqa: A003 # pylint: disable=redefined-builtin
return float(self).hex()
def is_integer(self) -> bool:
return float(self).is_integer()
@classmethod
def fromhex(cls: Type[_F], __s: str) -> _F:
return cls(float.fromhex(__s))
def __add__(self: _F, other: float) -> _F:
return self.__class__(float(self).__add__(other))
def __sub__(self: _F, other: float) -> _F:
return self.__class__(float(self).__sub__(other))
def __mul__(self: _F, other: float) -> _F:
return self.__class__(float(self).__mul__(other))
def __floordiv__(self: _F, other: float) -> _F: # type: ignore
return self.__class__(float(self).__floordiv__(other))
def __truediv__(self: _F, other: float) -> _F:
return self.__class__(float(self).__truediv__(other))
def __mod__(self: _F, other: float) -> _F:
return self.__class__(float(self).__mod__(other))
def __divmod__(self: _F, other: float) -> Tuple[_F, _F]:
return tuple(self.__class__(x) for x in float(self).__divmod__(other)) # type: ignore
def __pow__(self: _F, other: float, mod=None) -> _F:
return self.__class__(float(self).__pow__(other, mod))
def __radd__(self: _F, other: float) -> _F:
return self.__class__(float(self).__radd__(other))
def __rsub__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rsub__(other))
def __rmul__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rmul__(other))
def __rfloordiv__(self: _F, other: float) -> _F: # type: ignore
return self.__class__(float(self).__rfloordiv__(other))
def __rtruediv__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rtruediv__(other))
def __rmod__(self: _F, other: float) -> _F:
return self.__class__(float(self).__rmod__(other))
def __rdivmod__(self: _F, other: float) -> Tuple[_F, _F]:
return tuple(self.__class__(x) for x in float(self).__rdivmod__(other)) # type: ignore
def __rpow__(self: _F, other: float, mod=None) -> _F:
return self.__class__(float(self).__rpow__(other, mod))
def __getnewargs__(self) -> Tuple[float]:
return self._value
def __trunc__(self) -> int:
return float(self).__trunc__()
@overload
def __round__(self, ndigits: int) -> float:
...
@overload
def __round__(self, ndigits: None = ...) -> int:
...
def __round__(self, ndigits: Optional[int] = None) -> Union[int, float]:
return float(self).__round__(ndigits)
def __eq__(self, other: object) -> bool:
if isinstance(other, UserFloat):
return self._value == other._value
else:
return float(self).__eq__(other)
def __ne__(self, other: object) -> bool:
if isinstance(other, UserFloat):
return self._value != other._value
else:
return float(self).__ne__(other)
def __lt__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value < other._value
else:
return float(self).__lt__(other)
def __le__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value <= other._value
else:
return float(self).__le__(other)
def __gt__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value > other._value
else:
return float(self).__gt__(other)
def __ge__(self, other: float) -> bool:
if isinstance(other, UserFloat):
return self._value >= other._value
else:
return float(self).__ge__(other)
def __neg__(self: _F) -> _F:
return self.__class__(float(self).__neg__())
def __pos__(self: _F) -> _F:
return self.__class__(float(self).__pos__())
def __str__(self) -> str:
return str(float(self))
def __int__(self) -> int:
return int(float(self))
def __float__(self) -> float:
return self._value[0]
def __abs__(self: _F) -> _F:
return self.__class__(float(self).__abs__())
def __hash__(self) -> int:
return float(self).__hash__()
def __repr__(self) -> str:
return str(self)
def __ceil__(self):
raise NotImplementedError
def __floor__(self):
raise NotImplementedError
|
[
"numpy.unique",
"numpy.sort",
"numpy.asarray",
"numpy.isnan",
"numpy.concatenate",
"typing.TypeVar"
] |
[((5872, 5885), 'typing.TypeVar', 'TypeVar', (['"""_A"""'], {}), "('_A')\n", (5879, 5885), False, 'from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload\n'), ((9643, 9675), 'typing.TypeVar', 'TypeVar', (['"""_F"""'], {'bound': '"""UserFloat"""'}), "('_F', bound='UserFloat')\n", (9650, 9675), False, 'from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload\n'), ((5743, 5785), 'numpy.unique', 'numpy.unique', (['self.data'], {'return_index': '(True)'}), '(self.data, return_index=True)\n', (5755, 5785), False, 'import numpy\n'), ((6745, 6780), 'numpy.asarray', 'numpy.asarray', (['indices'], {'dtype': '"""int"""'}), "(indices, dtype='int')\n", (6758, 6780), False, 'import numpy\n'), ((7868, 7894), 'numpy.isnan', 'numpy.isnan', (['self.na_value'], {}), '(self.na_value)\n', (7879, 7894), False, 'import numpy\n'), ((4728, 4782), 'numpy.concatenate', 'numpy.concatenate', (['[array.data for array in to_concat]'], {}), '([array.data for array in to_concat])\n', (4745, 4782), False, 'import numpy\n'), ((5810, 5829), 'numpy.sort', 'numpy.sort', (['indices'], {}), '(indices)\n', (5820, 5829), False, 'import numpy\n'), ((7906, 7928), 'numpy.isnan', 'numpy.isnan', (['self.data'], {}), '(self.data)\n', (7917, 7928), False, 'import numpy\n'), ((6919, 6942), 'numpy.isnan', 'numpy.isnan', (['fill_value'], {}), '(fill_value)\n', (6930, 6942), False, 'import numpy\n')]
|
"""
view predication for point cloud,
Run valid_one_point_cloud first
"""
import torch
import numpy as np
import sys
import os
import pptk
# ------ Configurations ------
# path to pth file
pth_file = "../tmp/scene0015_00_vh_clean_2.pth.Random.100"
show_gt = False  # show ground truth or not; the ground-truth viewer is drawn first, i.e., at the back
# --- end of configurations ---
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf',
'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink',
'bathtub', 'otherfurniture']
# CLASS_COLOR = [
# [138, 43, 226], [0, 128, 128], [0, 255, 0], [0, 0, 255], [255, 255, 0],
# [0, 255, 255], [255, 0, 255], [192, 192, 192], [128, 128, 128], [128, 0, 0],
# [128, 128, 0], [0, 128, 0], [128, 0, 128], [255, 0, 0], [0, 0, 128],
# [34, 139, 34], [64, 224, 208], [0, 0, 0], [75, 0, 130], [205, 133, 63]
# ]
SCANNET_COLOR_MAP = {
0: (0., 0., 0.),
1: (174., 199., 232.),
2: (152., 223., 138.),
3: (31., 119., 180.),
4: (255., 187., 120.),
5: (188., 189., 34.),
6: (140., 86., 75.),
7: (255., 152., 150.),
8: (214., 39., 40.),
9: (197., 176., 213.),
10: (148., 103., 189.),
11: (196., 156., 148.),
12: (23., 190., 207.),
14: (247., 182., 210.),
15: (66., 188., 102.),
16: (219., 219., 141.),
17: (140., 57., 197.),
18: (202., 185., 52.),
19: (51., 176., 203.),
20: (200., 54., 131.),
21: (92., 193., 61.),
22: (78., 71., 183.),
23: (172., 114., 82.),
24: (255., 127., 14.),
25: (91., 163., 138.),
26: (153., 98., 156.),
27: (140., 153., 101.),
28: (158., 218., 229.),
29: (100., 125., 154.),
30: (178., 127., 135.),
32: (146., 111., 194.),
33: (44., 160., 44.),
34: (112., 128., 144.),
35: (96., 207., 209.),
36: (227., 119., 194.),
37: (213., 92., 176.),
38: (94., 106., 211.),
39: (82., 84., 163.),
40: (100., 85., 144.),
}
VALID_CLASS_IDS = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39
]
CLASS_COLOR = []
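# Build the RGB palette (later scaled to [0, 1]) for the 20 valid ScanNet class ids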
for valid_id in VALID_CLASS_IDS:
CLASS_COLOR.append(SCANNET_COLOR_MAP[valid_id])
CLASS_COLOR = np.array(CLASS_COLOR) / 255.0
def show_predication_result(pth_file, show_gt):
data = torch.load(pth_file)
coords, colors, labels, pred = data
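    # Points labelled -100 are the ignore index; drop them before colouring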
ignore_index = labels == -100
coords = coords[~ignore_index]
colors = colors[~ignore_index]
labels = labels[~ignore_index]
pred = pred[~ignore_index]
gt_color = [CLASS_COLOR[x] for x in labels.astype("int32")]
pred_color = [CLASS_COLOR[x] for x in pred.astype("int32")]
if show_gt:
v1 = pptk.viewer(coords, gt_color)
v1.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False)
v1.set(theta=1.8, lookat=[0, 0, 0], phi=0.52)
v2 = pptk.viewer(coords, pred_color)
v2.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False)
v2.set(theta=1.8, lookat=[0, 0, 0], phi=0.52)
if __name__ == "__main__":
show_predication_result(pth_file, show_gt)
|
[
"numpy.array",
"torch.load",
"pptk.viewer"
] |
[((2278, 2299), 'numpy.array', 'np.array', (['CLASS_COLOR'], {}), '(CLASS_COLOR)\n', (2286, 2299), True, 'import numpy as np\n'), ((2370, 2390), 'torch.load', 'torch.load', (['pth_file'], {}), '(pth_file)\n', (2380, 2390), False, 'import torch\n'), ((2987, 3018), 'pptk.viewer', 'pptk.viewer', (['coords', 'pred_color'], {}), '(coords, pred_color)\n', (2998, 3018), False, 'import pptk\n'), ((2761, 2790), 'pptk.viewer', 'pptk.viewer', (['coords', 'gt_color'], {}), '(coords, gt_color)\n', (2772, 2790), False, 'import pptk\n')]
|
import datetime
from pymongo import MongoClient
import pymongo
import pprint
try:
db = MongoClient("mongodb://localhost:27017")["hkust"]
f=0.05
try:
print("Querying Documents...")
listOfCourseWithWaitingListSize = db.course.aggregate([
{ "$unwind": "$sections" },
# { "$project": { "newProduct": {"$multiply": [f, "$sections.enrol"]}, "satisfied": satisfied} },
# { "$project": { "compareResult": {"$gte": ["$sections.wait", "$newProduct"]}, "match_ts" : "$sections.recordTime"} },
{"$match": #filter timeslot
{"$and":[
# {"compareResult": "true"},
# {"satisfied" : "Yes"},
#{"sections.sectionId": {"$ne": null}},
#{"sections.sectionId": {"$exists": true}},
# {"sections.sectionId": {"$regex": '^L'}},
{"sections.recordTime": {"$gte": datetime.datetime.strptime("2018-01-26T14:00Z", "%Y-%m-%dT%H:%MZ")}},
{"sections.recordTime": {"$lte": datetime.datetime.strptime("2018-02-01T11:30Z", "%Y-%m-%dT%H:%MZ")}}
]
}
},
{ "$project":
{"code": 1,
"title": 1,
"credits": 1,
"sections":1,
# "description":1,
"satisfied":{"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]},
"lecSatisfied":{
"$cond":[{
"$and":[
{
"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]
},
{
"$eq":[{"$substr": ["$sections.sectionId",0,1]},"L"]
}
]
},1,0]
}
},
},
{
"$sort": {"sections.sectionId": 1 }
},
{
"$group":{
"_id":{ "code": "$code", "recordTime":"$sections.recordTime"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$sections.recordTime"},
"sections":{
"$push": {
"sectionId":"$sections.sectionId",
"dateAndTime":"$sections.offerings.dateAndTime",
"quota":"$sections.quota",
"enrol":"$sections.enrol",
"avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } ,
"wait":"$sections.wait",
"satisfied":"$satisfied",
}
},
"lecSatisfiedCount":{"$sum":"$lecSatisfied"}
}
},
{ "$match": {"lecSatisfiedCount": {"$gt":0}}
},
{
"$sort": {"recordTime": 1 }
},
{
"$group":{
"_id":{ "code": "$code"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$recordTime"},
"sections":{"$last": "$sections"},
"lecSatisfiedCount":{"$last": "$lecSatisfiedCount"}
}
},
{
"$project":{
"_id":0,
"code": 1,
"title":1,
"credits": 1,
"recordTime":1,
"sections":1
}
}
]
)
# pprint.pprint(listOfCourseWithWaitingListSize)
recordNo = 0
for oneCourse in listOfCourseWithWaitingListSize:
recordNo = recordNo + 1
print("Record {:d}:".format(recordNo))
pprint.pprint(oneCourse)
# print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"]))
# for oneSection in oneCourse["sections"]:
# print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"])))
# print("description: {:s}".format(oneCourse["description"]))
#pprint(" Record {:d}: (sid={:s}, sname={:s}, byear={:d})".format(recordNo, oneStudent["sid"], oneStudent["sname"], oneStudent["byear"]))
#print("Record {:d}: (course={:s})".format(recordNo, oneCourse))
except pymongo.errors.ConnectionFailure as error:
print("Document Querying Failed! Error Message: \"{}\"".format(error))
#return outputCourseDetails(courseCode, lectureSection, satisfied)
except pymongo.errors.ConnectionFailure as error:
print("Document Insertion Failed! Error Message: \"{}\"".format(error))
import numpy
import time
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
#Model 1
def trainModel(trainingDataFilename):
# to set a seed of a random number generator used in the "optimization" tool in the neural network model
    numpy.random.seed(int(time.time()))  # seed must be an integer
# Step 1: to load the data
# Step 1a: to read the dataset with "numpy" function
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
# Step 1b: to split the dataset into two datasets, namely the input attribute dataset (X) and the target attribute dataset (Y)
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
# Step 5: To evaluate the model
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 2:
def trainModel2(trainingDataFilename):
    numpy.random.seed(int(time.time()))  # seed must be an integer
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(10, input_dim=4, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 3:
def trainModel3(trainingDataFilename):
    numpy.random.seed(int(time.time()))  # seed must be an integer
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(64, input_dim=4, activation='softmax'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 4:
def trainModel4(trainingDataFilename):
    numpy.random.seed(int(time.time()))  # seed must be an integer
dataset = numpy.loadtxt(trainingDataFilename, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='softmax'))
model.add(Dense(7, activation='softmax'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss='logcosh', optimizer='rmsprop', metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.3, epochs=300, batch_size=7)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
# model 5:
def trainModel5(trainingDataFilename):
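    # Note: the two nested helpers below are only defined, never called; invoke them with the pre-/post-add-drop CSV files to actually train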
def trainModel5_beforeAddDrop(trainingDataFile_beforeAddDrop):
        numpy.random.seed(int(time.time()))  # seed must be an integer
dataset = numpy.loadtxt(trainingDataFile_beforeAddDrop, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
def trainModel5_afterAddDrop(trainingDataFile_afterAddDrop):
        numpy.random.seed(int(time.time()))  # seed must be an integer
dataset = numpy.loadtxt(trainingDataFile_afterAddDrop, delimiter=",")
X = dataset[:,0:4]
Y = dataset[:,4]
# Step 2: to define the model
model = Sequential()
model.add(Dense(13, input_dim=4, activation='relu'))
model.add(Dense(7, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Step 3: to compile the model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Step 4: To fit the model
model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10)
scores = model.evaluate(X, Y)
print("Evaluation: ")
print("{}: {}".format(model.metrics_names[1], scores[1]*100))
return model
|
[
"datetime.datetime.strptime",
"keras.models.Sequential",
"keras.layers.Dense",
"pymongo.MongoClient",
"numpy.loadtxt",
"time.time",
"pprint.pprint"
] |
[((4443, 4493), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (4456, 4493), False, 'import numpy\n'), ((4702, 4714), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4712, 4714), False, 'from keras.models import Sequential\n'), ((5321, 5371), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (5334, 5371), False, 'import numpy\n'), ((5450, 5462), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5460, 5462), False, 'from keras.models import Sequential\n'), ((6199, 6249), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (6212, 6249), False, 'import numpy\n'), ((6328, 6340), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6338, 6340), False, 'from keras.models import Sequential\n'), ((6882, 6932), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (6895, 6932), False, 'import numpy\n'), ((7011, 7023), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7021, 7023), False, 'from keras.models import Sequential\n'), ((88, 128), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://localhost:27017"""'], {}), "('mongodb://localhost:27017')\n", (99, 128), False, 'from pymongo import MongoClient\n'), ((4335, 4346), 'time.time', 'time.time', ([], {}), '()\n', (4344, 4346), False, 'import time\n'), ((4726, 4767), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(13, input_dim=4, activation='relu')\n", (4731, 4767), False, 'from keras.layers import Dense\n'), ((4780, 4807), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""relu"""'}), "(7, activation='relu')\n", (4785, 4807), False, 'from keras.layers import Dense\n'), ((4820, 4850), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4825, 4850), False, 'from keras.layers import Dense\n'), ((5297, 5308), 'time.time', 'time.time', ([], {}), '()\n', (5306, 5308), False, 'import time\n'), ((5474, 5515), 'keras.layers.Dense', 'Dense', (['(10)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(10, input_dim=4, activation='relu')\n", (5479, 5515), False, 'from keras.layers import Dense\n'), ((5528, 5556), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5533, 5556), False, 'from keras.layers import Dense\n'), ((5569, 5597), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5574, 5597), False, 'from keras.layers import Dense\n'), ((5610, 5638), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5615, 5638), False, 'from keras.layers import Dense\n'), ((5651, 5679), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5656, 5679), False, 'from keras.layers import Dense\n'), ((5692, 5720), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5697, 5720), False, 'from keras.layers import Dense\n'), ((5733, 5763), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5738, 5763), False, 'from keras.layers import Dense\n'), ((6175, 6186), 'time.time', 'time.time', ([], {}), '()\n', 
(6184, 6186), False, 'import time\n'), ((6352, 6396), 'keras.layers.Dense', 'Dense', (['(64)'], {'input_dim': '(4)', 'activation': '"""softmax"""'}), "(64, input_dim=4, activation='softmax')\n", (6357, 6396), False, 'from keras.layers import Dense\n'), ((6409, 6439), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (6414, 6439), False, 'from keras.layers import Dense\n'), ((6858, 6869), 'time.time', 'time.time', ([], {}), '()\n', (6867, 6869), False, 'import time\n'), ((7035, 7079), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""softmax"""'}), "(13, input_dim=4, activation='softmax')\n", (7040, 7079), False, 'from keras.layers import Dense\n'), ((7092, 7122), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, activation='softmax')\n", (7097, 7122), False, 'from keras.layers import Dense\n'), ((7135, 7165), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (7140, 7165), False, 'from keras.layers import Dense\n'), ((7656, 7716), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFile_beforeAddDrop'], {'delimiter': '""","""'}), "(trainingDataFile_beforeAddDrop, delimiter=',')\n", (7669, 7716), False, 'import numpy\n'), ((7799, 7811), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7809, 7811), False, 'from keras.models import Sequential\n'), ((8407, 8466), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFile_afterAddDrop'], {'delimiter': '""","""'}), "(trainingDataFile_afterAddDrop, delimiter=',')\n", (8420, 8466), False, 'import numpy\n'), ((8549, 8561), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8559, 8561), False, 'from keras.models import Sequential\n'), ((2914, 2938), 'pprint.pprint', 'pprint.pprint', (['oneCourse'], {}), '(oneCourse)\n', (2927, 2938), False, 'import pprint\n'), ((7631, 7642), 'time.time', 'time.time', ([], {}), '()\n', (7640, 7642), False, 'import time\n'), ((7824, 7865), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(13, input_dim=4, activation='relu')\n", (7829, 7865), False, 'from keras.layers import Dense\n'), ((7879, 7906), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""relu"""'}), "(7, activation='relu')\n", (7884, 7906), False, 'from keras.layers import Dense\n'), ((7920, 7950), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (7925, 7950), False, 'from keras.layers import Dense\n'), ((8382, 8393), 'time.time', 'time.time', ([], {}), '()\n', (8391, 8393), False, 'import time\n'), ((8574, 8615), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(13, input_dim=4, activation='relu')\n", (8579, 8615), False, 'from keras.layers import Dense\n'), ((8629, 8656), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""relu"""'}), "(7, activation='relu')\n", (8634, 8656), False, 'from keras.layers import Dense\n'), ((8670, 8700), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (8675, 8700), False, 'from keras.layers import Dense\n'), ((792, 858), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2018-01-26T14:00Z"""', '"""%Y-%m-%dT%H:%MZ"""'], {}), "('2018-01-26T14:00Z', '%Y-%m-%dT%H:%MZ')\n", (818, 858), False, 'import datetime\n'), ((900, 966), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2018-02-01T11:30Z"""', 
'"""%Y-%m-%dT%H:%MZ"""'], {}), "('2018-02-01T11:30Z', '%Y-%m-%dT%H:%MZ')\n", (926, 966), False, 'import datetime\n')]
|
# encoding: utf-8
from __future__ import print_function
import os
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import Formatter
from jaqs.trade.analyze.report import Report
from jaqs.data import RemoteDataService
from jaqs.data.basic.instrument import InstManager
from jaqs.trade import common
import jaqs.util as jutil
STATIC_FOLDER = jutil.join_relative_path("trade/analyze/static")
TO_PCT = 100.0
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
'axes.facecolor': '#F6F6F6',
'axes.edgecolor': '#D3D3D3',
'text.color': '#555555',
'grid.color': '#B1B1B1',
'grid.alpha': 0.3,
# scale
'axes.linewidth': 2.0,
'axes.titlepad': 12,
'grid.linewidth': 1.0,
'grid.linestyle': '-',
# font size
'font.size': 13,
'axes.titlesize': 18,
'axes.labelsize': 14,
'legend.fontsize': 'small',
'lines.linewidth': 2.5,
}
class TradeRecordEmptyError(Exception):
def __init__(self, *args):
super(TradeRecordEmptyError, self).__init__(*args)
class MyFormatter(Formatter):
def __init__(self, dates, fmt='%Y%m'):
self.dates = dates
self.fmt = fmt
def __call__(self, x, pos=0):
"""Return the label for time x at position pos"""
ind = int(np.round(x))
if ind >= len(self.dates) or ind < 0:
return ''
# return self.dates[ind].strftime(self.fmt)
return pd.to_datetime(self.dates[ind], format="%Y%m%d").strftime(self.fmt)
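# Usage sketch (grounded in how this module uses it further below): MyFormatter maps
# integer plot positions back to yyyymmdd dates so gaps between trading days are not
# drawn, e.g. ax.xaxis.set_major_formatter(MyFormatter(idx_dt, '%y-%m-%d')).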
class BaseAnalyzer(object):
"""
Attributes
----------
_trades : pd.DataFrame
_configs : dict
data_api : BaseDataServer
_universe : set
All securities that have been traded.
"""
def __init__(self):
self.file_folder = ""
self._trades = None
self._configs = None
self.data_api = None
self.dataview = None
self._universe = []
self._closes = None
self._closes_adj = None
self.daily_position = None
self.adjust_mode = None
self.inst_map = dict()
self.performance_metrics = dict()
self.risk_metrics = dict()
self.report_dic = dict()
@property
def trades(self):
"""Read-only attribute"""
return self._trades
@property
def universe(self):
"""Read-only attribute"""
return self._universe
@property
def configs(self):
"""Read-only attribute"""
return self._configs
@property
def closes(self):
"""Read-only attribute, close prices of securities in the universe"""
return self._closes
@property
def closes_adj(self):
"""Read-only attribute, close prices of securities in the universe"""
return self._closes_adj
def initialize(self, data_api=None, dataview=None, file_folder='.'):
"""
Read trades from csv file to DataFrame of given data type.
Parameters
----------
data_api : RemoteDataService
dataview : DataView
file_folder : str
Directory path where trades and configs are stored.
"""
self.data_api = data_api
self.dataview = dataview
type_map = {'task_id': str,
'entrust_no': str,
'entrust_action': str,
'symbol': str,
'fill_price': float,
'fill_size': float,
'fill_date': np.integer,
'fill_time': np.integer,
'fill_no': str,
'commission': float}
abs_path = os.path.abspath(file_folder)
self.file_folder = abs_path
trades = pd.read_csv(os.path.join(self.file_folder, 'trades.csv'), ',', dtype=type_map)
if trades.empty:
raise TradeRecordEmptyError("No trade records found in your 'trades.csv' file. Analysis stopped.")
self._init_universe(trades.loc[:, 'symbol'].values)
self._init_configs(self.file_folder)
self._init_trades(trades)
self._init_symbol_price()
self._init_inst_data()
def _init_inst_data(self):
symbol_str = ','.join(self.universe)
if self.dataview is not None:
data_inst = self.dataview.data_inst
self.inst_map = data_inst.to_dict(orient='index')
elif self.data_api is not None:
inst_mgr = InstManager(data_api=self.data_api, symbol=symbol_str)
self.inst_map = {k: v.__dict__ for k, v in inst_mgr.inst_map.items()}
del inst_mgr
else:
raise ValueError("no dataview or dataapi provided.")
def _init_trades(self, df):
"""Add datetime column. """
df.loc[:, 'fill_dt'] = jutil.combine_date_time(df.loc[:, 'fill_date'], df.loc[:, 'fill_time'])
df = df.set_index(['symbol', 'fill_dt']).sort_index(axis=0)
# self._trades = jutil.group_df_to_dict(df, by='symbol')
self._trades = df
def _init_symbol_price(self):
"""Get close price of securities in the universe from data server."""
if self.dataview is not None:
df_close = self.dataview.get_ts('close', start_date=self.start_date, end_date=self.end_date)
df_close_adj = self.dataview.get_ts('close_adj', start_date=self.start_date, end_date=self.end_date)
else:
df, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
start_date=self.start_date, end_date=self.end_date)
if msg != '0,':
print(msg)
df_close = df.pivot(index='trade_date', columns='symbol', values='close')
df_adj, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
start_date=self.start_date, end_date=self.end_date)
if msg != '0,':
print(msg)
df_close_adj = df_adj.pivot(index='trade_date', columns='symbol', values='close')
self._closes = df_close
self._closes_adj = df_close_adj
def _init_universe(self, securities):
"""Return a set of securities."""
self._universe = set(securities)
def _init_configs(self, folder):
import codecs
with codecs.open(os.path.join(folder, 'configs.json'), 'r', encoding='utf-8') as f:
configs = json.load(f)
self._configs = configs
self.init_balance = self.configs['init_balance']
self.start_date = self.configs['start_date']
self.end_date = self.configs['end_date']
@staticmethod
def _process_trades(df):
"""Add various statistics to trades DataFrame."""
from jaqs.trade import common
# df = df.set_index('fill_date')
# pre-process
cols_to_drop = ['task_id', 'entrust_no', 'fill_no']
df = df.drop(cols_to_drop, axis=1)
def _apply(gp_df):
# calculation of non-cumulative fields
direction = gp_df['entrust_action'].apply(lambda s: 1 if common.ORDER_ACTION.is_positive(s) else -1)
fill_size, fill_price = gp_df['fill_size'], gp_df['fill_price']
turnover = fill_size * fill_price
gp_df.loc[:, 'BuyVolume'] = (direction + 1) / 2 * fill_size
gp_df.loc[:, 'SellVolume'] = (direction - 1) / -2 * fill_size
# Calculation of cumulative fields
gp_df.loc[:, 'CumVolume'] = fill_size.cumsum()
gp_df.loc[:, 'CumTurnOver'] = turnover.cumsum()
gp_df.loc[:, 'CumNetTurnOver'] = (turnover * -direction).cumsum()
gp_df.loc[:, 'position'] = (fill_size * direction).cumsum()
gp_df.loc[:, 'AvgPosPrice'] = calc_avg_pos_price(gp_df.loc[:, 'position'].values, fill_price.values)
gp_df.loc[:, 'CumProfit'] = (gp_df.loc[:, 'CumNetTurnOver'] + gp_df.loc[:, 'position'] * fill_price)
return gp_df
gp = df.groupby(by='symbol')
res = gp.apply(_apply)
return res
def process_trades(self):
# self._trades = {k: self._process_trades(v) for k, v in self.trades.items()}
self._trades = self._process_trades(self._trades)
def get_pos_change_info(self):
trades = pd.concat(self.trades.values(), axis=0)
gp = trades.groupby(by=['fill_date'], as_index=False)
res = OrderedDict()
account = OrderedDict()
for date, df in gp:
df_mod = df.loc[:, ['symbol', 'entrust_action', 'fill_size', 'fill_price',
'position', 'AvgPosPrice']]
df_mod.columns = ['symbol', 'action', 'size', 'price',
'position', 'cost price']
res[str(date)] = df_mod
mv = sum(df_mod.loc[:, 'price'] * df.loc[:, 'position'])
current_profit = sum(df.loc[:, 'CumProfit'])
cash = self.configs['init_balance'] + current_profit - mv
account[str(date)] = {'market_value': mv, 'cash': cash}
self.position_change = res
self.account = account
def get_daily(self):
close = self.closes
trade = self.trades
        # pre-process
trade_cols = ['fill_date', 'BuyVolume', 'SellVolume', 'commission', 'position', 'AvgPosPrice', 'CumNetTurnOver']
trade = trade.loc[:, trade_cols]
gp = trade.groupby(by=['symbol', 'fill_date'])
func_last = lambda ser: ser.iat[-1]
trade = gp.agg({'BuyVolume': np.sum, 'SellVolume': np.sum, 'commission': np.sum,
'position': func_last, 'AvgPosPrice': func_last, 'CumNetTurnOver': func_last})
trade.index.names = ['symbol', 'trade_date']
# get daily position
df_position = trade['position'].unstack('symbol').fillna(method='ffill').fillna(0.0)
daily_position = df_position.reindex(close.index)
daily_position = daily_position.fillna(method='ffill').fillna(0)
self.daily_position = daily_position
# calculate statistics
close = pd.DataFrame(close.T.stack())
close.columns = ['close']
close.index.names = ['symbol', 'trade_date']
merge = pd.concat([close, trade], axis=1, join='outer')
def _apply(gp_df):
cols_nan_to_zero = ['BuyVolume', 'SellVolume', 'commission']
cols_nan_fill = ['close', 'position', 'AvgPosPrice', 'CumNetTurnOver']
# merge: pd.DataFrame
gp_df.loc[:, cols_nan_fill] = gp_df.loc[:, cols_nan_fill].fillna(method='ffill')
gp_df.loc[:, cols_nan_fill] = gp_df.loc[:, cols_nan_fill].fillna(0)
gp_df.loc[:, cols_nan_to_zero] = gp_df.loc[:, cols_nan_to_zero].fillna(0)
mask = gp_df.loc[:, 'AvgPosPrice'] < 1e-5
gp_df.loc[mask, 'AvgPosPrice'] = gp_df.loc[mask, 'close']
gp_df.loc[:, 'CumProfit'] = gp_df.loc[:, 'CumNetTurnOver'] + gp_df.loc[:, 'position'] * gp_df.loc[:, 'close']
gp_df.loc[:, 'CumProfitComm'] = gp_df['CumProfit'] - gp_df['commission'].cumsum()
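            # Daily PnL is decomposed below into a trading part (today's fills
            # marked to today's close) and a holding part (yesterday's position
            # marked from yesterday's close to today's close); they sum to total_pnl.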
daily_net_turnover = gp_df['CumNetTurnOver'].diff(1).fillna(gp_df['CumNetTurnOver'].iat[0])
daily_position_change = gp_df['position'].diff(1).fillna(gp_df['position'].iat[0])
gp_df['trading_pnl'] = (daily_net_turnover + gp_df['close'] * daily_position_change)
gp_df['holding_pnl'] = (gp_df['close'].diff(1) * gp_df['position'].shift(1)).fillna(0.0)
gp_df.loc[:, 'total_pnl'] = gp_df['trading_pnl'] + gp_df['holding_pnl']
return gp_df
gp = merge.groupby(by='symbol')
res = gp.apply(_apply)
self.daily = res
'''
def get_daily(self):
"""Add various statistics to daily DataFrame."""
self.daily = self._get_daily(self.closes, self.trades)
daily_dic = dict()
for sec, df_trade in self.trades.items():
df_close = self.closes[sec].rename('close')
res = self._get_daily(df_close, df_trade)
daily_dic[sec] = res
self.daily = daily_dic
'''
def get_returns(self, compound_return=True, consider_commission=True):
cols = ['trading_pnl', 'holding_pnl', 'total_pnl', 'commission', 'CumProfitComm', 'CumProfit']
'''
dic_symbol = {sec: self.inst_map[sec]['multiplier'] * df_daily.loc[:, cols]
for sec, df_daily in self.daily.items()}
df_profit = pd.concat(dic_symbol, axis=1) # this is cumulative profit
df_profit = df_profit.fillna(method='ffill').fillna(0.0)
df_pnl = df_profit.stack(level=1)
df_pnl = df_pnl.sum(axis=1)
df_pnl = df_pnl.unstack(level=1)
'''
daily = self.daily.loc[:, cols]
daily = daily.stack().unstack('symbol')
df_pnl = daily.sum(axis=1)
df_pnl = df_pnl.unstack(level=1)
self.df_pnl = df_pnl
        # TODO temporary solution
if consider_commission:
strategy_value = (df_pnl['total_pnl'] - df_pnl['commission']).cumsum() + self.init_balance
else:
strategy_value = df_pnl['total_pnl'].cumsum() + self.init_balance
market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill')
market_values.columns = ['strat', 'bench']
df_returns = market_values.pct_change(periods=1).fillna(0.0)
df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
if compound_return:
df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
else:
df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench']
df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0)
start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d")
end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d")
years = (end - start).days / 365.0
self.performance_metrics['Annual Return (%)'] =\
100 * (np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1)
self.performance_metrics['Annual Volatility (%)'] =\
100 * (df_returns.loc[:, 'active'].std() * np.sqrt(common.CALENDAR_CONST.TRADE_DAYS_PER_YEAR))
self.performance_metrics['Sharpe Ratio'] = (self.performance_metrics['Annual Return (%)']
/ self.performance_metrics['Annual Volatility (%)'])
self.risk_metrics['Beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1]
# bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date')
# df_returns = df_returns.join(bt_strat_mv, how='right')
self.returns = df_returns
def plot_pnl(self, save_folder=None):
old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()}
mpl.rcParams.update(MPL_RCPARAMS)
if save_folder is None:
save_folder = self.file_folder
fig1 = plot_portfolio_bench_pnl(self.returns.loc[:, 'strat_cum'],
self.returns.loc[:, 'bench_cum'],
self.returns.loc[:, 'active_cum'])
fig1.savefig(os.path.join(save_folder,'pnl_img.png'), facecolor=fig1.get_facecolor(), dpi=fig1.get_dpi())
fig2 = plot_daily_trading_holding_pnl(self.df_pnl['trading_pnl'],
self.df_pnl['holding_pnl'],
self.df_pnl['total_pnl'],
self.df_pnl['total_pnl'].cumsum())
fig2.savefig(os.path.join(save_folder,'pnl_img_trading_holding.png'), facecolor=fig2.get_facecolor(), dpi=fig2.get_dpi())
mpl.rcParams.update(old_mpl_rcparams)
def plot_pnl_OLD(self, save_folder=None):
if save_folder is None:
save_folder = self.file_folder
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, figsize=(21, 8), dpi=300, sharex=True)
idx0 = self.returns.index
idx = np.arange(len(idx0))
bar_width = 0.3
ax0.bar(idx-bar_width/2, self.df_pnl['trading_pnl'], width=bar_width, color='indianred', label='Trading PnL',)
ax0.bar(idx+bar_width/2, self.df_pnl['holding_pnl'], width=bar_width, color='royalblue', label='Holding PnL')
ax0.axhline(0.0, color='k', lw=1, ls='--')
# ax0.plot(idx, self.pnl['total_pnl'], lw=1.5, color='violet', label='Total PnL')
ax0.legend(loc='upper left')
ax1.plot(idx, self.returns.loc[:, 'bench_cum'], label='Benchmark')
ax1.plot(idx, self.returns.loc[:, 'strat_cum'], label='Strategy')
ax1.legend(loc='upper left')
ax2.plot(idx, self.returns.loc[:, 'active_cum'], label='Extra Return')
ax2.legend(loc='upper left')
ax2.set_xlabel("Date")
ax2.set_ylabel("Net Value")
ax1.set_ylabel("Net Value")
ax2.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
plt.tight_layout()
fig.savefig(os.path.join(save_folder, 'pnl_img.png'))
plt.close()
def gen_report(self, source_dir, template_fn, out_folder='.', selected=None):
"""
Generate HTML (and PDF) report of the trade analysis.
Parameters
----------
source_dir : str
path of directory where HTML template and css files are stored.
template_fn : str
File name of HTML template.
out_folder : str
Output folder of report.
selected : list of str or None
List of symbols whose detailed PnL curve and position will be plotted.
# TODO: this parameter should not belong to function
"""
dic = dict()
dic['html_title'] = "Alpha Strategy Backtest Result"
dic['selected_securities'] = selected
# we do not want to show username / password in report
dic['props'] = {k: v for k, v in self.configs.items() if ('username' not in k and 'password' not in k)}
dic['performance_metrics'] = self.performance_metrics
dic['risk_metrics'] = self.risk_metrics
dic['position_change'] = self.position_change
dic['account'] = self.account
dic['df_daily'] = jutil.group_df_to_dict(self.daily, by='symbol')
dic['daily_position'] = self.daily_position
self.report_dic.update(dic)
self.returns.to_csv(os.path.join(out_folder, 'returns.csv'))
r = Report(self.report_dic, source_dir=source_dir, template_fn=template_fn, out_folder=out_folder)
r.generate_html()
r.output_html('report.html')
def do_analyze(self, result_dir, selected_sec=None):
if selected_sec is None:
selected_sec = []
print("process trades...")
self.process_trades()
print("get daily stats...")
self.get_daily()
print("calc strategy return...")
self.get_returns(consider_commission=False)
if len(selected_sec) > 0:
print("Plot single securities PnL")
for symbol in selected_sec:
df_daily = self.daily.loc[pd.IndexSlice[symbol, :], :]
df_daily.index = df_daily.index.droplevel(0)
if df_daily is not None:
plot_trades(df_daily, symbol=symbol, save_folder=self.file_folder)
print("Plot strategy PnL...")
self.plot_pnl(result_dir)
print("generate report...")
self.gen_report(source_dir=STATIC_FOLDER, template_fn='report_template.html',
out_folder=result_dir,
selected=selected_sec)
class EventAnalyzer(BaseAnalyzer):
def __init__(self):
super(EventAnalyzer, self).__init__()
self.metrics = dict()
self.daily = None
self.data_benchmark = None
self.returns = None # OrderedDict
self.position_change = None # OrderedDict
self.account = None # OrderedDict
def initialize(self, data_server_=None, dataview=None, file_folder='.'):
super(EventAnalyzer, self).initialize(data_api=data_server_, dataview=dataview,
file_folder=file_folder)
if self.dataview is not None and self.dataview.data_benchmark is not None:
self.data_benchmark = self.dataview.data_benchmark.loc[(self.dataview.data_benchmark.index >= self.start_date)
&(self.dataview.data_benchmark.index <= self.end_date)]
else:
benchmark = self.configs.get('benchmark', "")
if benchmark and data_server_:
df, msg = data_server_.daily(benchmark, start_date=self.closes.index[0], end_date=self.closes.index[-1])
self.data_benchmark = df.set_index('trade_date').loc[:, ['close']]
self.data_benchmark.columns = ['bench']
else:
self.data_benchmark = pd.DataFrame(index=self.closes.index, columns=['bench'], data=np.ones(len(self.closes), dtype=float))
class AlphaAnalyzer(BaseAnalyzer):
def __init__(self):
super(AlphaAnalyzer, self).__init__()
self.metrics = dict()
self.daily = None
self.returns = None # OrderedDict
self.position_change = None # OrderedDict
self.account = None # OrderedDict
self.df_brinson = None
self.data_benchmark = None
def initialize(self, data_api=None, dataview=None, file_folder='.'):
super(AlphaAnalyzer, self).initialize(data_api=data_api, dataview=dataview,
file_folder=file_folder)
if self.dataview is not None and self.dataview.data_benchmark is not None:
self.data_benchmark = self.dataview.data_benchmark.loc[(self.dataview.data_benchmark.index >= self.start_date)
&(self.dataview.data_benchmark.index <= self.end_date)]
@staticmethod
def _to_pct_return(arr, cumulative=False):
"""Convert portfolio value to portfolio (linear) return."""
r = np.empty_like(arr)
r[0] = 0.0
if cumulative:
r[1:] = arr[1:] / arr[0] - 1
else:
r[1:] = arr[1:] / arr[:-1] - 1
return r
'''
def get_returns_OLD(self, compound_return=True, consider_commission=True):
profit_col_name = 'CumProfitComm' if consider_commission else 'CumProfit'
vp_list = {sec: df_profit.loc[:, profit_col_name] for sec, df_profit in self.daily.items()}
df_profit = pd.concat(vp_list, axis=1) # this is cumulative profit
        # TODO temporary solution
df_profit = df_profit.fillna(method='ffill').fillna(0.0)
strategy_value = df_profit.sum(axis=1) + self.configs['init_balance']
market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill')
market_values.columns = ['strat', 'bench']
df_returns = market_values.pct_change(periods=1).fillna(0.0)
df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
if compound_return:
df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
else:
df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench']
df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0)
start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d")
end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d")
years = (end - start).days / 365.0
self.metrics['yearly_return'] = np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1
self.metrics['yearly_vol'] = df_returns.loc[:, 'active'].std() * np.sqrt(225.)
self.metrics['beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1]
self.metrics['sharpe'] = self.metrics['yearly_return'] / self.metrics['yearly_vol']
# bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date')
# df_returns = df_returns.join(bt_strat_mv, how='right')
self.returns = df_returns
'''
def _get_index_weight(self):
if self.dataview is not None:
res = self.dataview.get_ts('index_weight', start_date=self.start_date, end_date=self.end_date)
else:
res = self.data_api.get_index_weights_daily(self.universe, self.start_date, self.end_date)
return res
def _brinson(self, close, pos, index_weight, group):
"""
Brinson Attribution.
Parameters
----------
close : pd.DataFrame
Index is date, columns are symbols.
pos : pd.DataFrame
Index is date, columns are symbols.
index_weight : pd.DataFrame
Index is date, columns are symbols.
group : pd.DataFrame
Index is date, columns are symbols.
Returns
-------
dict
"""
def group_sum(df, group_daily):
groups = np.unique(group_daily.values.flatten())
mask = np.isnan(groups.astype(float))
groups = groups[np.logical_not(mask)]
res = pd.DataFrame(index=df.index, columns=groups, data=np.nan)
for g in groups:
mask = group_daily == g
tmp = df[mask]
res.loc[:, g] = tmp.sum(axis=1)
return res
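        # Standard Brinson decomposition computed below (w = group weight,
        # r = group return; p = portfolio, b = benchmark index):
        #   allocation  = sum_g (w_p,g - w_b,g) * r_b,g
        #   selection   = sum_g  w_b,g * (r_p,g - r_b,g)
        #   interaction = total_active - allocation - selection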
ret = close.pct_change(1)
pos_sum = pos.sum(axis=1)
pf_weight = pos.div(pos_sum, axis=0)
pf_weight.loc[pos_sum == 0, :] = 0.0
assert pf_weight.isnull().sum().sum() == 0
pf_weight = pf_weight.reindex(index=ret.index, columns=ret.columns)
pf_weight = pf_weight.fillna(0.0)
weighted_ret_pf = ret.mul(pf_weight)
weighted_ret_index = ret.mul(index_weight)
index_group_weight = group_sum(index_weight, group)
pf_group_weight = group_sum(pf_weight, group)
pf_group_ret = group_sum(weighted_ret_pf, group).div(pf_group_weight)
index_group_ret = group_sum(weighted_ret_index, group).div(index_group_weight)
allo_ret_group = (pf_group_weight - index_group_weight).mul(index_group_ret)
allo_ret = allo_ret_group.sum(axis=1)
selection_ret_group = (pf_group_ret - index_group_ret).mul(index_group_weight)
selection_ret = selection_ret_group.sum(axis=1)
active_ret = (weighted_ret_pf.sum(axis=1) - weighted_ret_index.sum(axis=1))
inter_ret = active_ret - selection_ret - allo_ret
df_brinson = pd.DataFrame(index=allo_ret.index,
data={'allocation': allo_ret,
'selection': selection_ret,
'interaction': inter_ret,
'total_active': active_ret})
return {'df_brinson': df_brinson, 'allocation': allo_ret_group, 'selection': selection_ret_group}
def brinson(self, group):
"""
Parameters
----------
group : str or pd.DataFrame
If group is string, this function will try to fetch the corresponding DataFrame from DataView.
If group is pd.DataFrame, it will be used as-is.
Returns
-------
"""
if isinstance(group, str):
group = self.dataview.get_ts(group, start_date=self.start_date, end_date=self.end_date)
elif isinstance(group, pd.DataFrame):
pass
else:
raise ValueError("Group must be string or DataFrame. But {} is provided.".format(group))
if group is None or group.empty:
raise ValueError("group is None or group is empty")
close = self.closes_adj
pos = self.daily_position
index_weight = self._get_index_weight()
res_dic = self._brinson(close, pos, index_weight, group)
df_brinson = res_dic['df_brinson']
self.df_brinson = df_brinson
self.report_dic['df_brinson'] = df_brinson
plot_brinson(df_brinson, save_folder=self.file_folder)
def do_analyze(self, result_dir, selected_sec=None, brinson_group=None):
if selected_sec is None:
selected_sec = []
print("process trades...")
self.process_trades()
print("get daily stats...")
self.get_daily()
print("calc strategy return...")
self.get_returns(consider_commission=False)
not_none_sec = []
if len(selected_sec) > 0:
print("Plot single securities PnL")
for symbol in selected_sec:
df_daily = self.daily.loc[pd.IndexSlice[symbol, :], :]
df_daily.index = df_daily.index.droplevel(0)
if df_daily is not None:
not_none_sec.append(symbol)
plot_trades(df_daily, symbol=symbol, save_folder=self.file_folder)
print("Plot strategy PnL...")
self.plot_pnl(result_dir)
if brinson_group is not None:
print("Do brinson attribution.")
group = self.dataview.get_ts(brinson_group)
if group is None:
raise ValueError("group data is None.")
self.brinson(group)
print("generate report...")
self.gen_report(source_dir=STATIC_FOLDER, template_fn='report_template.html',
out_folder=result_dir,
selected=not_none_sec)
def plot_daily_trading_holding_pnl(trading, holding, total, total_cum):
"""
Parameters
----------
    trading, holding, total, total_cum : pd.Series
        Daily trading PnL, holding PnL, total PnL and cumulative total PnL,
        indexed by trade date.
"""
idx0 = total.index
n = len(idx0)
idx = np.arange(n)
fig, (ax0, ax2, ax3) = plt.subplots(3, 1, figsize=(16, 13.5), sharex=True)
ax1 = ax0.twinx()
bar_width = 0.4
profit_color, lose_color = '#D63434', '#2DB635'
curve_color = '#174F67'
y_label = 'Profit / Loss ($)'
color_arr_raw = np.array([profit_color] * n)
color_arr = color_arr_raw.copy()
color_arr[total < 0] = lose_color
ax0.bar(idx, total, width=bar_width, color=color_arr)
ax0.set(title='Daily PnL', ylabel=y_label, xlim=[-2, n+2],)
ax0.xaxis.set_major_formatter(MyFormatter(idx0, '%y-%m-%d'))
ax1.plot(idx, total_cum, lw=1.5, color=curve_color)
ax1.set(ylabel='Cum. ' + y_label)
ax1.yaxis.label.set_color(curve_color)
color_arr = color_arr_raw.copy()
color_arr[trading < 0] = lose_color
ax2.bar(idx-bar_width/2, trading, width=bar_width, color=color_arr)
ax2.set(title='Daily Trading PnL', ylabel=y_label)
color_arr = color_arr_raw.copy()
color_arr[holding < 0] = lose_color
ax3.bar(idx+bar_width/2, holding, width=bar_width, color=color_arr)
    ax3.set(title='Daily Holding PnL', ylabel=y_label, xticks=idx[::max(n // 10, 1)])  # step guarded for n < 10
return fig
def plot_portfolio_bench_pnl(portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret):
"""
Parameters
----------
    portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret : pd.Series
        Cumulative net value of the strategy, the benchmark and the active
        (excess) return, indexed by trade date.
"""
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 9), sharex=True)
idx_dt = portfolio_cum_ret.index
idx = np.arange(len(idx_dt))
y_label_ret = "Cumulative Return (%)"
ax1.plot(idx, (benchmark_cum_ret-1) * TO_PCT, label='Benchmark', color='#174F67')
ax1.plot(idx, (portfolio_cum_ret-1) * TO_PCT, label='Strategy', color='#198DD6')
ax1.legend(loc='upper left')
ax1.set(title="Absolute Return of Portfolio and Benchmark",
#xlabel="Date",
ylabel=y_label_ret)
ax1.grid(axis='y')
ax2.plot(idx, (excess_cum_ret-1) * TO_PCT, label='Extra Return', color='#C37051')
ax2.set(title="Excess Return Compared to Benchmark", ylabel=y_label_ret
#xlabel="Date",
)
ax2.grid(axis='y')
ax2.xaxis.set_major_formatter(MyFormatter(idx_dt, '%y-%m-%d')) # 17-09-31
fig.tight_layout()
return fig
def plot_brinson(df, save_folder):
"""
Parameters
----------
    df : pd.DataFrame
        Brinson attribution result with columns 'allocation', 'selection',
        'interaction' and 'total_active'.
"""
allo, selec, inter, total = df['allocation'], df['selection'], df['interaction'], df['total_active']
fig, ax1 = plt.subplots(1, 1, figsize=(21, 8))
idx0 = df.index
idx = range(len(idx0))
ax1.plot(idx, selec, lw=1.5, color='indianred', label='Selection Return')
ax1.plot(idx, allo, lw=1.5, color='royalblue', label='Allocation Return')
ax1.plot(idx, inter, lw=1.5, color='purple', label='Interaction Return')
# ax1.plot(idx, total, lw=1.5, ls='--', color='k', label='Total Active Return')
ax1.axhline(0.0, color='k', lw=0.5, ls='--')
ax1.legend(loc='upper left')
ax1.set_xlabel("Date")
ax1.set_ylabel("Return")
ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
plt.tight_layout()
fig.savefig(os.path.join(save_folder, 'brinson_attribution.png'))
plt.close()
def calc_avg_pos_price(pos_arr, price_arr):
"""
Calculate average cost price using position and fill price.
    When the position drops to 0, the average price is stored as 0.0 here and
    later filled with the symbol's close price (see get_daily).
"""
assert len(pos_arr) == len(price_arr)
avg_price = np.zeros_like(pos_arr, dtype=float)
avg_price[0] = price_arr[0]
for i in range(pos_arr.shape[0] - 1):
if pos_arr[i+1] == 0:
avg_price[i+1] = 0.0
else:
pos_diff = pos_arr[i+1] - pos_arr[i]
if pos_arr[i] == 0 or pos_diff * pos_arr[i] > 0:
count = True
else:
count = False
if count:
avg_price[i+1] = (avg_price[i] * pos_arr[i] + pos_diff * price_arr[i+1]) * 1. / pos_arr[i+1]
else:
avg_price[i+1] = avg_price[i]
return avg_price
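# A small worked example (hypothetical numbers, not from the original file):
# pos_arr = [10, 20, 10, 0] with price_arr = [100, 110, 120, 130] yields
# avg_price = [100, 105, 105, 0]: buying 10 more at 110 moves the average to
# (10*100 + 10*110) / 20 = 105, reducing the position keeps it unchanged, and a
# flat position resets it to 0 (later replaced by the close price, see get_daily).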
def plot_trades(df, symbol="", save_folder='.', marker_size_adjust_ratio=0.1):
old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()}
mpl.rcParams.update(MPL_RCPARAMS)
idx0 = df.index
idx = range(len(idx0))
price = df.loc[:, 'close']
bv, sv = df.loc[:, 'BuyVolume'].values, df.loc[:, 'SellVolume'].values
profit = df.loc[:, 'CumProfit'].values
avgpx = df.loc[:, 'AvgPosPrice']
bv_m = np.max(bv)
sv_m = np.max(sv)
if bv_m > 0:
bv = bv / bv_m * 100
if sv_m > 0:
sv = sv / sv_m * 100
fig = plt.figure(figsize=(14, 10))
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
ax3 = plt.subplot2grid((4, 1), (3, 0), rowspan=1, sharex=ax1)
ax2 = ax1.twinx()
ax1.plot(idx, price, label='Price', linestyle='-', lw=1, marker='', color='yellow')
ax1.scatter(idx, price, label='buy', marker='o', s=bv, color='indianred')
ax1.scatter(idx, price, label='sell', marker='o', s=sv, color='forestgreen')
ax1.plot(idx, avgpx, lw=1, marker='', color='green')
ax1.legend(loc='upper left')
ax1.set(title="Price, Trades and PnL for {:s}".format(symbol), ylabel="Price ($)")
ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m'))
ax2.plot(idx, profit, label='PnL', color='k', lw=1, ls='--', alpha=.4)
ax2.legend(loc='upper right')
ax2.set(ylabel="Profit / Loss ($)")
# ax1.xaxis.set_major_formatter(MyFormatter(df.index))#, '%H:%M'))
ax3.plot(idx, df.loc[:, 'position'], marker='D', markersize=3, lw=2)
ax3.axhline(0, color='k', lw=1, ls='--', alpha=0.8)
ax3.set(title="Position of {:s}".format(symbol))
fig.tight_layout()
fig.savefig(save_folder + '/' + "{}.png".format(symbol), facecolor=fig.get_facecolor(), dpi=fig.get_dpi())
mpl.rcParams.update(old_mpl_rcparams)
|
[
"numpy.sqrt",
"numpy.logical_not",
"numpy.array",
"jaqs.data.basic.instrument.InstManager",
"pandas.to_datetime",
"numpy.arange",
"jaqs.util.group_df_to_dict",
"jaqs.trade.analyze.report.Report",
"numpy.zeros_like",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"jaqs.util.join_relative_path",
"pandas.DataFrame",
"numpy.round",
"collections.OrderedDict",
"matplotlib.rcParams.update",
"numpy.corrcoef",
"jaqs.util.combine_date_time",
"numpy.power",
"jaqs.trade.common.ORDER_ACTION.is_positive",
"os.path.join",
"json.load",
"matplotlib.pyplot.figure",
"numpy.empty_like",
"matplotlib.pyplot.tight_layout",
"os.path.abspath",
"matplotlib.rcParams.items",
"pandas.concat",
"matplotlib.pyplot.subplot2grid"
] |
[((462, 510), 'jaqs.util.join_relative_path', 'jutil.join_relative_path', (['"""trade/analyze/static"""'], {}), "('trade/analyze/static')\n", (486, 510), True, 'import jaqs.util as jutil\n'), ((31059, 31071), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (31068, 31071), True, 'import numpy as np\n'), ((31104, 31155), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(16, 13.5)', 'sharex': '(True)'}), '(3, 1, figsize=(16, 13.5), sharex=True)\n', (31116, 31155), True, 'import matplotlib.pyplot as plt\n'), ((31337, 31365), 'numpy.array', 'np.array', (['([profit_color] * n)'], {}), '([profit_color] * n)\n', (31345, 31365), True, 'import numpy as np\n'), ((32410, 32458), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(16, 9)', 'sharex': '(True)'}), '(2, 1, figsize=(16, 9), sharex=True)\n', (32422, 32458), True, 'import matplotlib.pyplot as plt\n'), ((33525, 33560), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(21, 8)'}), '(1, 1, figsize=(21, 8))\n', (33537, 33560), True, 'import matplotlib.pyplot as plt\n'), ((34149, 34167), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (34165, 34167), True, 'import matplotlib.pyplot as plt\n'), ((34242, 34253), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (34251, 34253), True, 'import matplotlib.pyplot as plt\n'), ((34493, 34528), 'numpy.zeros_like', 'np.zeros_like', (['pos_arr'], {'dtype': 'float'}), '(pos_arr, dtype=float)\n', (34506, 34528), True, 'import numpy as np\n'), ((35240, 35273), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['MPL_RCPARAMS'], {}), '(MPL_RCPARAMS)\n', (35259, 35273), True, 'import matplotlib as mpl\n'), ((35524, 35534), 'numpy.max', 'np.max', (['bv'], {}), '(bv)\n', (35530, 35534), True, 'import numpy as np\n'), ((35546, 35556), 'numpy.max', 'np.max', (['sv'], {}), '(sv)\n', (35552, 35556), True, 'import numpy as np\n'), ((35664, 35692), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 10)'}), '(figsize=(14, 10))\n', (35674, 35692), True, 'import matplotlib.pyplot as plt\n'), ((35703, 35746), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 1)', '(0, 0)'], {'rowspan': '(3)'}), '((4, 1), (0, 0), rowspan=3)\n', (35719, 35746), True, 'import matplotlib.pyplot as plt\n'), ((35757, 35812), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 1)', '(3, 0)'], {'rowspan': '(1)', 'sharex': 'ax1'}), '((4, 1), (3, 0), rowspan=1, sharex=ax1)\n', (35773, 35812), True, 'import matplotlib.pyplot as plt\n'), ((36892, 36929), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['old_mpl_rcparams'], {}), '(old_mpl_rcparams)\n', (36911, 36929), True, 'import matplotlib as mpl\n'), ((4001, 4029), 'os.path.abspath', 'os.path.abspath', (['file_folder'], {}), '(file_folder)\n', (4016, 4029), False, 'import os\n'), ((5152, 5223), 'jaqs.util.combine_date_time', 'jutil.combine_date_time', (["df.loc[:, 'fill_date']", "df.loc[:, 'fill_time']"], {}), "(df.loc[:, 'fill_date'], df.loc[:, 'fill_time'])\n", (5175, 5223), True, 'import jaqs.util as jutil\n'), ((8911, 8924), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8922, 8924), False, 'from collections import OrderedDict\n'), ((8943, 8956), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8954, 8956), False, 'from collections import OrderedDict\n'), ((10765, 10812), 'pandas.concat', 'pd.concat', (['[close, trade]'], {'axis': '(1)', 'join': '"""outer"""'}), "([close, trade], axis=1, join='outer')\n", (10774, 
10812), True, 'import pandas as pd\n'), ((14599, 14658), 'pandas.to_datetime', 'pd.to_datetime', (["self.configs['start_date']"], {'format': '"""%Y%m%d"""'}), "(self.configs['start_date'], format='%Y%m%d')\n", (14613, 14658), True, 'import pandas as pd\n'), ((14673, 14730), 'pandas.to_datetime', 'pd.to_datetime', (["self.configs['end_date']"], {'format': '"""%Y%m%d"""'}), "(self.configs['end_date'], format='%Y%m%d')\n", (14687, 14730), True, 'import pandas as pd\n'), ((15720, 15753), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['MPL_RCPARAMS'], {}), '(MPL_RCPARAMS)\n', (15739, 15753), True, 'import matplotlib as mpl\n'), ((16632, 16669), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['old_mpl_rcparams'], {}), '(old_mpl_rcparams)\n', (16651, 16669), True, 'import matplotlib as mpl\n'), ((16832, 16889), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(21, 8)', 'dpi': '(300)', 'sharex': '(True)'}), '(3, 1, figsize=(21, 8), dpi=300, sharex=True)\n', (16844, 16889), True, 'import matplotlib.pyplot as plt\n'), ((17912, 17930), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17928, 17930), True, 'import matplotlib.pyplot as plt\n'), ((18001, 18012), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18010, 18012), True, 'import matplotlib.pyplot as plt\n'), ((19170, 19217), 'jaqs.util.group_df_to_dict', 'jutil.group_df_to_dict', (['self.daily'], {'by': '"""symbol"""'}), "(self.daily, by='symbol')\n", (19192, 19217), True, 'import jaqs.util as jutil\n'), ((19410, 19508), 'jaqs.trade.analyze.report.Report', 'Report', (['self.report_dic'], {'source_dir': 'source_dir', 'template_fn': 'template_fn', 'out_folder': 'out_folder'}), '(self.report_dic, source_dir=source_dir, template_fn=template_fn,\n out_folder=out_folder)\n', (19416, 19508), False, 'from jaqs.trade.analyze.report import Report\n'), ((23184, 23202), 'numpy.empty_like', 'np.empty_like', (['arr'], {}), '(arr)\n', (23197, 23202), True, 'import numpy as np\n'), ((27894, 28049), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'allo_ret.index', 'data': "{'allocation': allo_ret, 'selection': selection_ret, 'interaction':\n inter_ret, 'total_active': active_ret}"}), "(index=allo_ret.index, data={'allocation': allo_ret,\n 'selection': selection_ret, 'interaction': inter_ret, 'total_active':\n active_ret})\n", (27906, 28049), True, 'import pandas as pd\n'), ((34184, 34236), 'os.path.join', 'os.path.join', (['save_folder', '"""brinson_attribution.png"""'], {}), "(save_folder, 'brinson_attribution.png')\n", (34196, 34236), False, 'import os\n'), ((1565, 1576), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (1573, 1576), True, 'import numpy as np\n'), ((4095, 4139), 'os.path.join', 'os.path.join', (['self.file_folder', '"""trades.csv"""'], {}), "(self.file_folder, 'trades.csv')\n", (4107, 4139), False, 'import os\n'), ((6863, 6875), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6872, 6875), False, 'import json\n'), ((15341, 15408), 'numpy.corrcoef', 'np.corrcoef', (["df_returns.loc[:, 'bench']", "df_returns.loc[:, 'strat']"], {}), "(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])\n", (15352, 15408), True, 'import numpy as np\n'), ((16082, 16122), 'os.path.join', 'os.path.join', (['save_folder', '"""pnl_img.png"""'], {}), "(save_folder, 'pnl_img.png')\n", (16094, 16122), False, 'import os\n'), ((16506, 16562), 'os.path.join', 'os.path.join', (['save_folder', '"""pnl_img_trading_holding.png"""'], {}), "(save_folder, 'pnl_img_trading_holding.png')\n", (16518, 
16562), False, 'import os\n'), ((17951, 17991), 'os.path.join', 'os.path.join', (['save_folder', '"""pnl_img.png"""'], {}), "(save_folder, 'pnl_img.png')\n", (17963, 17991), False, 'import os\n'), ((19352, 19391), 'os.path.join', 'os.path.join', (['out_folder', '"""returns.csv"""'], {}), "(out_folder, 'returns.csv')\n", (19364, 19391), False, 'import os\n'), ((26513, 26570), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index', 'columns': 'groups', 'data': 'np.nan'}), '(index=df.index, columns=groups, data=np.nan)\n', (26525, 26570), True, 'import pandas as pd\n'), ((35214, 35234), 'matplotlib.rcParams.items', 'mpl.rcParams.items', ([], {}), '()\n', (35232, 35234), True, 'import matplotlib as mpl\n'), ((1714, 1762), 'pandas.to_datetime', 'pd.to_datetime', (['self.dates[ind]'], {'format': '"""%Y%m%d"""'}), "(self.dates[ind], format='%Y%m%d')\n", (1728, 1762), True, 'import pandas as pd\n'), ((4803, 4857), 'jaqs.data.basic.instrument.InstManager', 'InstManager', ([], {'data_api': 'self.data_api', 'symbol': 'symbol_str'}), '(data_api=self.data_api, symbol=symbol_str)\n', (4814, 4857), False, 'from jaqs.data.basic.instrument import InstManager\n'), ((6774, 6810), 'os.path.join', 'os.path.join', (['folder', '"""configs.json"""'], {}), "(folder, 'configs.json')\n", (6786, 6810), False, 'import os\n'), ((13847, 13903), 'pandas.concat', 'pd.concat', (['[strategy_value, self.data_benchmark]'], {'axis': '(1)'}), '([strategy_value, self.data_benchmark], axis=1)\n', (13856, 13903), True, 'import pandas as pd\n'), ((14855, 14920), 'numpy.power', 'np.power', (["df_returns.loc[:, 'active_cum'].values[-1]", '(1.0 / years)'], {}), "(df_returns.loc[:, 'active_cum'].values[-1], 1.0 / years)\n", (14863, 14920), True, 'import numpy as np\n'), ((15041, 15091), 'numpy.sqrt', 'np.sqrt', (['common.CALENDAR_CONST.TRADE_DAYS_PER_YEAR'], {}), '(common.CALENDAR_CONST.TRADE_DAYS_PER_YEAR)\n', (15048, 15091), True, 'import numpy as np\n'), ((15690, 15710), 'matplotlib.rcParams.items', 'mpl.rcParams.items', ([], {}), '()\n', (15708, 15710), True, 'import matplotlib as mpl\n'), ((26473, 26493), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (26487, 26493), True, 'import numpy as np\n'), ((7555, 7589), 'jaqs.trade.common.ORDER_ACTION.is_positive', 'common.ORDER_ACTION.is_positive', (['s'], {}), '(s)\n', (7586, 7589), False, 'from jaqs.trade import common\n')]
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from tests.common.test_op import triangle
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
def triangle_execute(shape, const_value, lower, dtype, attrs):
support_type = ['float16', 'float32']
assert dtype in support_type
assert len(shape) <= 2
if attrs is None:
attrs = {'enable_pre_poly_loop_partition': False}
attrs['enable_pre_poly_loop_partition'] = False
attrs['enable_post_poly_loop_partition'] = False
attrs['enable_convert_if'] = True
attrs['enable_double_buffer'] = False
output_shape = shape
if len(shape) == 1:
output_shape = [shape[0], shape[0]]
input, bench_mark = gen_data(shape, output_shape, const_value, lower, dtype)
op_attrs = [const_value, lower]
mod = triangle_compile(shape, dtype, op_attrs, attrs)
source_code = mod.imported_modules[0].get_source()
output = np.full(output_shape, np.nan, dtype)
output = utils.mod_launch(mod, (input, output), expect=bench_mark)
# compare result
compare_result = compare_tensor(output, bench_mark, rtol=5e-3, equal_nan=True)
return input, output, bench_mark, compare_result
def triangle_compile(shape, dtype, op_attrs, attrs):
return utils.op_build_test(triangle.triangle, [shape], [dtype], op_attrs, kernel_name='triangle', attrs=attrs)
def gen_data(shape, output_shape, const_value, lower, dtype):
input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
if len(shape) == 2:
bench_mark = input
else:
bench_mark = np.zeros(output_shape).astype(dtype)
for i in range(output_shape[0]):
bench_mark[i] = input
if lower:
for i in range(output_shape[0]):
bench_mark[i][i + 1:] = const_value
else:
for i in range(output_shape[0]):
bench_mark[i][:i] = const_value
return input, bench_mark
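# Minimal usage sketch; the shape, dtype and attrs below are illustrative
# assumptions, not part of the original test suite:
#   inp, out, expect, ok = triangle_execute((16,), 0.0, lower=True,
#                                           dtype='float16', attrs=None)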
|
[
"tests.common.gen_random.random_gaussian",
"akg.utils.kernel_exec.mod_launch",
"numpy.zeros",
"numpy.full",
"akg.utils.kernel_exec.op_build_test",
"tests.common.tensorio.compare_tensor"
] |
[((1566, 1602), 'numpy.full', 'np.full', (['output_shape', 'np.nan', 'dtype'], {}), '(output_shape, np.nan, dtype)\n', (1573, 1602), True, 'import numpy as np\n'), ((1616, 1673), 'akg.utils.kernel_exec.mod_launch', 'utils.mod_launch', (['mod', '(input, output)'], {'expect': 'bench_mark'}), '(mod, (input, output), expect=bench_mark)\n', (1632, 1673), True, 'from akg.utils import kernel_exec as utils\n'), ((1717, 1779), 'tests.common.tensorio.compare_tensor', 'compare_tensor', (['output', 'bench_mark'], {'rtol': '(0.005)', 'equal_nan': '(True)'}), '(output, bench_mark, rtol=0.005, equal_nan=True)\n', (1731, 1779), False, 'from tests.common.tensorio import compare_tensor\n'), ((1898, 2005), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['triangle.triangle', '[shape]', '[dtype]', 'op_attrs'], {'kernel_name': '"""triangle"""', 'attrs': 'attrs'}), "(triangle.triangle, [shape], [dtype], op_attrs,\n kernel_name='triangle', attrs=attrs)\n", (1917, 2005), True, 'from akg.utils import kernel_exec as utils\n'), ((2078, 2118), 'tests.common.gen_random.random_gaussian', 'random_gaussian', (['shape'], {'miu': '(1)', 'sigma': '(0.3)'}), '(shape, miu=1, sigma=0.3)\n', (2093, 2118), False, 'from tests.common.gen_random import random_gaussian\n'), ((2215, 2237), 'numpy.zeros', 'np.zeros', (['output_shape'], {}), '(output_shape)\n', (2223, 2237), True, 'import numpy as np\n')]
|
# LSTM with Variable Length Input Sequences to One Character Output
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from theano.tensor.shared_randomstreams import RandomStreams
# fix random seed for reproducibility
numpy.random.seed(7)
# define the raw dataset
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# create mapping of characters to integers (0-25) and the reverse
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))
# prepare the dataset of input to output pairs encoded as integers
num_inputs = 16
max_len = 5
dataX = []
dataY = []
for i in range(num_inputs):
start = numpy.random.randint(len(alphabet)-2)
end = numpy.random.randint(start, min(start+max_len,len(alphabet)-1))
sequence_in = alphabet[start:end+1]
sequence_out = alphabet[end + 1]
dataX.append([char_to_int[char] for char in sequence_in])
dataY.append(char_to_int[sequence_out])
print( sequence_in, '->', sequence_out )
# convert list of lists to array and pad sequences if needed
X = pad_sequences(dataX, maxlen=max_len, dtype='float32')
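# e.g. the encoded sequence [10, 11, 12] ('KLM') becomes [0, 0, 10, 11, 12]
# after left-padding to max_len=5 (pad_sequences pads at the start by default)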
# reshape X to be [samples, time steps, features]
X = numpy.reshape(X, (X.shape[0], max_len, 1))
# normalize
X = X / float(len(alphabet))
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# create and fit the model
batch_size = 1
model = Sequential()
model.add(LSTM(16, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
for i in range(1):
model.fit(X, y, nb_epoch=1, batch_size=batch_size, verbose=2, shuffle=False)
model.reset_states()
# summarize performance of the model
scores = model.evaluate(X, y, batch_size=batch_size, verbose=0)
model.reset_states()
print("Model Accuracy: %.2f%%" % (scores[1]*100))
# demonstrate some model predictions
for i in range(1):
pattern_index = numpy.random.randint(len(dataX))
pattern = dataX[pattern_index]
x = pad_sequences([pattern], maxlen=max_len, dtype='float32')
x = numpy.reshape(x, (1, max_len, 1))
x = x / float(len(alphabet))
prediction = model.predict(x, verbose=0)
index = numpy.argmax(prediction)
result = int_to_char[index]
seq_in = [int_to_char[value] for value in pattern]
print( seq_in, "->", result )
|
[
"numpy.reshape",
"numpy.argmax",
"keras.models.Sequential",
"keras.layers.LSTM",
"keras.utils.np_utils.to_categorical",
"numpy.random.seed",
"keras.layers.Dense",
"keras.preprocessing.sequence.pad_sequences"
] |
[((374, 394), 'numpy.random.seed', 'numpy.random.seed', (['(7)'], {}), '(7)\n', (391, 394), False, 'import numpy\n'), ((1226, 1279), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['dataX'], {'maxlen': 'max_len', 'dtype': '"""float32"""'}), "(dataX, maxlen=max_len, dtype='float32')\n", (1239, 1279), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1336, 1378), 'numpy.reshape', 'numpy.reshape', (['X', '(X.shape[0], max_len, 1)'], {}), '(X, (X.shape[0], max_len, 1))\n', (1349, 1378), False, 'import numpy\n'), ((1465, 1495), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['dataY'], {}), '(dataY)\n', (1488, 1495), False, 'from keras.utils import np_utils\n'), ((1549, 1561), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1559, 1561), False, 'from keras.models import Sequential\n'), ((1573, 1652), 'keras.layers.LSTM', 'LSTM', (['(16)'], {'batch_input_shape': '(batch_size, X.shape[1], X.shape[2])', 'stateful': '(True)'}), '(16, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True)\n', (1577, 1652), False, 'from keras.layers import LSTM\n'), ((1665, 1704), 'keras.layers.Dense', 'Dense', (['y.shape[1]'], {'activation': '"""softmax"""'}), "(y.shape[1], activation='softmax')\n", (1670, 1704), False, 'from keras.layers import Dense\n'), ((2255, 2312), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[pattern]'], {'maxlen': 'max_len', 'dtype': '"""float32"""'}), "([pattern], maxlen=max_len, dtype='float32')\n", (2268, 2312), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2322, 2355), 'numpy.reshape', 'numpy.reshape', (['x', '(1, max_len, 1)'], {}), '(x, (1, max_len, 1))\n', (2335, 2355), False, 'import numpy\n'), ((2449, 2473), 'numpy.argmax', 'numpy.argmax', (['prediction'], {}), '(prediction)\n', (2461, 2473), False, 'import numpy\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>.
@mail: <EMAIL>
"""
# from qc.__version__ import __version__
import georinex as gr
import numpy as np
from matplotlib.pyplot import figure, show
import matplotlib.pyplot as plt
obs = gr.load(
'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx',
# tlim=['2021-11-03T12:00', '2021-11-03T12:30'])
tlim=['2021-11-03T05:30', '2021-11-03T07:30'])
# tlim=['2021-11-03T15:00', '2021-11-03T18:00'])
# hdr = gr.rinexheader(
# 'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx')
# rnx_version = 3
# %% Starting test
# Copying helper functions from Multipath class - later on, it could be turned
# into a separate class with helper functions
# Pick GPS satellites
svG = []
for i in range(0, len(obs.sv)):
if str(obs.sv[i].values)[0] == 'G':
svG.append(str(obs.sv[i].values))
else:
continue
# %%
# 5:30 to 7:30, G08 and G21 give 2 cycle slips # [290:300]
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
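# Detection rule used below: form the geometry-free (carrier-only) combination
# L4 = |L1 - L2| and flag a cycle slip whenever the epoch-to-epoch change
# |L4[i] - L4[i-1]| exceeds k * std(L4) + I_max.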
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for i in range(1, len(L4)):
L4_diff.append(np.abs(L4[i] - L4[i-1]))
if (np.abs(L4[i] - L4[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L4_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L4')
plt.title('Single-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
# %%
# Plot all loaded sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
for i in range(0, len(svG)):
test = obs.sel(sv=svG[i]).dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label=svG[i], linewidth=2.0)
#ax.plot(L2test.time, L2test, label='L2', linewidth=0.5)
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %%
# Plot separate sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
test = obs.sel(sv='E21').dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label='L1', linewidth=2.0)
ax.plot(L2test.time, L2test, label='L2', linewidth=1.0)
ax.grid()
# ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %% Dual-frequency Melbourne-Wuebbena testing
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
freq = [1575.42, 1227.60, 1176.45] # L1, L2, L5 for GPS
f1 = freq[0]*1e6
f2 = freq[1]*1e6
P1 = sattest['C1C']
P2 = sattest['C2W']
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
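# Melbourne-Wuebbena style combination as implemented here: wide-lane carrier
# minus narrow-lane pseudorange, L6 = (f1*L1 - f2*L2)/(f1 - f2) -
# (f1*P1 + f2*P2)/(f1 + f2); a slip is flagged when |L6[i] - L6[i-1]| > k * std(L6).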
L6 = (1/(f1-f2))*(f1*L1 - f2*L2) - (1/(f1+f2))*(f1*P1 + f2*P2)
sigma_L6 = np.std(L6)
k = 4 # criterion factor
criterion = k*sigma_L6
slips_nr = 0
L6_diff = []
for i in range(1, len(L6)):
L6_diff.append(np.abs(L6[i] - L6[i-1]))
if (np.abs(L6[i] - L6[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L6_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L6')
plt.title('Dual-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
# %% Work in Progress
class Slips:
"""
Class for cycle slip detection of RINEX files.
Provides options for different detection algorithms.
    Currently provides a single-frequency Melbourne-Wuebbena style detector
    (slips_MW_single_freq) and a per-satellite slip plot (plot_slips).
"""
def __init__(self):
pass
def slips_MW_single_freq(self, obs):
"""
Cycle slip detection algorithm 1.
        Based on the Melbourne-Wuebbena combination, but using carrier-phase
        data only (the "single-frequency" variant from the Vaclavovic-Dousa
        2016 article).
        Parameters
        ----------
        obs : xarray.Dataset
            RINEX observations loaded with georinex (gr.load); for every GPS
            satellite a slip is flagged when the epoch-to-epoch change of
            L4 = |L1 - L2| exceeds k * std(L4) + I_max.
        Returns
        -------
        None.
"""
# Select a list of GPS satellites
svG = []
for i in range(0, len(obs.sv)):
if str(obs.sv[i].values)[0] == 'G':
svG.append(str(obs.sv[i].values))
else:
continue
# Melbourne-Wuebbena parameters (predetermined)
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
# For each tracked satellite
for i in range(0, len(svG)):
current_sat = obs.sel(sv=svG[i]).dropna(dim='time', how='all')
L1 = current_sat['L1C']
L2 = current_sat['L2W']
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for j in range(1, len(L4)):
L4_diff.append(np.abs(L4[j] - L4[j-1]))
if (np.abs(L4[j] - L4[j-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
print('Sat:', svG[i],
', Slips:', slips_nr,
', Slip criterion:', criterion.values)
def plot_slips(self, obs, sat_nr: str):
"""
Plot cycle slips for one satellite vehicle.
Parameters
----------
        obs : xarray.Dataset
            RINEX observations loaded with georinex (gr.load).
        sat_nr : str
            Satellite identifier to plot, e.g. 'G08'.
Returns
-------
None.
"""
sat = obs.sel(sv=sat_nr).dropna(dim='time', how='all')
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
L1 = sat['L1C']
L2 = sat['L2W']
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for i in range(1, len(L4)):
L4_diff.append(np.abs(L4[i] - L4[i-1]))
if (np.abs(L4[i] - L4[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L4_diff, label=sat_nr, linewidth=1.0)
# labelfull = 'Slip limit: ', criterion.values
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L4')
show()
print('Sat:', sat_nr,
', Slips:', slips_nr,
', Slip criterion:', criterion.values)
# %% Testing first algorithm
sliptest = Slips().slips_MW_single_freq(obs)
# %% Testing plot function
sliptest = Slips().plot_slips(obs, 'G08')
|
[
"numpy.abs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"georinex.load",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] |
[((254, 377), 'georinex.load', 'gr.load', (['"""tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx"""'], {'tlim': "['2021-11-03T05:30', '2021-11-03T07:30']"}), "('tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx',\n tlim=['2021-11-03T05:30', '2021-11-03T07:30'])\n", (261, 377), True, 'import georinex as gr\n'), ((1410, 1425), 'numpy.abs', 'np.abs', (['(L1 - L2)'], {}), '(L1 - L2)\n', (1416, 1425), True, 'import numpy as np\n'), ((1438, 1448), 'numpy.std', 'np.std', (['L4'], {}), '(L4)\n', (1444, 1448), True, 'import numpy as np\n'), ((1781, 1851), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'criterion', 'label': '"""Slip limit"""', 'linestyle': '"""-"""', 'color': '"""r"""'}), "(y=criterion, label='Slip limit', linestyle='-', color='r')\n", (1792, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1874, 1901), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [epochs]"""'], {}), "('Time [epochs]')\n", (1884, 1901), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1918), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""L4"""'], {}), "('L4')\n", (1912, 1918), True, 'import matplotlib.pyplot as plt\n'), ((1919, 1967), 'matplotlib.pyplot.title', 'plt.title', (['"""Single-frequency Melbourne-Wuebbena"""'], {}), "('Single-frequency Melbourne-Wuebbena')\n", (1928, 1967), True, 'import matplotlib.pyplot as plt\n'), ((1968, 1974), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (1972, 1974), False, 'from matplotlib.pyplot import figure, show\n'), ((2402, 2429), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [epochs]"""'], {}), "('Time [epochs]')\n", (2412, 2429), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2458), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Carrier phases"""'], {}), "('Carrier phases')\n", (2440, 2458), True, 'import matplotlib.pyplot as plt\n'), ((2459, 2465), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (2463, 2465), False, 'from matplotlib.pyplot import figure, show\n'), ((2772, 2799), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [epochs]"""'], {}), "('Time [epochs]')\n", (2782, 2799), True, 'import matplotlib.pyplot as plt\n'), ((2800, 2828), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Carrier phases"""'], {}), "('Carrier phases')\n", (2810, 2828), True, 'import matplotlib.pyplot as plt\n'), ((2829, 2835), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (2833, 2835), False, 'from matplotlib.pyplot import figure, show\n'), ((3441, 3451), 'numpy.std', 'np.std', (['L6'], {}), '(L6)\n', (3447, 3451), True, 'import numpy as np\n'), ((3802, 3872), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'criterion', 'label': '"""Slip limit"""', 'linestyle': '"""-"""', 'color': '"""r"""'}), "(y=criterion, label='Slip limit', linestyle='-', color='r')\n", (3813, 3872), True, 'import matplotlib.pyplot as plt\n'), ((3895, 3922), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [epochs]"""'], {}), "('Time [epochs]')\n", (3905, 3922), True, 'import matplotlib.pyplot as plt\n'), ((3923, 3939), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""L6"""'], {}), "('L6')\n", (3933, 3939), True, 'import matplotlib.pyplot as plt\n'), ((3940, 3986), 'matplotlib.pyplot.title', 'plt.title', (['"""Dual-frequency Melbourne-Wuebbena"""'], {}), "('Dual-frequency Melbourne-Wuebbena')\n", (3949, 3986), True, 'import matplotlib.pyplot as plt\n'), ((3987, 3993), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (3991, 3993), False, 'from matplotlib.pyplot import figure, show\n'), ((1555, 1580), 'numpy.abs', 'np.abs', 
(['(L4[i] - L4[i - 1])'], {}), '(L4[i] - L4[i - 1])\n', (1561, 1580), True, 'import numpy as np\n'), ((1588, 1613), 'numpy.abs', 'np.abs', (['(L4[i] - L4[i - 1])'], {}), '(L4[i] - L4[i - 1])\n', (1594, 1613), True, 'import numpy as np\n'), ((1710, 1733), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1716, 1733), False, 'from matplotlib.pyplot import figure, show\n'), ((2088, 2111), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2094, 2111), False, 'from matplotlib.pyplot import figure, show\n'), ((2509, 2532), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2515, 2532), False, 'from matplotlib.pyplot import figure, show\n'), ((3576, 3601), 'numpy.abs', 'np.abs', (['(L6[i] - L6[i - 1])'], {}), '(L6[i] - L6[i - 1])\n', (3582, 3601), True, 'import numpy as np\n'), ((3609, 3634), 'numpy.abs', 'np.abs', (['(L6[i] - L6[i - 1])'], {}), '(L6[i] - L6[i - 1])\n', (3615, 3634), True, 'import numpy as np\n'), ((3731, 3754), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3737, 3754), False, 'from matplotlib.pyplot import figure, show\n'), ((6444, 6459), 'numpy.abs', 'np.abs', (['(L1 - L2)'], {}), '(L1 - L2)\n', (6450, 6459), True, 'import numpy as np\n'), ((6480, 6490), 'numpy.std', 'np.std', (['L4'], {}), '(L4)\n', (6486, 6490), True, 'import numpy as np\n'), ((6984, 7054), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'criterion', 'label': '"""Slip limit"""', 'linestyle': '"""-"""', 'color': '"""r"""'}), "(y=criterion, label='Slip limit', linestyle='-', color='r')\n", (6995, 7054), True, 'import matplotlib.pyplot as plt\n'), ((7101, 7128), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [epochs]"""'], {}), "('Time [epochs]')\n", (7111, 7128), True, 'import matplotlib.pyplot as plt\n'), ((7137, 7153), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""L4"""'], {}), "('L4')\n", (7147, 7153), True, 'import matplotlib.pyplot as plt\n'), ((7162, 7168), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (7166, 7168), False, 'from matplotlib.pyplot import figure, show\n'), ((5396, 5411), 'numpy.abs', 'np.abs', (['(L1 - L2)'], {}), '(L1 - L2)\n', (5402, 5411), True, 'import numpy as np\n'), ((5436, 5446), 'numpy.std', 'np.std', (['L4'], {}), '(L4)\n', (5442, 5446), True, 'import numpy as np\n'), ((6637, 6662), 'numpy.abs', 'np.abs', (['(L4[i] - L4[i - 1])'], {}), '(L4[i] - L4[i - 1])\n', (6643, 6662), True, 'import numpy as np\n'), ((6678, 6703), 'numpy.abs', 'np.abs', (['(L4[i] - L4[i - 1])'], {}), '(L4[i] - L4[i - 1])\n', (6684, 6703), True, 'import numpy as np\n'), ((6824, 6847), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (6830, 6847), False, 'from matplotlib.pyplot import figure, show\n'), ((5613, 5638), 'numpy.abs', 'np.abs', (['(L4[j] - L4[j - 1])'], {}), '(L4[j] - L4[j - 1])\n', (5619, 5638), True, 'import numpy as np\n'), ((5658, 5683), 'numpy.abs', 'np.abs', (['(L4[j] - L4[j - 1])'], {}), '(L4[j] - L4[j - 1])\n', (5664, 5683), True, 'import numpy as np\n')]
|
from copy import deepcopy
import numpy as np
import pybullet as p
import gym
from gym import spaces
from env.robot import Manipulator
from env.work import Work
class Env():
def __init__(self, reward,
step_max_pos = 0.002,
step_max_orn = 0.02,
initial_pos_noise = 0.001,
initial_orn_noise = 0.001,
step_pos_noise = 0.0002,
step_orn_noise = 0.0002):
p.connect(p.GUI)
p.setPhysicsEngineParameter(enableFileCaching=0)
p.setRealTimeSimulation(False)
p.setGravity(0, 0, -9.8)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.setPhysicsEngineParameter(contactBreakingThreshold=0.001)
# Init
self._is_init_env = False
# Plane
self.plane_pos = [0, 0, -0.1]
p.loadURDF("urdf/plane/plane.urdf", self.plane_pos)
self.reward = reward
self.max_initial_pos_noise = initial_pos_noise
self.max_initial_orn_noise = initial_orn_noise
self.max_step_pos_noise = step_pos_noise
self.max_step_orn_noise = step_orn_noise
# robot
self.step_max_pos = step_max_pos
self.step_max_orn = step_max_orn
self.inv_scaled_force_coef = 5000
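        # decision() divides raw contact forces by this constant to bring the scaled force into the [-1, 1] observation range.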
# for learning
self.action_space = spaces.Box(
low=-1,
high=1,
shape=(6,),
dtype=np.float32
)
self.observation_space = spaces.Box(
low=-1,
high=1,
shape=(12,),
dtype=np.float32
)
self._act_rel_tcp_pose = [0, 0, 0, 0, 0, 0]
def init_env(self, mode = 'rel',
robot_tcp_pose = [0, 0, 0, 0, 0, 0],
robot_base_pose = [0, 0, 0, 0, 0, 0],
robot_tool_pose = [0, 0, 0, 0, 0, 0],
work_base_pose = [0, 0, 0, 0, 0, 0]):
if self._is_init_env == False:
# Load work
self.work = Work(base_pose = work_base_pose)
self.act_abs_work_pose = work_base_pose
# Load robot
self.robot = Manipulator(tool_pose=robot_tool_pose, base_pose=robot_base_pose)
self._reset_robot_pose(mode=mode, tcp_pose=robot_tcp_pose)
self.initial_pos_noise = np.random.uniform(-self.max_initial_pos_noise,
self.max_initial_pos_noise, 3)
self.initial_orn_noise = np.random.uniform(-self.max_initial_orn_noise,
self.max_initial_orn_noise, 3)
self._is_init_env = True
return self.observe_state(mode = mode)
def _reset_robot_pose(self, mode='rel', tcp_pose=[0, 0, 0, 0, 0, 0]):
abs_tcp_pose = np.zeros(6)
if mode == 'rel':
abs_tcp_pose = np.array(self.act_abs_work_pose) + np.array(tcp_pose)
elif mode == 'abs':
abs_tcp_pose = tcp_pose
else:
print("ERROR(enviroment.py): mode is not correct.")
abs_tcp_pose = [0, 0, 0, 0, 0, 0]
self.robot.reset_pose(abs_tcp_pose=abs_tcp_pose)
def reset(self,
mode = 'rel',
tcp_pose = [0, 0, 0, 0, 0, 0],
base_pose = [0, 0, 0, 0, 0, 0],
tool_pose = [0, 0, 0, 0, 0, 0],
work_pose = [0, 0, 0, 0, 0, 0]):
if self._is_init_env == False:
return self.init_env(mode = mode,
robot_tcp_pose = tcp_pose,
robot_base_pose = base_pose,
robot_tool_pose = tool_pose,
work_base_pose = work_pose)
        # For speed: (disabled) skip the full reload when the tool pose is unchanged
'''
if np.linalg.norm( np.array(tool_pose) - self.prev_tool_pose ) < 1e-6:
else:
'''
# Reset env
p.resetSimulation()
# Load Plane
p.loadURDF("urdf/plane/plane.urdf", self.plane_pos)
# Reset work
self.work.reset(base_pose = work_pose)
# Reset Robot
self.robot.reset_base(base_pose=base_pose, tool_pose=tool_pose)
self._reset_robot_pose(mode='rel', tcp_pose=tcp_pose)
self.initial_pos_noise = np.random.uniform(-self.max_initial_pos_noise,
self.max_initial_pos_noise, 3)
self.initial_orn_noise = np.random.uniform(-self.max_initial_orn_noise,
self.max_initial_orn_noise, 3)
self.prev_tool_pose = tool_pose
return self.observe_state(mode = mode)
    def destroy(self):
p.disconnect()
def step(self, action, step):
        # Command-value generation here, so ground-truth (noise-free) values are preferred
cmd_abs_tcp_pose = np.zeros(6)
cmd_abs_tcp_pose[:3] = np.array(self._act_abs_tcp_pose[:3]) + np.array(action[:3])
cmd_abs_tcp_pose[3:6] = np.array(self._act_abs_tcp_pose[3:6]) + np.array(action[3:6])
print('next_pose:', cmd_abs_tcp_pose)
self.robot.move_to_pose(cmd_abs_tcp_pose, mode='direct')
pose, force, success, out_range = self.decision()
r = self.calc_reward(relative_pose = pose,
success = success,
out_range = out_range,
act_step = step)
done = success or out_range
return np.concatenate([pose, force]), r, done, success
def decision(self):
'''
observe
act_abs_tcp_pose
act_rel_tcp_pose
act_abs_work_pose
act_force
'''
act_pose_noisy, act_force = self.observe_state(mode='rel')
scaled_act_force = act_force / self.inv_scaled_force_coef
        # [Note] Success is judged on ground-truth values here
success_range_of_pos = 0.003
success_range_of_orn = 0.04
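        # Success when the TCP is within 0.003 / 0.04 of the work pose (assumed units: metres and radians).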
success = (np.linalg.norm(self._act_rel_tcp_pose[:3]) <= success_range_of_pos and \
np.linalg.norm(self._act_rel_tcp_pose[3:]) <= success_range_of_orn)
        # [Note] Evaluating with ground-truth values here does not seem correct.
out_range_of_pos = 0.1
out_range_of_orn = 0.8
out_range = any([abs(pos) > out_range_of_pos for pos in act_pose_noisy[:3]]) \
or any([abs(orn) > out_range_of_orn for orn in act_pose_noisy[3:6]])
return act_pose_noisy, scaled_act_force, success, out_range
def observe_state(self, mode='rel'):
self._act_abs_tcp_pose, self.act_force, _ = self.robot.get_state()
self._act_abs_work_pose = self.work.get_state()
self._act_rel_tcp_pose = np.array(self._act_abs_tcp_pose) - np.array(self._act_abs_work_pose)
'''
        Noise injection
'''
act_rel_tcp_pose_noisy = np.zeros(6)
act_rel_tcp_pose_noisy[:3] = self._act_rel_tcp_pose[:3] + self.initial_pos_noise
act_rel_tcp_pose_noisy[3:6] = self._act_rel_tcp_pose[3:6] + self.initial_orn_noise
act_rel_tcp_pose_noisy[:3] += np.random.uniform(-self.max_step_pos_noise,
self.max_step_pos_noise, 3)
act_rel_tcp_pose_noisy[3:6] += np.random.uniform(-self.max_step_orn_noise,
self.max_step_orn_noise, 3)
if mode == 'rel':
return act_rel_tcp_pose_noisy, self.act_force
elif mode == 'abs':
act_abs_tcp_pose_noisy = np.zeros(6)
act_abs_tcp_pose_noisy[:3] = self._act_abs_tcp_pose[:3] + self.initial_pos_noise
act_abs_tcp_pose_noisy[3:6] = self._act_abs_tcp_pose[3:6] + self.initial_orn_noise
act_abs_work_pose_noisy = np.zeros(6)
act_abs_work_pose_noisy[:3] = self._act_abs_work_pose[:3] + self.initial_pos_noise
act_abs_work_pose_noisy[3:6] = self._act_abs_work_pose[3:6] + self.initial_orn_noise
return act_abs_tcp_pose_noisy, act_abs_work_pose_noisy, self.act_force
def calc_reward(self, relative_pose, success, out_range, act_step):
return self.reward.reward_function(relative_pose, success, out_range, act_step)
def scale_action(self, action):
scaled_action = deepcopy(action)
scaled_action[:3]*=self.step_max_pos
scaled_action[3:]*=self.step_max_orn
return scaled_action
|
[
"env.work.Work",
"pybullet.resetSimulation",
"copy.deepcopy",
"pybullet.connect",
"numpy.linalg.norm",
"pybullet.setGravity",
"gym.spaces.Box",
"numpy.array",
"pybullet.setPhysicsEngineParameter",
"pybullet.configureDebugVisualizer",
"numpy.zeros",
"pybullet.disconnect",
"numpy.concatenate",
"numpy.random.uniform",
"pybullet.setRealTimeSimulation",
"pybullet.loadURDF",
"env.robot.Manipulator"
] |
[((458, 474), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (467, 474), True, 'import pybullet as p\n'), ((483, 531), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'enableFileCaching': '(0)'}), '(enableFileCaching=0)\n', (510, 531), True, 'import pybullet as p\n'), ((540, 570), 'pybullet.setRealTimeSimulation', 'p.setRealTimeSimulation', (['(False)'], {}), '(False)\n', (563, 570), True, 'import pybullet as p\n'), ((579, 603), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.8)'], {}), '(0, 0, -9.8)\n', (591, 603), True, 'import pybullet as p\n'), ((612, 659), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_GUI', '(0)'], {}), '(p.COV_ENABLE_GUI, 0)\n', (638, 659), True, 'import pybullet as p\n'), ((668, 727), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'contactBreakingThreshold': '(0.001)'}), '(contactBreakingThreshold=0.001)\n', (695, 727), True, 'import pybullet as p\n'), ((841, 892), 'pybullet.loadURDF', 'p.loadURDF', (['"""urdf/plane/plane.urdf"""', 'self.plane_pos'], {}), "('urdf/plane/plane.urdf', self.plane_pos)\n", (851, 892), True, 'import pybullet as p\n'), ((1324, 1380), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(6,)', 'dtype': 'np.float32'}), '(low=-1, high=1, shape=(6,), dtype=np.float32)\n', (1334, 1380), False, 'from gym import spaces\n'), ((1472, 1529), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(12,)', 'dtype': 'np.float32'}), '(low=-1, high=1, shape=(12,), dtype=np.float32)\n', (1482, 1529), False, 'from gym import spaces\n'), ((2752, 2763), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (2760, 2763), True, 'import numpy as np\n'), ((3800, 3819), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (3817, 3819), True, 'import pybullet as p\n'), ((3849, 3900), 'pybullet.loadURDF', 'p.loadURDF', (['"""urdf/plane/plane.urdf"""', 'self.plane_pos'], {}), "('urdf/plane/plane.urdf', self.plane_pos)\n", (3859, 3900), True, 'import pybullet as p\n'), ((4160, 4237), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_initial_pos_noise)', 'self.max_initial_pos_noise', '(3)'], {}), '(-self.max_initial_pos_noise, self.max_initial_pos_noise, 3)\n', (4177, 4237), True, 'import numpy as np\n'), ((4323, 4400), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_initial_orn_noise)', 'self.max_initial_orn_noise', '(3)'], {}), '(-self.max_initial_orn_noise, self.max_initial_orn_noise, 3)\n', (4340, 4400), True, 'import numpy as np\n'), ((4575, 4589), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (4587, 4589), True, 'import pybullet as p\n'), ((4681, 4692), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (4689, 4692), True, 'import numpy as np\n'), ((6605, 6616), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (6613, 6616), True, 'import numpy as np\n'), ((6836, 6907), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_step_pos_noise)', 'self.max_step_pos_noise', '(3)'], {}), '(-self.max_step_pos_noise, self.max_step_pos_noise, 3)\n', (6853, 6907), True, 'import numpy as np\n'), ((6999, 7070), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_step_orn_noise)', 'self.max_step_orn_noise', '(3)'], {}), '(-self.max_step_orn_noise, self.max_step_orn_noise, 3)\n', (7016, 7070), True, 'import numpy as np\n'), ((8019, 8035), 'copy.deepcopy', 'deepcopy', (['action'], {}), '(action)\n', (8027, 8035), False, 'from copy import deepcopy\n'), ((1964, 
1994), 'env.work.Work', 'Work', ([], {'base_pose': 'work_base_pose'}), '(base_pose=work_base_pose)\n', (1968, 1994), False, 'from env.work import Work\n'), ((2099, 2164), 'env.robot.Manipulator', 'Manipulator', ([], {'tool_pose': 'robot_tool_pose', 'base_pose': 'robot_base_pose'}), '(tool_pose=robot_tool_pose, base_pose=robot_base_pose)\n', (2110, 2164), False, 'from env.robot import Manipulator\n'), ((2273, 2350), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_initial_pos_noise)', 'self.max_initial_pos_noise', '(3)'], {}), '(-self.max_initial_pos_noise, self.max_initial_pos_noise, 3)\n', (2290, 2350), True, 'import numpy as np\n'), ((2440, 2517), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_initial_orn_noise)', 'self.max_initial_orn_noise', '(3)'], {}), '(-self.max_initial_orn_noise, self.max_initial_orn_noise, 3)\n', (2457, 2517), True, 'import numpy as np\n'), ((4724, 4760), 'numpy.array', 'np.array', (['self._act_abs_tcp_pose[:3]'], {}), '(self._act_abs_tcp_pose[:3])\n', (4732, 4760), True, 'import numpy as np\n'), ((4763, 4783), 'numpy.array', 'np.array', (['action[:3]'], {}), '(action[:3])\n', (4771, 4783), True, 'import numpy as np\n'), ((4816, 4853), 'numpy.array', 'np.array', (['self._act_abs_tcp_pose[3:6]'], {}), '(self._act_abs_tcp_pose[3:6])\n', (4824, 4853), True, 'import numpy as np\n'), ((4856, 4877), 'numpy.array', 'np.array', (['action[3:6]'], {}), '(action[3:6])\n', (4864, 4877), True, 'import numpy as np\n'), ((5297, 5326), 'numpy.concatenate', 'np.concatenate', (['[pose, force]'], {}), '([pose, force])\n', (5311, 5326), True, 'import numpy as np\n'), ((6465, 6497), 'numpy.array', 'np.array', (['self._act_abs_tcp_pose'], {}), '(self._act_abs_tcp_pose)\n', (6473, 6497), True, 'import numpy as np\n'), ((6500, 6533), 'numpy.array', 'np.array', (['self._act_abs_work_pose'], {}), '(self._act_abs_work_pose)\n', (6508, 6533), True, 'import numpy as np\n'), ((2817, 2849), 'numpy.array', 'np.array', (['self.act_abs_work_pose'], {}), '(self.act_abs_work_pose)\n', (2825, 2849), True, 'import numpy as np\n'), ((2852, 2870), 'numpy.array', 'np.array', (['tcp_pose'], {}), '(tcp_pose)\n', (2860, 2870), True, 'import numpy as np\n'), ((5756, 5798), 'numpy.linalg.norm', 'np.linalg.norm', (['self._act_rel_tcp_pose[:3]'], {}), '(self._act_rel_tcp_pose[:3])\n', (5770, 5798), True, 'import numpy as np\n'), ((5849, 5891), 'numpy.linalg.norm', 'np.linalg.norm', (['self._act_rel_tcp_pose[3:]'], {}), '(self._act_rel_tcp_pose[3:])\n', (5863, 5891), True, 'import numpy as np\n'), ((7272, 7283), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (7280, 7283), True, 'import numpy as np\n'), ((7510, 7521), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (7518, 7521), True, 'import numpy as np\n')]
|
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
nstep=200
nx=400
nv=3
u=np.zeros((nx,nv))
prim=np.zeros((nx,nv))
gam=5./3.
dx=1./nx
dt=1e-3
time=0
x=np.linspace(0,1,num=nx)
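# State layout: prim = (rho, v, p); conserved u = (rho, rho*v, E) with E = 0.5*rho*v**2 + p/(gam-1).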
def ptou(pri):
u=np.zeros((nx,nv))
rho=pri[:,0]
v=pri[:,1]
prs=pri[:,2]
mom=rho*v
u[:,0]=rho
u[:,1]=mom
u[:,2]=0.5*mom*v+prs/(gam-1)
return(u)
def utop(u):
pri=np.zeros((nx,nv))
rho=u[:,0]
mom=u[:,1]
ene=u[:,2]
vel=mom/(rho+1e-6)
pri[:,0]=rho
pri[:,1]=vel
pri[:,2]=(ene-0.5*mom*vel)*(gam-1)
return(pri)
def getmaxv(pri):
rho=pri[:,0]
vel=pri[:,1]
prs=pri[:,2]
cs=np.sqrt(gam*prs/rho)
return(max(abs(vel)+cs))
def getflux(u):
f=np.zeros((nx,nv))
pri=utop(u)
rho=pri[:,0]
v=pri[:,1]
prs=pri[:,2]
mom=u[:,1]
ene=u[:,2]
f[:,0]=mom
f[:,1]=mom*v+prs
f[:,2]=(ene+prs)*v
return(f)
prim[:,0]=1.
prim[:,1]=0.
prim[:,2]=1.
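# Right half of the domain gets low density and pressure (Sod-like shock-tube initial condition).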
for i in range(int(nx/2),nx):
prim[i,0]=0.1
prim[i,1]=0.
prim[i,2]=0.125
print (prim[:,2])
u=ptou(prim)
uold=u
pold=prim
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(x,prim[:,0],label='density')
ax2.plot(x,prim[:,1],label='velocity')
ax3.plot(x,prim[:,2],label='pressure')
fig.show()
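# Time loop: first-order Lax-Friedrichs update; np.roll provides neighbour states and the copied end cells act as simple outflow boundaries.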
for nstep in range(0,nstep):
print (time)
um=np.roll(u, 1,axis=0)
up=np.roll(u,-1,axis=0)
um[0,:] =um[1,:]
up[nx-1,:]=up[nx-2,:]
fm=getflux(um)
fp=getflux(up)
cfl=0.49
    dtdx=1./getmaxv(utop(u))   # 'p' is not defined on the first iteration; recompute primitives from u
dt=dtdx*dx
time=time+dt
un=0.5*(um+up) - cfl*dtdx* (fp-fm)
u=un
p=utop(u)
plt.close(fig)
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(p[:,0])
ax2.plot(p[:,1])
ax3.plot(p[:,2])
fig.show()
|
[
"numpy.sqrt",
"numpy.roll",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec"
] |
[((118, 136), 'numpy.zeros', 'np.zeros', (['(nx, nv)'], {}), '((nx, nv))\n', (126, 136), True, 'import numpy as np\n'), ((141, 159), 'numpy.zeros', 'np.zeros', (['(nx, nv)'], {}), '((nx, nv))\n', (149, 159), True, 'import numpy as np\n'), ((195, 220), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'nx'}), '(0, 1, num=nx)\n', (206, 220), True, 'import numpy as np\n'), ((1105, 1117), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1115, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1123, 1147), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['nv', '(1)'], {}), '(nv, 1)\n', (1140, 1147), False, 'from matplotlib import gridspec\n'), ((240, 258), 'numpy.zeros', 'np.zeros', (['(nx, nv)'], {}), '((nx, nv))\n', (248, 258), True, 'import numpy as np\n'), ((412, 430), 'numpy.zeros', 'np.zeros', (['(nx, nv)'], {}), '((nx, nv))\n', (420, 430), True, 'import numpy as np\n'), ((652, 676), 'numpy.sqrt', 'np.sqrt', (['(gam * prs / rho)'], {}), '(gam * prs / rho)\n', (659, 676), True, 'import numpy as np\n'), ((723, 741), 'numpy.zeros', 'np.zeros', (['(nx, nv)'], {}), '((nx, nv))\n', (731, 741), True, 'import numpy as np\n'), ((1399, 1420), 'numpy.roll', 'np.roll', (['u', '(1)'], {'axis': '(0)'}), '(u, 1, axis=0)\n', (1406, 1420), True, 'import numpy as np\n'), ((1432, 1454), 'numpy.roll', 'np.roll', (['u', '(-1)'], {'axis': '(0)'}), '(u, -1, axis=0)\n', (1439, 1454), True, 'import numpy as np\n'), ((1723, 1737), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1732, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1752, 1764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1762, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1802), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['nv', '(1)'], {}), '(nv, 1)\n', (1795, 1802), False, 'from matplotlib import gridspec\n')]
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #To suppress warnings thrown by tensorflow
from time import sleep
import numpy as np
from cv2 import cv2
import pyautogui as pg
import Sudoku_Core as SC
import OCR
s = 513//9 #Size of board//9
fs = 25 #Size of the final image
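# s is the pixel size of one cell in the 513x513 board screenshot; each detected digit is resized to fs x fs before OCR.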
def getBoard():
pg.click(266, 740)
sleep(1)
pg.click(266, 930) #Changing the difficulty to expert
sleep(2)
image = pg.screenshot(region=(10, 187, 513, 513))
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2GRAY)
_,image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
return image
def readBoard(image):
for i in range(9):
for j in range(9):
subImage = image[i*s + 3: (i+1)*s - 3, j*s + 3: (j+1)*s - 3] #(+3, -3) is a hack to remove border contours
contour, _ = cv2.findContours(subImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contour != []:
(x, y, w, h) = cv2.boundingRect(contour[0])
img = cv2.resize(subImage[y: y+h, x: x+w], (fs, fs), interpolation=cv2.INTER_AREA)
else:
img = np.zeros((fs,fs), dtype='uint8')
SC.board[i][j] = OCR.model.predict(img.reshape(1, fs, fs, 1)).argmax()
def outputBoard():
for ((posY, posX), v) in SC.moves.items():
posX = 42 + posX * 57
posY = 216 + posY * 57
pg.moveTo(posX, posY, 0.1)
pg.click()
# vX = 42 + 55*(v-1)
# vY = 843
# pg.moveTo(vX, vY, 0.1) #To use the numpad in the app
# pg.click()
pg.typewrite(str(v)) #To send numbers from the keyboard
def main():
image = getBoard()
readBoard(image)
print('Got the board, now solving')
if SC.solve(0, 0):
outputBoard()
else:
print('Couldn\'t solve')
input('Press any key to exit')
if __name__ == '__main__':
main()
|
[
"cv2.cv2.threshold",
"pyautogui.moveTo",
"pyautogui.screenshot",
"cv2.cv2.findContours",
"numpy.asarray",
"time.sleep",
"pyautogui.click",
"Sudoku_Core.solve",
"cv2.cv2.resize",
"numpy.zeros",
"Sudoku_Core.moves.items",
"cv2.cv2.boundingRect"
] |
[((298, 316), 'pyautogui.click', 'pg.click', (['(266)', '(740)'], {}), '(266, 740)\n', (306, 316), True, 'import pyautogui as pg\n'), ((318, 326), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (323, 326), False, 'from time import sleep\n'), ((328, 346), 'pyautogui.click', 'pg.click', (['(266)', '(930)'], {}), '(266, 930)\n', (336, 346), True, 'import pyautogui as pg\n'), ((383, 391), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (388, 391), False, 'from time import sleep\n'), ((402, 443), 'pyautogui.screenshot', 'pg.screenshot', ([], {'region': '(10, 187, 513, 513)'}), '(region=(10, 187, 513, 513))\n', (415, 443), True, 'import pyautogui as pg\n'), ((516, 569), 'cv2.cv2.threshold', 'cv2.threshold', (['image', '(127)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(image, 127, 255, cv2.THRESH_BINARY_INV)\n', (529, 569), False, 'from cv2 import cv2\n'), ((1180, 1196), 'Sudoku_Core.moves.items', 'SC.moves.items', ([], {}), '()\n', (1194, 1196), True, 'import Sudoku_Core as SC\n'), ((1551, 1565), 'Sudoku_Core.solve', 'SC.solve', (['(0)', '(0)'], {}), '(0, 0)\n', (1559, 1565), True, 'import Sudoku_Core as SC\n'), ((466, 483), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (476, 483), True, 'import numpy as np\n'), ((1249, 1275), 'pyautogui.moveTo', 'pg.moveTo', (['posX', 'posY', '(0.1)'], {}), '(posX, posY, 0.1)\n', (1258, 1275), True, 'import pyautogui as pg\n'), ((1278, 1288), 'pyautogui.click', 'pg.click', ([], {}), '()\n', (1286, 1288), True, 'import pyautogui as pg\n'), ((776, 846), 'cv2.cv2.findContours', 'cv2.findContours', (['subImage', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(subImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (792, 846), False, 'from cv2 import cv2\n'), ((887, 915), 'cv2.cv2.boundingRect', 'cv2.boundingRect', (['contour[0]'], {}), '(contour[0])\n', (903, 915), False, 'from cv2 import cv2\n'), ((926, 1004), 'cv2.cv2.resize', 'cv2.resize', (['subImage[y:y + h, x:x + w]', '(fs, fs)'], {'interpolation': 'cv2.INTER_AREA'}), '(subImage[y:y + h, x:x + w], (fs, fs), interpolation=cv2.INTER_AREA)\n', (936, 1004), False, 'from cv2 import cv2\n'), ((1026, 1059), 'numpy.zeros', 'np.zeros', (['(fs, fs)'], {'dtype': '"""uint8"""'}), "((fs, fs), dtype='uint8')\n", (1034, 1059), True, 'import numpy as np\n')]
|
import argparse
import json
import numpy as np
import os
import torch
import data_
import models
import utils
from matplotlib import cm, pyplot as plt
from tensorboardX import SummaryWriter
from torch import optim
from torch.utils import data
from tqdm import tqdm
from utils import io
parser = argparse.ArgumentParser()
# CUDA
parser.add_argument('--use_gpu', type=bool, default=True, help='Whether to use GPU.')
# data
parser.add_argument('--dataset_name', type=str, default='spirals',
help='Name of dataset to use.')
parser.add_argument('--n_data_points', default=int(1e6),
help='Number of unique data points in training set.')
parser.add_argument('--batch_size', type=int, default=256,
help='Size of batch used for training.')
parser.add_argument('--num_workers', type=int, default=0,
help='Number of workers used in data loaders.')
# MADE
parser.add_argument('--n_residual_blocks_made', default=4,
help='Number of residual blocks in MADE.')
parser.add_argument('--hidden_dim_made', default=256,
help='Dimensionality of hidden layers in MADE.')
parser.add_argument('--activation_made', default='relu',
help='Activation function for MADE.')
parser.add_argument('--use_batch_norm_made', default=False,
help='Whether to use batch norm in MADE.')
parser.add_argument('--dropout_probability_made', default=None,
help='Dropout probability for MADE.')
# energy net
parser.add_argument('--context_dim', default=64,
help='Dimensionality of context vector.')
parser.add_argument('--n_residual_blocks_energy_net', default=4,
help='Number of residual blocks in energy net.')
parser.add_argument('--hidden_dim_energy_net', default=128,
help='Dimensionality of hidden layers in energy net.')
parser.add_argument('--energy_upper_bound', default=0,
help='Max value for output of energy net.')
parser.add_argument('--activation_energy_net', default='relu',
help='Activation function for energy net.')
parser.add_argument('--use_batch_norm_energy_net', default=False,
help='Whether to use batch norm in energy net.')
parser.add_argument('--dropout_probability_energy_net', default=None,
help='Dropout probability for energy net.')
parser.add_argument('--scale_activation', default='softplus',
help='Activation to use for scales in proposal mixture components.')
parser.add_argument('--apply_context_activation', default=False,
help='Whether to apply activation to context vector.')
# proposal
parser.add_argument('--n_mixture_components', default=10,
help='Number of proposal mixture components (per dimension).')
parser.add_argument('--proposal_component', default='gaussian',
help='Type of location-scale family distribution '
'to use in proposal mixture.')
parser.add_argument('--n_proposal_samples_per_input', default=20,
help='Number of proposal samples used to estimate '
'normalizing constant during training.')
parser.add_argument('--n_proposal_samples_per_input_validation', default=100,
help='Number of proposal samples used to estimate '
'normalizing constant during validation.')
parser.add_argument('--mixture_component_min_scale', default=1e-3,
help='Minimum scale for proposal mixture components.')
# optimization
parser.add_argument('--learning_rate', default=5e-4,
help='Learning rate for Adam.')
parser.add_argument('--n_total_steps', default=int(4e5),
help='Number of total training steps.')
parser.add_argument('--alpha_warm_up_steps', default=5000,
help='Number of warm-up steps for AEM density.')
parser.add_argument('--hard_alpha_warm_up', default=True,
help='Whether to use a hard warm up for alpha')
# logging and checkpoints
parser.add_argument('--monitor_interval', default=100,
help='Interval in steps at which to report training stats.')
parser.add_argument('--visualize_interval', default=10000,
help='Interval in steps at which to report training stats.')
parser.add_argument('--save_interval', default=10000,
help='Interval in steps at which to save model.')
# reproducibility
parser.add_argument('--seed', default=1638128,
help='Random seed for PyTorch and NumPy.')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.use_gpu and torch.cuda.is_available():
device = torch.device('cuda')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
device = torch.device('cpu')
# Generate data
train_dataset = data_.load_plane_dataset(args.dataset_name, args.n_data_points)
train_loader = data_.InfiniteLoader(
dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_epochs=None
)
# Generate test grid data
n_points_per_axis = 512
bounds = np.array([
[-4, 4],
[-4, 4]
])
grid_dataset = data_.TestGridDataset(n_points_per_axis=n_points_per_axis, bounds=bounds)
grid_loader = data.DataLoader(
dataset=grid_dataset,
batch_size=1000,
drop_last=False
)
# various dimensions for autoregressive and energy nets
dim = 2 # D
output_dim_multiplier = args.context_dim + 3 * args.n_mixture_components # K + 3M
# Create MADE
made = models.ResidualMADE(
input_dim=dim,
n_residual_blocks=args.n_residual_blocks_made,
hidden_dim=args.hidden_dim_made,
output_dim_multiplier=output_dim_multiplier,
conditional=False,
activation=utils.parse_activation(args.activation_made),
use_batch_norm=args.use_batch_norm_made,
dropout_probability=args.dropout_probability_made
).to(device)
# create energy net
energy_net = models.ResidualEnergyNet(
input_dim=(args.context_dim + 1),
n_residual_blocks=args.n_residual_blocks_energy_net,
hidden_dim=args.hidden_dim_energy_net,
energy_upper_bound=args.energy_upper_bound,
activation=utils.parse_activation(args.activation_energy_net),
use_batch_norm=args.use_batch_norm_energy_net,
dropout_probability=args.dropout_probability_energy_net
).to(device)
# create AEM
aem = models.AEM(
autoregressive_net=made,
energy_net=energy_net,
context_dim=args.context_dim,
n_proposal_mixture_components=args.n_mixture_components,
proposal_component_family=args.proposal_component,
n_proposal_samples_per_input=args.n_proposal_samples_per_input,
mixture_component_min_scale=args.mixture_component_min_scale,
apply_context_activation=args.apply_context_activation
).to(device)
# make optimizer
parameters = list(made.parameters()) + list(energy_net.parameters())
optimizer = optim.Adam(parameters, lr=args.learning_rate)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_total_steps)
# create summary writer and write to log directory
timestamp = io.get_timestamp()
log_dir = os.path.join(io.get_log_root(), args.dataset_name, timestamp)
writer = SummaryWriter(log_dir=log_dir)
filename = os.path.join(log_dir, 'config.json')
with open(filename, 'w') as file:
json.dump(vars(args), file)
# Training loop
tbar = tqdm(range(args.n_total_steps))
alpha = 0
for step in tbar:
aem.train()
scheduler.step(step)
optimizer.zero_grad()
# training step
batch = next(train_loader).to(device)
log_density, log_proposal_density, _, log_normalizer = aem(batch)
mean_log_density = torch.mean(log_density)
mean_log_proposal_density = torch.mean(log_proposal_density)
mean_log_normalizer = torch.mean(log_normalizer)
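    # Alpha warm-up: optimise only the proposal term (log q) at first; the AEM density term (log p) is added once alpha > 0.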
if args.alpha_warm_up_steps is not None:
if args.hard_alpha_warm_up:
alpha = float(step > args.alpha_warm_up_steps)
else:
alpha = torch.Tensor([min(step / args.alpha_warm_up_steps, 1)])
loss = - (alpha * mean_log_density + mean_log_proposal_density)
else:
loss = - (mean_log_density + mean_log_proposal_density)
loss.backward()
optimizer.step()
if (step + 1) % args.monitor_interval == 0:
s = 'Loss: {:.4f}, log p: {:.4f}, log q: {:.4f}'.format(
loss.item(),
mean_log_density.item(),
mean_log_proposal_density.item()
)
tbar.set_description(s)
# write summaries
summaries = {
'loss': loss.detach(),
'log-prob-aem': mean_log_density.detach(),
'log-prob-proposal': mean_log_proposal_density.detach(),
'log-normalizer': mean_log_normalizer.detach(),
'learning-rate': torch.Tensor(scheduler.get_lr()),
}
for summary, value in summaries.items():
writer.add_scalar(tag=summary, scalar_value=value, global_step=step)
if (step + 1) % args.visualize_interval == 0:
# Plotting
aem.eval()
aem.set_n_proposal_samples_per_input_validation(
args.n_proposal_samples_per_input_validation)
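        # Evaluate AEM and proposal log-densities over the 2-D test grid for the density plots below.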
log_density_np = []
log_proposal_density_np = []
for batch in grid_loader:
batch = batch.to(device)
log_density, log_proposal_density, unnormalized_log_density, log_normalizer = aem(
batch)
log_density_np = np.concatenate((
log_density_np, utils.tensor2numpy(log_density)
))
log_proposal_density_np = np.concatenate((
log_proposal_density_np, utils.tensor2numpy(log_proposal_density)
))
fig, axs = plt.subplots(1, 3, figsize=(7.5, 2.5))
axs[0].hist2d(train_dataset.data[:, 0], train_dataset.data[:, 1],
range=bounds, bins=512, cmap=cm.viridis, rasterized=False)
axs[0].set_xticks([])
axs[0].set_yticks([])
axs[1].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_proposal_density_np).reshape(grid_dataset.X.shape))
axs[1].set_xlim(bounds[0])
axs[1].set_ylim(bounds[1])
axs[1].set_xticks([])
axs[1].set_yticks([])
axs[2].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_density_np).reshape(grid_dataset.X.shape))
axs[2].set_xlim(bounds[0])
axs[2].set_ylim(bounds[1])
axs[2].set_xticks([])
axs[2].set_yticks([])
plt.tight_layout()
path = os.path.join(io.get_output_root(), 'pytorch', '{}.png'.format(args.dataset_name))
if not os.path.exists(path):
os.makedirs(io.get_output_root())
plt.savefig(path, dpi=300)
writer.add_figure(tag='test-grid', figure=fig, global_step=step)
plt.close()
if (step + 1) % args.save_interval == 0:
path = os.path.join(io.get_checkpoint_root(), 'pytorch', '{}.t'.format(args.dataset_name))
if not os.path.exists(path):
os.makedirs(io.get_checkpoint_root())
torch.save(aem.state_dict(), path)
path = os.path.join(io.get_checkpoint_root(),
'pytorch', '{}-{}.t'.format(args.dataset_name, timestamp))
torch.save(aem.state_dict(), path)
|
[
"numpy.array",
"utils.io.get_checkpoint_root",
"torch.cuda.is_available",
"data_.TestGridDataset",
"os.path.exists",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"torch.mean",
"torch.set_default_tensor_type",
"matplotlib.pyplot.close",
"numpy.exp",
"utils.parse_activation",
"numpy.random.seed",
"matplotlib.pyplot.savefig",
"utils.io.get_output_root",
"utils.io.get_log_root",
"torch.device",
"torch.manual_seed",
"torch.optim.Adam",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"os.path.join",
"models.AEM",
"data_.InfiniteLoader",
"matplotlib.pyplot.tight_layout",
"torch.utils.data.DataLoader",
"utils.tensor2numpy",
"utils.io.get_timestamp",
"matplotlib.pyplot.subplots",
"data_.load_plane_dataset"
] |
[((299, 324), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (322, 324), False, 'import argparse\n'), ((4721, 4749), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4738, 4749), False, 'import torch\n'), ((4750, 4775), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4764, 4775), True, 'import numpy as np\n'), ((4990, 5053), 'data_.load_plane_dataset', 'data_.load_plane_dataset', (['args.dataset_name', 'args.n_data_points'], {}), '(args.dataset_name, args.n_data_points)\n', (5014, 5053), False, 'import data_\n'), ((5069, 5191), 'data_.InfiniteLoader', 'data_.InfiniteLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'args.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'num_epochs': 'None'}), '(dataset=train_dataset, batch_size=args.batch_size,\n shuffle=True, drop_last=True, num_epochs=None)\n', (5089, 5191), False, 'import data_\n'), ((5270, 5298), 'numpy.array', 'np.array', (['[[-4, 4], [-4, 4]]'], {}), '([[-4, 4], [-4, 4]])\n', (5278, 5298), True, 'import numpy as np\n'), ((5324, 5397), 'data_.TestGridDataset', 'data_.TestGridDataset', ([], {'n_points_per_axis': 'n_points_per_axis', 'bounds': 'bounds'}), '(n_points_per_axis=n_points_per_axis, bounds=bounds)\n', (5345, 5397), False, 'import data_\n'), ((5412, 5483), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'grid_dataset', 'batch_size': '(1000)', 'drop_last': '(False)'}), '(dataset=grid_dataset, batch_size=1000, drop_last=False)\n', (5427, 5483), False, 'from torch.utils import data\n'), ((7026, 7071), 'torch.optim.Adam', 'optim.Adam', (['parameters'], {'lr': 'args.learning_rate'}), '(parameters, lr=args.learning_rate)\n', (7036, 7071), False, 'from torch import optim\n'), ((7084, 7151), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['optimizer', 'args.n_total_steps'], {}), '(optimizer, args.n_total_steps)\n', (7120, 7151), False, 'from torch import optim\n'), ((7216, 7234), 'utils.io.get_timestamp', 'io.get_timestamp', ([], {}), '()\n', (7232, 7234), False, 'from utils import io\n'), ((7316, 7346), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'log_dir'}), '(log_dir=log_dir)\n', (7329, 7346), False, 'from tensorboardX import SummaryWriter\n'), ((7358, 7394), 'os.path.join', 'os.path.join', (['log_dir', '"""config.json"""'], {}), "(log_dir, 'config.json')\n", (7370, 7394), False, 'import os\n'), ((4797, 4822), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4820, 4822), False, 'import torch\n'), ((4837, 4857), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4849, 4857), False, 'import torch\n'), ((4862, 4917), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (4891, 4917), False, 'import torch\n'), ((4937, 4956), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4949, 4956), False, 'import torch\n'), ((7258, 7275), 'utils.io.get_log_root', 'io.get_log_root', ([], {}), '()\n', (7273, 7275), False, 'from utils import io\n'), ((7768, 7791), 'torch.mean', 'torch.mean', (['log_density'], {}), '(log_density)\n', (7778, 7791), False, 'import torch\n'), ((7824, 7856), 'torch.mean', 'torch.mean', (['log_proposal_density'], {}), '(log_proposal_density)\n', (7834, 7856), False, 'import torch\n'), ((7883, 7909), 'torch.mean', 'torch.mean', (['log_normalizer'], {}), '(log_normalizer)\n', (7893, 7909), False, 'import 
torch\n'), ((11252, 11276), 'utils.io.get_checkpoint_root', 'io.get_checkpoint_root', ([], {}), '()\n', (11274, 11276), False, 'from utils import io\n'), ((6503, 6902), 'models.AEM', 'models.AEM', ([], {'autoregressive_net': 'made', 'energy_net': 'energy_net', 'context_dim': 'args.context_dim', 'n_proposal_mixture_components': 'args.n_mixture_components', 'proposal_component_family': 'args.proposal_component', 'n_proposal_samples_per_input': 'args.n_proposal_samples_per_input', 'mixture_component_min_scale': 'args.mixture_component_min_scale', 'apply_context_activation': 'args.apply_context_activation'}), '(autoregressive_net=made, energy_net=energy_net, context_dim=args\n .context_dim, n_proposal_mixture_components=args.n_mixture_components,\n proposal_component_family=args.proposal_component,\n n_proposal_samples_per_input=args.n_proposal_samples_per_input,\n mixture_component_min_scale=args.mixture_component_min_scale,\n apply_context_activation=args.apply_context_activation)\n', (6513, 6902), False, 'import models\n'), ((9817, 9855), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(7.5, 2.5)'}), '(1, 3, figsize=(7.5, 2.5))\n', (9829, 9855), True, 'from matplotlib import cm, pyplot as plt\n'), ((10628, 10646), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10644, 10646), True, 'from matplotlib import cm, pyplot as plt\n'), ((10836, 10862), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(300)'}), '(path, dpi=300)\n', (10847, 10862), True, 'from matplotlib import cm, pyplot as plt\n'), ((10944, 10955), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10953, 10955), True, 'from matplotlib import cm, pyplot as plt\n'), ((10676, 10696), 'utils.io.get_output_root', 'io.get_output_root', ([], {}), '()\n', (10694, 10696), False, 'from utils import io\n'), ((10760, 10780), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (10774, 10780), False, 'import os\n'), ((11030, 11054), 'utils.io.get_checkpoint_root', 'io.get_checkpoint_root', ([], {}), '()\n', (11052, 11054), False, 'from utils import io\n'), ((11116, 11136), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11130, 11136), False, 'import os\n'), ((5888, 5932), 'utils.parse_activation', 'utils.parse_activation', (['args.activation_made'], {}), '(args.activation_made)\n', (5910, 5932), False, 'import utils\n'), ((6307, 6357), 'utils.parse_activation', 'utils.parse_activation', (['args.activation_energy_net'], {}), '(args.activation_energy_net)\n', (6329, 6357), False, 'import utils\n'), ((10806, 10826), 'utils.io.get_output_root', 'io.get_output_root', ([], {}), '()\n', (10824, 10826), False, 'from utils import io\n'), ((11162, 11186), 'utils.io.get_checkpoint_root', 'io.get_checkpoint_root', ([], {}), '()\n', (11184, 11186), False, 'from utils import io\n'), ((9598, 9629), 'utils.tensor2numpy', 'utils.tensor2numpy', (['log_density'], {}), '(log_density)\n', (9616, 9629), False, 'import utils\n'), ((9741, 9781), 'utils.tensor2numpy', 'utils.tensor2numpy', (['log_proposal_density'], {}), '(log_proposal_density)\n', (9759, 9781), False, 'import utils\n'), ((10157, 10188), 'numpy.exp', 'np.exp', (['log_proposal_density_np'], {}), '(log_proposal_density_np)\n', (10163, 10188), True, 'import numpy as np\n'), ((10435, 10457), 'numpy.exp', 'np.exp', (['log_density_np'], {}), '(log_density_np)\n', (10441, 10457), True, 'import numpy as np\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
'''Analysis file.'''
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
import re # regular expression matching
FLAGS = flags.FLAGS
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
flags.DEFINE_integer('n_b_in_c', 10, 'number of batches in one chunk of data')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_integer('ratio_SU', 2, 'ratio of subunits/cells')
flags.DEFINE_string('model_id', 'poisson', 'which model to fit')
FLAGS = flags.FLAGS
def main(argv):
print('\nCode started')
np.random.seed(FLAGS.np_randseed)
random.seed(FLAGS.randseed)
## Load data summary
filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(filename, 'r')
data_summary = sio.loadmat(summary_file)
cells = np.squeeze(data_summary['cells'])
if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
cells_choose = (cells ==3287) | (cells ==3318 ) | (cells ==3155) | (cells ==3066)
if FLAGS.model_id == 'poisson_full':
cells_choose = np.array(np.ones(np.shape(cells)), dtype='bool')
n_cells = np.sum(cells_choose)
tot_spks = np.squeeze(data_summary['tot_spks'])
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
tot_spks_chosen_cells = tot_spks[cells_choose]
chosen_mask = np.array(np.sum(total_mask[cells_choose,:],0)>0, dtype='bool')
print(np.shape(chosen_mask))
print(np.sum(chosen_mask))
stim_dim = np.sum(chosen_mask)
print('\ndataset summary loaded')
# use stim_dim, chosen_mask, cells_choose, tot_spks_chosen_cells, n_cells
# decide the number of subunits to fit
n_su = FLAGS.ratio_SU*n_cells
#batchsz = [100, 500, 1000, 100, 500, 1000, 100, 500, 1000, 1000, 1000, 5000, 10000, 5000, 10000]
#n_b_in_c = [10, 2, 1, 10, 2, 1, 10, 2, 1, 1, 1, 1, 1, 1, 1 ]
#step_sz = [0.0001, 0.0001, 0.0001, 0.01, 0.01, 0.01 , 1, 1, 1, 10, 100, 10, 10, 1, 1 ]
batchsz = [100, 500, 1000, 5000, 1000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000]
n_b_in_c = [10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1 ]
step_sz = [0.1, 0.1, 0.1, 0.1, 0.1, 1 , 1, 1, 1, 1, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10 ]
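  # Each (batch size, chunks-per-batch, step size) configuration below is restored from its own checkpoint and its learned subunit filters are drawn as one row of the figure.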
with tf.Session() as sess:
# Learn population model!
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
resp = tf.placeholder(tf.float32, name='resp')
data_len = tf.placeholder(tf.float32, name='data_len')
# get filename
if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'poisson_full':
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
a = tf.Variable(np.array(0.1 * np.random.rand(n_cells, 1, n_su), dtype='float32'))
if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
a = tf.Variable(np.array(0.01 * np.random.rand(n_su, n_cells), dtype='float32'))
b_init = np.random.randn(n_cells) #np.log((np.sum(response,0))/(response.shape[0]-np.sum(response,0)))
b = tf.Variable(b_init,dtype='float32')
plt.figure()
for icnt, ibatchsz in enumerate(batchsz):
in_b_in_c = n_b_in_c[icnt]
istep_sz = np.array(step_sz[icnt],dtype='double')
print(icnt)
if FLAGS.model_id == 'poisson':
short_filename = ('data_model=ASM_pop_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) +
'_step_sz'+ str(istep_sz)+'_bg')
else:
short_filename = ('data_model='+ str(FLAGS.model_id) +'_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) +
'_step_sz'+ str(istep_sz)+'_bg')
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
save_location = parent_folder +short_filename + '/'
print(gfile.IsDirectory(save_location))
print(save_location)
save_filename = save_location + short_filename
#determine filelist
file_list = gfile.ListDirectory(save_location)
save_filename = save_location + short_filename
print('\nLoading: ', save_filename)
bin_files = []
meta_files = []
for file_n in file_list:
if re.search(short_filename + '.', file_n):
if re.search('.meta', file_n):
meta_files += [file_n]
else:
bin_files += [file_n]
#print(bin_files)
print(len(meta_files), len(bin_files), len(file_list))
# get latest iteration
iterations = np.array([])
for file_name in bin_files:
try:
iterations = np.append(iterations, int(file_name.split('/')[-1].split('-')[-1]))
except:
print('Could not load filename: ' + file_name)
iterations.sort()
print(iterations)
iter_plot = iterations[-1]
print(int(iter_plot))
# load tensorflow variables
saver_var = tf.train.Saver(tf.all_variables())
restore_file = save_filename + '-' + str(int(iter_plot))
saver_var.restore(sess, restore_file)
a_eval = a.eval()
print(np.exp(np.squeeze(a_eval)))
#print(np.shape(a_eval))
# get 2D region to plot
mask2D = np.reshape(chosen_mask, [40, 80])
nz_idx = np.nonzero(mask2D)
np.shape(nz_idx)
print(nz_idx)
ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1])
xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1])
w_eval = w.eval()
#plt.figure()
n_su = w_eval.shape[1]
for isu in np.arange(n_su):
xx = np.zeros((3200))
xx[chosen_mask] = w_eval[:, isu]
fig = plt.subplot(20, n_su, n_su * icnt + isu+1)
plt.imshow(np.reshape(xx, [40, 80]), interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
#if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
# plt.title(str(a_eval[isu, :]))
#else:
# plt.title(str(np.squeeze(np.exp(a_eval[:, 0, isu]))), fontsize=12)
if isu == 4:
plt.title('Iteration:' + str(int(iter_plot)) + ' batchSz:' + str(ibatchsz) + ' step size:' + str(istep_sz), fontsize=18)
plt.show()
plt.draw()
if __name__ == '__main__':
app.run()
|
[
"absl.gfile.Open",
"numpy.random.rand",
"scipy.io.loadmat",
"numpy.array",
"numpy.arange",
"re.search",
"numpy.reshape",
"tensorflow.Session",
"tensorflow.placeholder",
"absl.gfile.IsDirectory",
"absl.app.run",
"numpy.max",
"numpy.random.seed",
"numpy.min",
"matplotlib.pyplot.ylim",
"tensorflow.all_variables",
"tensorflow.Variable",
"matplotlib.use",
"numpy.squeeze",
"numpy.nonzero",
"numpy.shape",
"matplotlib.pyplot.draw",
"numpy.random.randn",
"absl.flags.DEFINE_string",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"absl.flags.DEFINE_integer",
"random.seed",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"absl.gfile.ListDirectory",
"matplotlib.pyplot.subplot"
] |
[((836, 859), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (850, 859), False, 'import matplotlib\n'), ((1072, 1163), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""folder_name"""', '"""experiment4"""', '"""folder where to store all the data"""'], {}), "('folder_name', 'experiment4',\n 'folder where to store all the data')\n", (1091, 1163), False, 'from absl import flags\n'), ((1161, 1260), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""save_location"""', '"""/home/bhaishahster/"""', '"""where to store logs and outputs?"""'], {}), "('save_location', '/home/bhaishahster/',\n 'where to store logs and outputs?')\n", (1180, 1260), False, 'from absl import flags\n'), ((1299, 1405), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_location"""', '"""/home/bhaishahster/data_breakdown/"""', '"""where to take data from?"""'], {}), "('data_location', '/home/bhaishahster/data_breakdown/',\n 'where to take data from?')\n", (1318, 1405), False, 'from absl import flags\n'), ((1442, 1520), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_b_in_c"""', '(10)', '"""number of batches in one chunk of data"""'], {}), "('n_b_in_c', 10, 'number of batches in one chunk of data')\n", (1462, 1520), False, 'from absl import flags\n'), ((1521, 1578), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""np_randseed"""', '(23)', '"""numpy RNG seed"""'], {}), "('np_randseed', 23, 'numpy RNG seed')\n", (1541, 1578), False, 'from absl import flags\n'), ((1579, 1634), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""randseed"""', '(65)', '"""python RNG seed"""'], {}), "('randseed', 65, 'python RNG seed')\n", (1599, 1634), False, 'from absl import flags\n'), ((1635, 1697), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ratio_SU"""', '(2)', '"""ratio of subunits/cells"""'], {}), "('ratio_SU', 2, 'ratio of subunits/cells')\n", (1655, 1697), False, 'from absl import flags\n'), ((1698, 1762), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_id"""', '"""poisson"""', '"""which model to fit"""'], {}), "('model_id', 'poisson', 'which model to fit')\n", (1717, 1762), False, 'from absl import flags\n'), ((1829, 1862), 'numpy.random.seed', 'np.random.seed', (['FLAGS.np_randseed'], {}), '(FLAGS.np_randseed)\n', (1843, 1862), True, 'import numpy as np, h5py\n'), ((1865, 1892), 'random.seed', 'random.seed', (['FLAGS.randseed'], {}), '(FLAGS.randseed)\n', (1876, 1892), False, 'import random\n'), ((1989, 2014), 'absl.gfile.Open', 'gfile.Open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1999, 2014), False, 'from absl import gfile\n'), ((2032, 2057), 'scipy.io.loadmat', 'sio.loadmat', (['summary_file'], {}), '(summary_file)\n', (2043, 2057), True, 'import scipy.io as sio\n'), ((2068, 2101), 'numpy.squeeze', 'np.squeeze', (["data_summary['cells']"], {}), "(data_summary['cells'])\n", (2078, 2101), True, 'import numpy as np, h5py\n'), ((2402, 2422), 'numpy.sum', 'np.sum', (['cells_choose'], {}), '(cells_choose)\n', (2408, 2422), True, 'import numpy as np, h5py\n'), ((2437, 2473), 'numpy.squeeze', 'np.squeeze', (["data_summary['tot_spks']"], {}), "(data_summary['tot_spks'])\n", (2447, 2473), True, 'import numpy as np, h5py\n'), ((2741, 2760), 'numpy.sum', 'np.sum', (['chosen_mask'], {}), '(chosen_mask)\n', (2747, 2760), True, 'import numpy as np, h5py\n'), ((7693, 7703), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7701, 7703), True, 'import matplotlib.pyplot as plt\n'), ((7706, 7716), 'matplotlib.pyplot.draw', 
'plt.draw', ([], {}), '()\n', (7714, 7716), True, 'import matplotlib.pyplot as plt\n'), ((7756, 7765), 'absl.app.run', 'app.run', ([], {}), '()\n', (7763, 7765), False, 'from absl import app\n'), ((2489, 2536), 'numpy.squeeze', 'np.squeeze', (["data_summary['totalMaskAccept_log']"], {}), "(data_summary['totalMaskAccept_log'])\n", (2499, 2536), True, 'import numpy as np, h5py\n'), ((2675, 2696), 'numpy.shape', 'np.shape', (['chosen_mask'], {}), '(chosen_mask)\n', (2683, 2696), True, 'import numpy as np, h5py\n'), ((2706, 2725), 'numpy.sum', 'np.sum', (['chosen_mask'], {}), '(chosen_mask)\n', (2712, 2725), True, 'import numpy as np, h5py\n'), ((3709, 3721), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3719, 3721), True, 'import tensorflow as tf\n'), ((3772, 3835), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, stim_dim]', 'name': '"""stim"""'}), "(tf.float32, shape=[None, stim_dim], name='stim')\n", (3786, 3835), True, 'import tensorflow as tf\n'), ((3847, 3886), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""resp"""'}), "(tf.float32, name='resp')\n", (3861, 3886), True, 'import tensorflow as tf\n'), ((3902, 3945), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""data_len"""'}), "(tf.float32, name='data_len')\n", (3916, 3945), True, 'import tensorflow as tf\n'), ((4619, 4631), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4629, 4631), True, 'import matplotlib.pyplot as plt\n'), ((2613, 2651), 'numpy.sum', 'np.sum', (['total_mask[cells_choose, :]', '(0)'], {}), '(total_mask[cells_choose, :], 0)\n', (2619, 2651), True, 'import numpy as np, h5py\n'), ((4474, 4498), 'numpy.random.randn', 'np.random.randn', (['n_cells'], {}), '(n_cells)\n', (4489, 4498), True, 'import numpy as np, h5py\n'), ((4578, 4614), 'tensorflow.Variable', 'tf.Variable', (['b_init'], {'dtype': '"""float32"""'}), "(b_init, dtype='float32')\n", (4589, 4614), True, 'import tensorflow as tf\n'), ((4728, 4767), 'numpy.array', 'np.array', (['step_sz[icnt]'], {'dtype': '"""double"""'}), "(step_sz[icnt], dtype='double')\n", (4736, 4767), True, 'import numpy as np, h5py\n'), ((5443, 5477), 'absl.gfile.ListDirectory', 'gfile.ListDirectory', (['save_location'], {}), '(save_location)\n', (5462, 5477), False, 'from absl import gfile\n'), ((5955, 5967), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5963, 5967), True, 'import numpy as np, h5py\n'), ((6628, 6661), 'numpy.reshape', 'np.reshape', (['chosen_mask', '[40, 80]'], {}), '(chosen_mask, [40, 80])\n', (6638, 6661), True, 'import numpy as np, h5py\n'), ((6677, 6695), 'numpy.nonzero', 'np.nonzero', (['mask2D'], {}), '(mask2D)\n', (6687, 6695), True, 'import numpy as np, h5py\n'), ((6702, 6718), 'numpy.shape', 'np.shape', (['nz_idx'], {}), '(nz_idx)\n', (6710, 6718), True, 'import numpy as np, h5py\n'), ((6962, 6977), 'numpy.arange', 'np.arange', (['n_su'], {}), '(n_su)\n', (6971, 6977), True, 'import numpy as np, h5py\n'), ((2358, 2373), 'numpy.shape', 'np.shape', (['cells'], {}), '(cells)\n', (2366, 2373), True, 'import numpy as np, h5py\n'), ((5284, 5316), 'absl.gfile.IsDirectory', 'gfile.IsDirectory', (['save_location'], {}), '(save_location)\n', (5301, 5316), False, 'from absl import gfile\n'), ((5658, 5697), 're.search', 're.search', (["(short_filename + '.')", 'file_n'], {}), "(short_filename + '.', file_n)\n", (5667, 5697), False, 'import re\n'), ((6357, 6375), 'tensorflow.all_variables', 'tf.all_variables', ([], {}), '()\n', (6373, 6375), True, 'import tensorflow as 
tf\n'), ((6992, 7006), 'numpy.zeros', 'np.zeros', (['(3200)'], {}), '(3200)\n', (7000, 7006), True, 'import numpy as np, h5py\n'), ((7064, 7108), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', 'n_su', '(n_su * icnt + isu + 1)'], {}), '(20, n_su, n_su * icnt + isu + 1)\n', (7075, 7108), True, 'import matplotlib.pyplot as plt\n'), ((7199, 7213), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (7207, 7213), True, 'import matplotlib.pyplot as plt\n'), ((7222, 7236), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (7230, 7236), True, 'import matplotlib.pyplot as plt\n'), ((5712, 5738), 're.search', 're.search', (['""".meta"""', 'file_n'], {}), "('.meta', file_n)\n", (5721, 5738), False, 'import re\n'), ((6530, 6548), 'numpy.squeeze', 'np.squeeze', (['a_eval'], {}), '(a_eval)\n', (6540, 6548), True, 'import numpy as np, h5py\n'), ((7127, 7151), 'numpy.reshape', 'np.reshape', (['xx', '[40, 80]'], {}), '(xx, [40, 80])\n', (7137, 7151), True, 'import numpy as np, h5py\n'), ((4076, 4107), 'numpy.random.randn', 'np.random.randn', (['stim_dim', 'n_su'], {}), '(stim_dim, n_su)\n', (4091, 4107), True, 'import numpy as np, h5py\n'), ((4164, 4196), 'numpy.random.rand', 'np.random.rand', (['n_cells', '(1)', 'n_su'], {}), '(n_cells, 1, n_su)\n', (4178, 4196), True, 'import numpy as np, h5py\n'), ((4320, 4351), 'numpy.random.randn', 'np.random.randn', (['stim_dim', 'n_su'], {}), '(stim_dim, n_su)\n', (4335, 4351), True, 'import numpy as np, h5py\n'), ((4409, 4438), 'numpy.random.rand', 'np.random.rand', (['n_su', 'n_cells'], {}), '(n_su, n_cells)\n', (4423, 4438), True, 'import numpy as np, h5py\n'), ((6762, 6779), 'numpy.min', 'np.min', (['nz_idx[0]'], {}), '(nz_idx[0])\n', (6768, 6779), True, 'import numpy as np, h5py\n'), ((6783, 6800), 'numpy.max', 'np.max', (['nz_idx[0]'], {}), '(nz_idx[0])\n', (6789, 6800), True, 'import numpy as np, h5py\n'), ((6828, 6845), 'numpy.min', 'np.min', (['nz_idx[1]'], {}), '(nz_idx[1])\n', (6834, 6845), True, 'import numpy as np, h5py\n'), ((6849, 6866), 'numpy.max', 'np.max', (['nz_idx[1]'], {}), '(nz_idx[1])\n', (6855, 6866), True, 'import numpy as np, h5py\n')]
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model components."""
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class CausalSelfAttention(hk.MultiHeadAttention):
"""Self attention with a causal mask applied."""
def __call__(
self,
query: jnp.ndarray,
key: Optional[jnp.ndarray] = None,
value: Optional[jnp.ndarray] = None,
mask: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
key = key if key is not None else query
value = value if value is not None else query
if query.ndim != 3:
raise ValueError('Expect queries of shape [B, T, D].')
seq_len = query.shape[1]
causal_mask = np.tril(np.ones((seq_len, seq_len)))
mask = mask * causal_mask if mask is not None else causal_mask
return super().__call__(query, key, value, mask)
class DenseBlock(hk.Module):
"""A 2-layer MLP which widens then narrows the input."""
def __init__(self,
init_scale: float,
widening_factor: int = 4,
name: Optional[str] = None):
super().__init__(name=name)
self._init_scale = init_scale
self._widening_factor = widening_factor
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
hiddens = x.shape[-1]
initializer = hk.initializers.VarianceScaling(self._init_scale)
x = hk.Linear(self._widening_factor * hiddens, w_init=initializer)(x)
x = jax.nn.gelu(x)
return hk.Linear(hiddens, w_init=initializer)(x)
class Transformer(hk.Module):
"""A transformer stack."""
def __init__(self,
num_heads: int,
num_layers: int,
dropout_rate: float,
name: Optional[str] = None):
super().__init__(name=name)
self._num_layers = num_layers
self._num_heads = num_heads
self._dropout_rate = dropout_rate
def __call__(self,
h: jnp.ndarray,
mask: Optional[jnp.ndarray],
is_training: bool) -> jnp.ndarray:
"""Connects the transformer.
Args:
h: Inputs, [B, T, D].
mask: Padding mask, [B, T].
is_training: Whether we're training or not.
Returns:
Array of shape [B, T, D].
"""
init_scale = 2. / self._num_layers
dropout_rate = self._dropout_rate if is_training else 0.
if mask is not None:
mask = mask[:, None, None, :]
# Note: names chosen to approximately match those used in the GPT-2 code;
# see https://github.com/openai/gpt-2/blob/master/src/model.py.
for i in range(self._num_layers):
h_norm = layer_norm(h, name=f'h{i}_ln_1')
h_attn = CausalSelfAttention(
num_heads=self._num_heads,
key_size=32,
w_init_scale=init_scale,
name=f'h{i}_attn')(h_norm, mask=mask)
h_attn = hk.dropout(hk.next_rng_key(), dropout_rate, h_attn)
h = h + h_attn
h_norm = layer_norm(h, name=f'h{i}_ln_2')
h_dense = DenseBlock(init_scale, name=f'h{i}_mlp')(h_norm)
h_dense = hk.dropout(hk.next_rng_key(), dropout_rate, h_dense)
h = h + h_dense
h = layer_norm(h, name='ln_f')
return h
def layer_norm(x: jnp.ndarray, name: Optional[str] = None) -> jnp.ndarray:
"""Apply a unique LayerNorm to x with default settings."""
return hk.LayerNorm(axis=-1,
create_scale=True,
create_offset=True,
name=name)(x)
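
# --- Usage sketch (illustrative, not part of the original module) ---
# The stack above expects already-embedded inputs h of shape [B, T, D]; token
# embedding/unembedding layers are assumed to live elsewhere. A minimal way to
# run it under Haiku (hyperparameter values below are made up):
#
#   def forward(h, mask, is_training):
#       return Transformer(num_heads=8, num_layers=6, dropout_rate=0.1)(h, mask, is_training)
#
#   forward_fn = hk.transform(forward)
#   rng = jax.random.PRNGKey(0)
#   params = forward_fn.init(rng, h, mask, True)
#   out = forward_fn.apply(params, rng, h, mask, True)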
|
[
"numpy.ones",
"jax.nn.gelu",
"haiku.initializers.VarianceScaling",
"haiku.next_rng_key",
"haiku.LayerNorm",
"haiku.Linear"
] |
[((1964, 2013), 'haiku.initializers.VarianceScaling', 'hk.initializers.VarianceScaling', (['self._init_scale'], {}), '(self._init_scale)\n', (1995, 2013), True, 'import haiku as hk\n'), ((2096, 2110), 'jax.nn.gelu', 'jax.nn.gelu', (['x'], {}), '(x)\n', (2107, 2110), False, 'import jax\n'), ((3945, 4016), 'haiku.LayerNorm', 'hk.LayerNorm', ([], {'axis': '(-1)', 'create_scale': '(True)', 'create_offset': '(True)', 'name': 'name'}), '(axis=-1, create_scale=True, create_offset=True, name=name)\n', (3957, 4016), True, 'import haiku as hk\n'), ((1375, 1402), 'numpy.ones', 'np.ones', (['(seq_len, seq_len)'], {}), '((seq_len, seq_len))\n', (1382, 1402), True, 'import numpy as np\n'), ((2022, 2084), 'haiku.Linear', 'hk.Linear', (['(self._widening_factor * hiddens)'], {'w_init': 'initializer'}), '(self._widening_factor * hiddens, w_init=initializer)\n', (2031, 2084), True, 'import haiku as hk\n'), ((2122, 2160), 'haiku.Linear', 'hk.Linear', (['hiddens'], {'w_init': 'initializer'}), '(hiddens, w_init=initializer)\n', (2131, 2160), True, 'import haiku as hk\n'), ((3483, 3500), 'haiku.next_rng_key', 'hk.next_rng_key', ([], {}), '()\n', (3498, 3500), True, 'import haiku as hk\n'), ((3685, 3702), 'haiku.next_rng_key', 'hk.next_rng_key', ([], {}), '()\n', (3700, 3702), True, 'import haiku as hk\n')]
|
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import time
import importlib
import multiprocessing
import cv2
import fire
import logzero
from logzero import logger
import numpy as np
from rmexp import config, cvutils, dbutils, gabriel_pb2, client
from rmexp.schema import models
logzero.formatter(logging.Formatter(
fmt='%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s', datefmt='%H:%M:%S'))
logzero.loglevel(logging.DEBUG)
def work_loop(job_queue, app, busy_wait=None):
"""[summary]
Arguments:
job_queue {[type]} -- [description]
app {[type]} -- [description]
Keyword Arguments:
busy_wait {float} -- if not None, busy spin seconds instead of running actual app (default: {None})
"""
handler = importlib.import_module(app).Handler()
while True:
get_ts = time.time()
msg = job_queue.get()[0]
get_wait = time.time() - get_ts
if get_wait > 2e-3:
logger.warn("[pid {}] took {} ms to get a new request. Maybe waiting".format(
os.getpid(), int(1000 * get_wait)))
arrival_ts = time.time()
gabriel_msg = gabriel_pb2.Message()
gabriel_msg.ParseFromString(msg)
encoded_im, ts = gabriel_msg.data, gabriel_msg.timestamp
logger.debug("[pid {}] about to process frame {}".format(
os.getpid(), gabriel_msg.index))
cts = time.clock()
if not busy_wait:
# do real work
encoded_im_np = np.frombuffer(encoded_im, dtype=np.uint8)
img = cv2.imdecode(encoded_im_np, cv2.CV_LOAD_IMAGE_UNCHANGED)
result = handler.process(img)
else:
# busy wait fixed time
tic = time.time()
while True:
if time.time() - tic > busy_wait:
break
result = 'busy wait {}'.format(busy_wait)
finished_ts = time.time()
time_lapse = (finished_ts - ts) * 1000
cpu_proc_ms = round((time.clock() - cts) * 1000)
if gabriel_msg.reply:
reply = gabriel_pb2.Message()
reply.data = str(result)
reply.timestamp = gabriel_msg.timestamp
reply.index = gabriel_msg.index
reply.finished_ts = finished_ts
reply.arrival_ts = arrival_ts
reply.cpu_proc_ms = cpu_proc_ms
job_queue.put([reply.SerializeToString(), ])
logger.debug('[pid {}] takes {} ms (cpu: {} ms) for frame {}: {}.'.format(
os.getpid(), (time.time() - ts) * 1000, cpu_proc_ms, gabriel_msg.index, result))
class Sampler(object):
"""A Class to sample video stream. Designed to work with cam.read().
Sample once every sample_period calls
"""
def __init__(self, sample_period, sample_func=None):
super(Sampler, self).__init__()
self._sp = sample_period
assert(type(sample_period) is int and sample_period > 0)
self._sf = sample_func
self._cnt = 0
def sample(self):
while True:
self._cnt = (self._cnt + 1) % self._sp
if self._cnt == 0:
return self._sf()
self._sf()
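# Usage sketch (illustrative, not in the original file): take every 5th frame from a capture.
#   cam = cv2.VideoCapture(video_uri)
#   sampler = Sampler(5, sample_func=cam.read)
#   has_frame, img = sampler.sample()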
def process_and_time(img, app_handler):
ts = time.time()
result = app_handler.process(img)
time_lapse = int(round((time.time() - ts) * 1000))
return result, time_lapse
def store(
data,
session,
store_result,
store_latency,
store_profile,
**kwargs):
name, trace, idx, result, time_lapse = data
if store_result:
rec, _ = dbutils.get_or_create(
session,
models.SS,
name=name,
index=idx,
trace=trace)
rec.val = str(result)
if store_latency:
rec, _ = dbutils.get_or_create(
session,
models.LegoLatency,
name=name,
index=idx)
rec.val = int(time_lapse)
if store_profile:
rec = kwargs
rec.update(
{'trace': trace,
'index': idx,
'name': name,
'latency': time_lapse
}
)
dbutils.insert(
session,
models.ResourceLatency,
rec
)
def batch_process(video_uri,
app,
experiment_name,
trace=None,
store_result=False,
store_latency=False,
store_profile=False,
**kwargs):
"""Batch process a video. Able to store both the result and the frame processing latency.
Arguments:
video_uri {string} -- Video URI
app {string} -- Applicaiton name
experiment_name {string} -- Experiment name
Keyword Arguments:
trace {string} -- Trace id
        store_result {bool} -- whether to store the per-frame result into the database (default: {False})
        store_latency {bool} -- whether to store the per-frame processing latency (default: {False})
        store_profile {bool} -- whether to store a resource/latency profile record (default: {False})
cpu {string} -- No of CPUs used. Used to populate profile database
memory {string} -- No of memory used. Used to populate profile database
num_worker {int} -- No of simultaneous workers. Used to populate profile database
"""
if trace is None:
trace = os.path.basename(os.path.dirname(video_uri))
app = importlib.import_module(app)
app_handler = app.Handler()
vc = client.VideoClient(
app.__name__, video_uri, None, loop=False, random_start=False)
idx = 1
with dbutils.session_scope() as session:
for img in vc.get_frame_generator():
cpu_time_ts = time.clock()
result, time_lapse = process_and_time(img, app_handler)
logger.debug("[pid: {}] processing frame {} from {}. {} ms".format(os.getpid(),
idx, video_uri, int(time_lapse)))
logger.debug(result)
store(
(experiment_name, trace, idx, result, time_lapse),
session,
store_result,
store_latency,
store_profile,
**kwargs
)
idx += 1
def phash(video_uri):
cam = cv2.VideoCapture(video_uri)
has_frame = True
with dbutils.session_scope(dry_run=False) as sess:
trace_name = os.path.basename(os.path.dirname(video_uri))
idx = 1
while has_frame:
has_frame, img = cam.read()
if img is not None:
cur_hash = cvutils.phash(img)
sess.add(models.SS(
name='{}-f{}-phash'.format(trace_name, idx),
val=str(cur_hash),
trace=trace_name))
idx += 1
def phash_diff_adjacent_frame(video_uri, output_dir):
cam = cv2.VideoCapture(video_uri)
os.makedirs(output_dir)
has_frame = True
prev_hash = None
idx = 1
logger.debug('calculating phash diff for adjacent frames')
while has_frame:
has_frame, img = cam.read()
if img is not None:
cur_hash = cvutils.phash(img)
if prev_hash is not None:
diff = cur_hash - prev_hash
cv2.putText(img, 'diff={}'.format(
diff), (int(img.shape[1] / 3), img.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), thickness=5)
cv2.imwrite(os.path.join(
output_dir, '{:010d}.jpg'.format(idx)), img)
logger.debug(diff)
prev_hash = cur_hash
idx += 1
if __name__ == "__main__":
fire.Fire()
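    # With fire.Fire(), the module-level functions become CLI commands, e.g. (illustrative invocation):
    #   python <this_module>.py batch_process --video_uri=... --app=... --experiment_name=... --store_result=True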
|
[
"time.clock",
"fire.Fire",
"cv2.imdecode",
"logzero.logger.debug",
"rmexp.gabriel_pb2.Message",
"rmexp.dbutils.insert",
"rmexp.dbutils.get_or_create",
"os.getpid",
"numpy.frombuffer",
"importlib.import_module",
"os.path.dirname",
"time.time",
"rmexp.client.VideoClient",
"os.makedirs",
"logging.Formatter",
"rmexp.dbutils.session_scope",
"logzero.loglevel",
"cv2.VideoCapture",
"rmexp.cvutils.phash"
] |
[((460, 491), 'logzero.loglevel', 'logzero.loglevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (476, 491), False, 'import logzero\n'), ((356, 458), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s"""', 'datefmt': '"""%H:%M:%S"""'}), "(fmt=\n '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s', datefmt='%H:%M:%S')\n", (373, 458), False, 'import logging\n'), ((3275, 3286), 'time.time', 'time.time', ([], {}), '()\n', (3284, 3286), False, 'import time\n'), ((5414, 5442), 'importlib.import_module', 'importlib.import_module', (['app'], {}), '(app)\n', (5437, 5442), False, 'import importlib\n'), ((5484, 5570), 'rmexp.client.VideoClient', 'client.VideoClient', (['app.__name__', 'video_uri', 'None'], {'loop': '(False)', 'random_start': '(False)'}), '(app.__name__, video_uri, None, loop=False, random_start=\n False)\n', (5502, 5570), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((6320, 6347), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_uri'], {}), '(video_uri)\n', (6336, 6347), False, 'import cv2\n'), ((6915, 6942), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_uri'], {}), '(video_uri)\n', (6931, 6942), False, 'import cv2\n'), ((6947, 6970), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (6958, 6970), False, 'import os\n'), ((7029, 7087), 'logzero.logger.debug', 'logger.debug', (['"""calculating phash diff for adjacent frames"""'], {}), "('calculating phash diff for adjacent frames')\n", (7041, 7087), False, 'from logzero import logger\n'), ((7703, 7714), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (7712, 7714), False, 'import fire\n'), ((883, 894), 'time.time', 'time.time', ([], {}), '()\n', (892, 894), False, 'import time\n'), ((1160, 1171), 'time.time', 'time.time', ([], {}), '()\n', (1169, 1171), False, 'import time\n'), ((1195, 1216), 'rmexp.gabriel_pb2.Message', 'gabriel_pb2.Message', ([], {}), '()\n', (1214, 1216), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((1450, 1462), 'time.clock', 'time.clock', ([], {}), '()\n', (1460, 1462), False, 'import time\n'), ((1959, 1970), 'time.time', 'time.time', ([], {}), '()\n', (1968, 1970), False, 'import time\n'), ((3627, 3703), 'rmexp.dbutils.get_or_create', 'dbutils.get_or_create', (['session', 'models.SS'], {'name': 'name', 'index': 'idx', 'trace': 'trace'}), '(session, models.SS, name=name, index=idx, trace=trace)\n', (3648, 3703), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((3834, 3906), 'rmexp.dbutils.get_or_create', 'dbutils.get_or_create', (['session', 'models.LegoLatency'], {'name': 'name', 'index': 'idx'}), '(session, models.LegoLatency, name=name, index=idx)\n', (3855, 3906), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((4204, 4256), 'rmexp.dbutils.insert', 'dbutils.insert', (['session', 'models.ResourceLatency', 'rec'], {}), '(session, models.ResourceLatency, rec)\n', (4218, 4256), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((5597, 5620), 'rmexp.dbutils.session_scope', 'dbutils.session_scope', ([], {}), '()\n', (5618, 5620), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((6378, 6414), 'rmexp.dbutils.session_scope', 'dbutils.session_scope', ([], {'dry_run': '(False)'}), '(dry_run=False)\n', (6399, 6414), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((810, 838), 'importlib.import_module', 'importlib.import_module', 
(['app'], {}), '(app)\n', (833, 838), False, 'import importlib\n'), ((947, 958), 'time.time', 'time.time', ([], {}), '()\n', (956, 958), False, 'import time\n'), ((1544, 1585), 'numpy.frombuffer', 'np.frombuffer', (['encoded_im'], {'dtype': 'np.uint8'}), '(encoded_im, dtype=np.uint8)\n', (1557, 1585), True, 'import numpy as np\n'), ((1604, 1660), 'cv2.imdecode', 'cv2.imdecode', (['encoded_im_np', 'cv2.CV_LOAD_IMAGE_UNCHANGED'], {}), '(encoded_im_np, cv2.CV_LOAD_IMAGE_UNCHANGED)\n', (1616, 1660), False, 'import cv2\n'), ((1770, 1781), 'time.time', 'time.time', ([], {}), '()\n', (1779, 1781), False, 'import time\n'), ((2126, 2147), 'rmexp.gabriel_pb2.Message', 'gabriel_pb2.Message', ([], {}), '()\n', (2145, 2147), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((5375, 5401), 'os.path.dirname', 'os.path.dirname', (['video_uri'], {}), '(video_uri)\n', (5390, 5401), False, 'import os\n'), ((5704, 5716), 'time.clock', 'time.clock', ([], {}), '()\n', (5714, 5716), False, 'import time\n'), ((6002, 6022), 'logzero.logger.debug', 'logger.debug', (['result'], {}), '(result)\n', (6014, 6022), False, 'from logzero import logger\n'), ((6462, 6488), 'os.path.dirname', 'os.path.dirname', (['video_uri'], {}), '(video_uri)\n', (6477, 6488), False, 'import os\n'), ((7196, 7214), 'rmexp.cvutils.phash', 'cvutils.phash', (['img'], {}), '(img)\n', (7209, 7214), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((1402, 1413), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1411, 1413), False, 'import os\n'), ((2564, 2575), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2573, 2575), False, 'import os\n'), ((6630, 6648), 'rmexp.cvutils.phash', 'cvutils.phash', (['img'], {}), '(img)\n', (6643, 6648), False, 'from rmexp import config, cvutils, dbutils, gabriel_pb2, client\n'), ((7597, 7615), 'logzero.logger.debug', 'logger.debug', (['diff'], {}), '(diff)\n', (7609, 7615), False, 'from logzero import logger\n'), ((1102, 1113), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1111, 1113), False, 'import os\n'), ((2047, 2059), 'time.clock', 'time.clock', ([], {}), '()\n', (2057, 2059), False, 'import time\n'), ((3353, 3364), 'time.time', 'time.time', ([], {}), '()\n', (3362, 3364), False, 'import time\n'), ((5864, 5875), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5873, 5875), False, 'import os\n'), ((1825, 1836), 'time.time', 'time.time', ([], {}), '()\n', (1834, 1836), False, 'import time\n'), ((2578, 2589), 'time.time', 'time.time', ([], {}), '()\n', (2587, 2589), False, 'import time\n')]
|
import os
import numpy as np
from montepython.likelihood_class import Likelihood
import montepython.io_mp as io_mp
import warnings
import ccl_tools as tools
import pyccl as ccl
class covfefe(Likelihood):
# initialization routine
def __init__(self, path, data, command_line):
Likelihood.__init__(self, path, data, command_line)
self.nb = data.cosmo_arguments['n_bins']
self.cm = data.cosmo_arguments['cov']
n_sims = 20000
# Load Covariance matrix
fn = 'cov_{}_{}.npz'.format(self.cm,self.nb)
self.cov = np.load(os.path.join(self.data_directory, fn))['arr_0']
if self.cm=='sim':
factor = (n_sims-self.cov.shape[0]-2.)/(n_sims-1.)
else:
factor = 1.
self.icov = factor*np.linalg.inv(self.cov)
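        # Explanatory note (not in the original code): the (n_sims - n_bins - 2)/(n_sims - 1)
        # prefactor applied above when the covariance comes from simulations is the standard
        # Hartlap-style debiasing factor for an inverse covariance estimated from n_sims realizations.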
# Load ell bandpowers
self.ell_bp = np.load(os.path.join(self.data_directory, 'ell_bp.npz'))['lsims'].astype(int)
self.nl = len(self.ell_bp)
# Load photo_z
fn = 'z_{}.npz'.format(self.nb)
self.z = np.load(os.path.join(self.data_directory, fn))['arr_0']
fn = 'pz_{}.npz'.format(self.nb)
self.pz = np.load(os.path.join(self.data_directory, fn))['arr_0']
fn = 'bz_{}.npz'.format(self.nb)
self.bz = np.load(os.path.join(self.data_directory, fn))['arr_0']
# Load data
fn = 'data_{}.npz'.format(self.nb)
self.data = np.load(os.path.join(self.data_directory, fn))['arr_0']
# end of initialization
# compute likelihood
def loglkl(self, cosmo, data):
# Get theory Cls
cosmo_ccl = tools.get_cosmo_ccl(cosmo.pars)
tracers = tools.get_tracers_ccl(cosmo_ccl, self.z, self.pz, self.bz)
theory = tools.get_cls_ccl(cosmo_ccl, tracers, self.ell_bp)
# Get chi2
chi2 = (self.data-theory).dot(self.icov).dot(self.data-theory)
lkl = - 0.5 * chi2
return lkl
|
[
"montepython.likelihood_class.Likelihood.__init__",
"ccl_tools.get_cls_ccl",
"os.path.join",
"ccl_tools.get_cosmo_ccl",
"numpy.linalg.inv",
"ccl_tools.get_tracers_ccl"
] |
[((297, 348), 'montepython.likelihood_class.Likelihood.__init__', 'Likelihood.__init__', (['self', 'path', 'data', 'command_line'], {}), '(self, path, data, command_line)\n', (316, 348), False, 'from montepython.likelihood_class import Likelihood\n'), ((1625, 1656), 'ccl_tools.get_cosmo_ccl', 'tools.get_cosmo_ccl', (['cosmo.pars'], {}), '(cosmo.pars)\n', (1644, 1656), True, 'import ccl_tools as tools\n'), ((1675, 1733), 'ccl_tools.get_tracers_ccl', 'tools.get_tracers_ccl', (['cosmo_ccl', 'self.z', 'self.pz', 'self.bz'], {}), '(cosmo_ccl, self.z, self.pz, self.bz)\n', (1696, 1733), True, 'import ccl_tools as tools\n'), ((1751, 1801), 'ccl_tools.get_cls_ccl', 'tools.get_cls_ccl', (['cosmo_ccl', 'tracers', 'self.ell_bp'], {}), '(cosmo_ccl, tracers, self.ell_bp)\n', (1768, 1801), True, 'import ccl_tools as tools\n'), ((785, 808), 'numpy.linalg.inv', 'np.linalg.inv', (['self.cov'], {}), '(self.cov)\n', (798, 808), True, 'import numpy as np\n'), ((582, 619), 'os.path.join', 'os.path.join', (['self.data_directory', 'fn'], {}), '(self.data_directory, fn)\n', (594, 619), False, 'import os\n'), ((1064, 1101), 'os.path.join', 'os.path.join', (['self.data_directory', 'fn'], {}), '(self.data_directory, fn)\n', (1076, 1101), False, 'import os\n'), ((1179, 1216), 'os.path.join', 'os.path.join', (['self.data_directory', 'fn'], {}), '(self.data_directory, fn)\n', (1191, 1216), False, 'import os\n'), ((1294, 1331), 'os.path.join', 'os.path.join', (['self.data_directory', 'fn'], {}), '(self.data_directory, fn)\n', (1306, 1331), False, 'import os\n'), ((1434, 1471), 'os.path.join', 'os.path.join', (['self.data_directory', 'fn'], {}), '(self.data_directory, fn)\n', (1446, 1471), False, 'import os\n'), ((870, 917), 'os.path.join', 'os.path.join', (['self.data_directory', '"""ell_bp.npz"""'], {}), "(self.data_directory, 'ell_bp.npz')\n", (882, 917), False, 'import os\n')]
|
from flask import Flask, render_template, request
# from .recommendation import *
# import pickle
import pandas as pd
import numpy as np
# import keras
# from keras.models import load_model
import pickle
def create_app():
# initializes our app
APP = Flask(__name__)
@APP.route('/')
def form():
return render_template('base.html')
@APP.route('/data/', methods=['GET', 'POST'])
def data():
if request.method == 'POST':
# Get form data
name = request.form.get('name')
blurb = request.form.get('blurb', 'default')
country = request.form.get('country', 'default')
backers_count = request.form.get('backers_count', 'default')
prediction = preprocessDataAndPredict(name, blurb, country,
backers_count)
# print(prediction[0])
return render_template('data.html', prediction=prediction[0])
def preprocessDataAndPredict(name, blurb, country, backers_count):
# test_data = (blurb)
test_data = (name, blurb, country, backers_count)
# print(test_data)
test_data = np.array(test_data)
dftest = pd.DataFrame(test_data).T
dftest.columns = ['name', 'blurb', 'country', 'backers_count']
print(dftest)
print(dftest.shape)
# test_data = test_data.reshape(1, -1)
# print(test_data)
#file = open("model.pkl", "wb")
model = pickle.load(
open('model_knn', 'rb'))
# model = pickle.load(
# open('Kickstarter2/kickstarter/kick_model(1)', 'rb'))
prediction = model.predict(dftest)
# print(prediction)
return prediction
# return prediction
return APP
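
# Usage sketch (illustrative, not part of the original file):
#   APP = create_app()
#   APP.run(debug=True)
# or point the Flask CLI at the factory, e.g. FLASK_APP="<this_module>:create_app()" flask run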
|
[
"flask.render_template",
"flask.Flask",
"flask.request.form.get",
"numpy.array",
"pandas.DataFrame"
] |
[((259, 274), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (264, 274), False, 'from flask import Flask, render_template, request\n'), ((327, 355), 'flask.render_template', 'render_template', (['"""base.html"""'], {}), "('base.html')\n", (342, 355), False, 'from flask import Flask, render_template, request\n'), ((1163, 1182), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (1171, 1182), True, 'import numpy as np\n'), ((507, 531), 'flask.request.form.get', 'request.form.get', (['"""name"""'], {}), "('name')\n", (523, 531), False, 'from flask import Flask, render_template, request\n'), ((552, 588), 'flask.request.form.get', 'request.form.get', (['"""blurb"""', '"""default"""'], {}), "('blurb', 'default')\n", (568, 588), False, 'from flask import Flask, render_template, request\n'), ((611, 649), 'flask.request.form.get', 'request.form.get', (['"""country"""', '"""default"""'], {}), "('country', 'default')\n", (627, 649), False, 'from flask import Flask, render_template, request\n'), ((678, 722), 'flask.request.form.get', 'request.form.get', (['"""backers_count"""', '"""default"""'], {}), "('backers_count', 'default')\n", (694, 722), False, 'from flask import Flask, render_template, request\n'), ((899, 953), 'flask.render_template', 'render_template', (['"""data.html"""'], {'prediction': 'prediction[0]'}), "('data.html', prediction=prediction[0])\n", (914, 953), False, 'from flask import Flask, render_template, request\n'), ((1200, 1223), 'pandas.DataFrame', 'pd.DataFrame', (['test_data'], {}), '(test_data)\n', (1212, 1223), True, 'import pandas as pd\n')]
|
'''See the shared Google Drive documentation for an inheritance diagram that
shows the relationships between the classes defined in this file.
'''
import numpy as np
import socket
import time
from riglib import source
from ismore import settings, udp_feedback_client
import ismore_bmi_lib
from utils.constants import *
#import armassist
#import rehand
from riglib.filter import Filter
from riglib.plants import Plant
import os
class BasePlantUDP(Plant):
'''
Common UDP interface for the ArmAssist/ReHand
'''
debug = 0
sensor_data_timeout = 1 # seconds. if this number of seconds has passed since sensor data was received, velocity commands will not be sent
lpf_vel = 0
# define in subclasses!
ssm_cls = None
addr = None
feedback_data_cls = None
data_source_name = None
n_dof = None
blocking_joints = None
safety_grid = None
feedback_str = ''
def __init__(self, *args, **kwargs):
self.source = source.DataSource(self.feedback_data_cls, bufferlen=5, name=self.data_source_name)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # used only for sending
ssm = self.ssm_cls()
self.pos_state_names = [s.name for s in ssm.states if s.order == 0]
self.vel_state_names = [s.name for s in ssm.states if s.order == 1]
self.aa_xy_ix = [i for i, j in enumerate(ssm.states) if j.name in ['aa_px', 'aa_py']]
self.aa_psi_ix = [i for i, j in enumerate(ssm.states) if j.name == 'aa_ppsi']
self.rh_pron_ix = [i for i, j in enumerate(ssm.states) if j.name == 'rh_pprono']
self.rh_pfings = [(i, j.name) for i, j in enumerate(ssm.states) if j.name in ['rh_pthumb', 'rh_pindex', 'rh_pfing3']]
self.drive_velocity_raw = np.zeros((len(self.vel_state_names),))
self.drive_velocity_raw_fb_gain = np.zeros((len(self.vel_state_names),))
self.drive_velocity_sent = np.zeros((len(self.vel_state_names),))
self.drive_velocity_sent_pre_safety = np.zeros((len(self.vel_state_names),))
self.pre_drive_state = np.zeros((len(self.vel_state_names), ))
# low-pass filters to smooth out command velocities
# from scipy.signal import butter
# b, a = butter(5, 0.1) # fifth order, 2 Hz bandpass (assuming 10 Hz update rate)
#omega, H = signal.freqz(b, a)
#plt.figure()
#plt.plot(omega/np.pi, np.abs(H))
# self.vel_command_lpfs = [None] * self.n_dof
# for k in range(self.n_dof):
# self.vel_command_lpfs[k] = Filter(b=b, a=a)
# self.last_sent_vel = np.ones(self.n_dof) * np.nan
# calculate coefficients for a 4th-order Butterworth LPF at 1.5 Hz for kinematic data received from the exo
# fs_synch = 20 #Frequency at which emg and kin data are synchronized
# nyq = 0.5 * fs_synch
# cuttoff_freq = 1.5 / nyq
# bpf_kin_coeffs = butter(4, cuttoff_freq, btype='low')
# self.pos_filt = [None] * self.n_dof
# for k in range(self.n_dof):
# self.pos_filt[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1])
def init(self):
from riglib import sink
sink.sinks.register(self.source)
def start(self):
# only start this DataSource after it has been registered with
# the SinkManager singleton (sink.sinks) in the call to init()
self.source.start()
self.ts_start_data = time.time()
def stop(self):
# send a zero-velocity command
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(np.zeros(self.n_dof))))
self.source.stop()
self.feedback_file.close()
def last_data_ts_arrival(self):
return self.source.read(n_pts=1)['ts_arrival'][0]
def _send_command(self, command):
self.sock.sendto(command, self.addr)
def pack_vel(self, vel):
format_str = "%f " * self.n_dof
return format_str % tuple(vel)
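    # Illustrative note (not in the original code): for the 3-DOF ArmAssist the command built in
    # send_vel() below ends up as a UDP payload such as
    # 'SetSpeed ArmAssist 10.000000 0.000000 5.000000 \r' (numbers made up for the example).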
def send_vel(self, vel):
assert len(vel) == self.n_dof
vel = vel.copy()
vel *= self.vel_gain # change the units of the velocity, if necessary
self.last_sent_vel = vel
        #command_vel is already filtered at the task level, no need to filter it again.
#self.last_sent_vel = filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel()
if all(v <= 0.00000001 for v in abs(self.last_sent_vel)):
print ('last sent vel')
print (self.last_sent_vel)
        if (self.last_data_ts_arrival() == 0) or ((time.time() - self.last_data_ts_arrival()) > self.sensor_data_timeout):
print ("sensor data not received for %s recently enough, not sending velocity command!" % self.plant_type)
return
# squash any velocities which would take joints outside of the rectangular bounding box
current_pos = self.get_pos() * self.vel_gain
projected_pos = current_pos + vel * 0.1
max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0))
min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0))
vel[max_reached] = 0
vel[min_reached] = 0
        # set max speed limits before sending
        faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
        vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
        # if we want to define some limit values for the rehand, use the filtered velocity (filt_vel) instead of vel
        #self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(filt_vel)))
        self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
if self.debug:
print ("input vel")
print (vel)
print ("vel sent to %s" % self.plant_type)
print (vel)
print ("current_pos")
print (current_pos)
print ("projected_pos")
print (projected_pos)
print ("actual velocity")
print (self.get_vel())
if self.lpf_vel:
# squash any velocities which would take joints outside of the rectangular bounding box
current_pos = self.get_pos() * self.vel_gain
projected_pos = current_pos + vel * (1.0/20)
max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0))
min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0))
vel[max_reached] = 0
vel[min_reached] = 0
            # set max speed limits
            faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
            vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
            if faster_than_max_speed.size > 0:
                print ('faster_than_max_speed')
                print (faster_than_max_speed)
if self.debug:
print ("input vel")
print (vel)
print ("vel sent to %s" % self.plant_type)
print (vel)
#print "current_pos"
#print current_pos
#print "projected_pos"
#print projected_pos
#print "actual velocity"
#print self.get_vel()
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
else:
self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel)))
# def get_pos(self):
# # udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
# return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
def drive(self, decoder):
vel = decoder['qdot']
vel_bl = vel.copy()
feedback_str = ''
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
if self.safety_grid is not None:
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = decoder['q'] + 0.1*vel_bl
#Make sure predicted AA PX, AA PY within bounds:
xy_change = True
if len(self.aa_xy_ix) > 0:
if self.safety_grid.is_valid_pos(pos_pred[self.aa_xy_ix]) is False:
#If not, make their velocity zero:
vel_bl[self.aa_xy_ix] = 0
xy_change = False
feedback_str = feedback_str+ ' stopping xy from moving'
else:
xy_change = False
# Make sure AA Psi within bounds:
if len(self.aa_psi_ix) > 0:
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred[self.aa_xy_ix])
# If x/y not ok:
else:
mn, mx = self.safety_grid.get_minmax_psi(decoder['q'][self.aa_xy_ix])
# Set psi velocity :
if np.logical_and(pos_pred[self.aa_psi_ix] >= mn, pos_pred[self.aa_psi_ix] <= mx):
pass
else:
vel_bl[self.aa_psi_ix] = 0
feedback_str = feedback_str+ 'stopping psi'
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_pron_ix) > 0:
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred[self.aa_xy_ix])
# If x/y not ok or not moving bc not part of state pace :
else:
if len(self.aa_xy_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(decoder['q'][self.aa_xy_ix])
else:
mn, mx = self.safety_grid.get_minmax_prono(settings.starting_pos['aa_px'], settings.starting_pos['aa_py'])
# Set prono velocity :
if np.logical_and(pos_pred[self.rh_pron_ix] >= mn, pos_pred[self.rh_pron_ix] <= mx):
pass
else:
vel_bl[self.rh_pron_ix] = 0
feedback_str = feedback_str+ 'stopping prono'
# Assure RH fingers are within range:
if len(self.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred[ix] >= mn, pos_pred[ix] <= mx):
pass
else:
vel_bl[ix] = 0
feedback_str = feedback_str+ 'stopping rh fings'
self.feedback_str = feedback_str
self.drive_velocity = vel_bl
self.send_vel(vel_bl)
decoder['q'] = self.get_pos()
def write_feedback(self):
pos_vel = [str(i) for i in np.hstack(( self.get_pos(), self.get_vel() )) ]
#self.feedback_file.write(','.join(pos_vel)+'\n')
if self.feedback_str != '':
self.feedback_file.write(self.feedback_str+ time.ctime() + '\n')
class ArmAssistPlantUDP(BasePlantUDP):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ArmAssist.
'''
ssm_cls = ismore_bmi_lib.StateSpaceArmAssist
addr = settings.ARMASSIST_UDP_SERVER_ADDR
feedback_data_cls = udp_feedback_client.ArmAssistData
data_source_name = 'armassist'
n_dof = 3
plant_type = 'ArmAssist'
vel_gain = np.array([cm_to_mm, cm_to_mm, rad_to_deg]) # convert units to: [mm/s, mm/s, deg/s]
max_pos_vals = np.array([np.inf, np.inf, np.inf])
min_pos_vals = np.array([-np.inf, -np.inf, -np.inf])
max_speed = np.array([np.inf, np.inf, np.inf])
feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'w')
#max_speed = np.array([40, 60, 20]) # in mm/s and deg/s
#max_speed = np.array([60, 80, 50]) # in mm/s and deg/s
#parameters for kinematics low-pass filtering
from scipy.signal import butter, lfilter
from ismore.filter import Filter
fs_synch = 25 #Frequency at which emg and kin data are synchronized
nyq = 0.5 * fs_synch
cuttoff_freq = 1.5 / nyq
bpf_kin_coeffs = butter(2, cuttoff_freq, btype='low')
n_dof = 3
vel_filter = [None] * n_dof
for k in range(n_dof):
vel_filter[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1])
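    # Illustrative note (not in the original code): with fs_synch = 25 Hz, butter(2, 1.5/12.5,
    # btype='low') returns (b, a) coefficients for a 2nd-order low-pass at ~1.5 Hz, and one
    # Filter instance per DOF keeps the per-channel filter state separate.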
n_getpos_iter= 0
def __init__(self, *args, **kwargs):
super(ArmAssistPlantUDP, self).__init__(*args, **kwargs)
def set_pos_control(self): # position control with global reference system
self._send_command('SetControlMode ArmAssist Position')
def set_global_control(self): #velocity control with global reference system
self._send_command('SetControlMode ArmAssist Global')
def set_trajectory_control(self): #trajectory control with global reference system
self._send_command('SetControlMode ArmAssist Trajectory')
def send_vel(self, vel):
vel = vel.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(vel) == self.n_dof
# convert units to: [mm/s, mm/s, deg/s] to send them through UDP to the ArmAssist application
vel[0] *= cm_to_mm
vel[1] *= cm_to_mm
vel[2] *= rad_to_deg
        # set max speed limits
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
self.debug = True
if self.debug:
# print "vel sent to armassist"
# print vel
            if faster_than_max_speed.any():
print ('faster_than_max_speed')
print (faster_than_max_speed)
print ("speed set to: ")
print (vel)
self._send_command('SetSpeed ArmAssist %f %f %f\r' % tuple(vel))
# get raw position
def get_pos_raw(self):
# udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
        #get the last points of data of the armassist and low-pass filter
return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
# get filtered position
def get_pos(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0]))
# calculate vel from raw position
def get_vel_raw(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
#filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel() #nerea --> to test!
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
return vel
#calculate vel from raw position and filter
def get_vel(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
# the first value of the pos because it is always NaN and if a NaN is introduced in the filter, all the following filtered values will be also NaNs
if np.any(np.isnan(vel)):
self.n_getpos_iter = self.n_getpos_iter +1
vel_filt = vel
else:
vel_filt = np.array([self.vel_filter[k](vel[k]) for k in range(self.n_dof)]).ravel()
return vel_filt
def send_pos(self, pos, time):
pos = pos.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(pos) == 3
# convert units to: [mm/s, mm/s, deg/s]
pos[0] *= cm_to_mm
pos[1] *= cm_to_mm
pos[2] *= rad_to_deg
# mode 1: the forearm angle (psi) stays the same as it is. mode 2: psi will move according to the determined value
mode = 2
pos_command = np.zeros(5)
pos_command[0] = pos[0]
pos_command[1] = pos[1]
pos_command[2] = pos[2]
pos_command[3] = time
pos_command[4] = mode
print ("pos")
print (pos)
print ("time")
print (time)
self._send_command('SetPosition ArmAssist %f %f %f %f %f\r' % tuple(pos_command))
def enable(self):
self._send_command('SetControlMode ArmAssist Global\r')
def disable(self):
self._send_command('SetControlMode ArmAssist Disable\r')
def enable_watchdog(self, timeout_ms):
print ('ArmAssist watchdog not enabled, doing nothing')
def send_traj(self, pos_vel):
pos_vel = pos_vel.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(pos_vel) == 6
        # units are already in [mm/s, mm/s, rad/s]
# convert values to integers to reduce noise
#pos_vel_int = np.rint(pos_vel)
pos_vel_int = pos_vel
print ("trajectory sent to AA")
print ("x y psi vx vy vpsi")
print (pos_vel_int)
traj_command = np.zeros(6)
traj_command[0] = pos_vel_int[0]
traj_command[1] = pos_vel_int[1]
traj_command[2] = pos_vel_int[2]
traj_command[3] = pos_vel_int[3]
traj_command[4] = pos_vel_int[4]
traj_command[5] = pos_vel_int[5]
self._send_command('SetTrajectory ArmAssist %d %d %d %d %d %d\r' % tuple(traj_command))
class DummyPlantUDP(object):
drive_velocity_raw = np.array([0,0,0])
drive_velocity_sent = np.array([0,0,0])
drive_velocity_sent_pre_safety = np.array([0,0,0])
pre_drive_state = np.array([0, 0, 0])
def init(self):
pass
def enable(self):
pass
def start(self):
pass
def stop(self):
pass
def write_feedback(self):
pass
def get_pos_raw(self):
return np.array([0,0,0])
def get_pos(self):
return np.array([0,0,0])
def get_vel_raw(self):
return np.array([0,0,0])
def get_vel(self):
return np.array([0,0,0])
class ReHandPlantUDP(BasePlantUDP):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ReHand.
'''
ssm_cls = ismore_bmi_lib.StateSpaceReHand
addr = settings.REHAND_UDP_SERVER_ADDR
feedback_data_cls = udp_feedback_client.ReHandData
data_source_name = 'rehand'
n_dof = 4
plant_type = 'ReHand'
vel_gain = np.array([rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg])
max_pos_vals = np.array([60, 60, 60, 90], dtype=np.float64) # degrees
min_pos_vals = np.array([25, 25, 25, 25], dtype=np.float64) # degrees
max_speed = np.array([np.inf, np.inf, np.inf, np.inf], dtype=np.float64) # degrees/sec
#max_speed = np.array([15., 15., 15., 15.], dtype=np.float64) # degrees/sec
feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'w')
def send_vel(self, vel):
vel = vel.copy()
# units of vel should be: [rad/s, rad/s, rad/s, rad/s]
assert len(vel) == self.n_dof
# convert units to: [deg/s, deg/s, deg/s, deg/s]
vel *= rad_to_deg
#filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel()
        # set max speed limits
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
self.debug = True
if self.debug:
# print 'filt_vel in plants in degrees'
# print filt_vel #*np.array([deg_to_rad, deg_to_rad, deg_to_rad, deg_to_rad])
            if faster_than_max_speed.any():
print ('faster_than_max_speed')
print (faster_than_max_speed)
print ("speed set to: ")
print (vel)
# self.plant.enable() #when we send vel commands always enable the rehand motors
# self._send_command('SystemEnable ReHand\r')
self._send_command('SetSpeed ReHand %f %f %f %f\r' % tuple(vel))
def get_vel_raw(self):
return np.array(tuple(self.source.read(n_pts=1)['data'][self.vel_state_names][0]))
def get_vel(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.vel_state_names][0]))
def enable(self):
self._send_command('SystemEnable ReHand\r')
def disable(self):
self._send_command('SystemDisable ReHand\r')
def diff_enable(self,DoFs):
self._send_command('DiffEnable ReHand %i %i %i %i\r' % tuple(DoFs))
def get_enable_state(self):
self._send_command('GetEnableState ReHand\r')
def enable_watchdog(self, timeout_ms):
self._send_command('WatchDogEnable ReHand %d\r' % timeout_ms)
def get_pos_raw(self):
# udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
#get pos filtered
def get_pos(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0]))
################################################
class BasePlantIsMore(Plant):
# define in subclasses!
aa_plant_cls = None
rh_plant_cls = None
safety_grid = None
both_feedback_str = ''
def __init__(self, *args, **kwargs):
self.aa_plant = self.aa_plant_cls()
self.rh_plant = self.rh_plant_cls()
self.drive_velocity_raw = np.zeros((7,))
self.drive_velocity_sent= np.zeros((7,))
self.drive_velocity_sent_pre_safety = np.zeros((7, ))
self.pre_drive_state = np.zeros((7, ))
self.prev_vel_bl_aa = np.zeros((3, ))*np.NaN
self.prev_vel_bl_rh = np.zeros((4, ))*np.NaN
self.accel_lim_armassist = np.inf #0.8
self.accel_lim_psi = np.inf #0.16
self.accel_lim_rehand = np.inf #0.16
def init(self):
self.aa_plant.init()
self.rh_plant.init()
def start(self):
self.aa_plant.start()
self.rh_plant.start()
self.ts_start_data = time.time()
def stop(self):
self.aa_plant.stop()
self.rh_plant.stop()
def last_data_ts_arrival(self):
return {
'ArmAssist': self.aa_plant.last_data_ts_arrival(),
'ReHand': self.rh_plant.last_data_ts_arrival(),
}
def send_vel(self, vel):
self.aa_plant.send_vel(vel[0:3])
self.rh_plant.send_vel(vel[3:7])
def get_pos_raw(self):
aa_pos = self.aa_plant.get_pos_raw()
rh_pos = self.rh_plant.get_pos_raw()
return np.hstack([aa_pos, rh_pos])
def get_pos(self):
aa_pos = self.aa_plant.get_pos()
rh_pos = self.rh_plant.get_pos()
return np.hstack([aa_pos, rh_pos])
def get_vel_raw(self):
aa_vel = self.aa_plant.get_vel_raw()
rh_vel = self.rh_plant.get_vel_raw()
return np.hstack([aa_vel, rh_vel])
def get_vel(self):
aa_vel = self.aa_plant.get_vel()
rh_vel = self.rh_plant.get_vel()
return np.hstack([aa_vel, rh_vel])
def enable(self):
self.aa_plant.enable()
self.rh_plant.enable()
def disable(self):
self.aa_plant.disable()
self.rh_plant.disable()
def drive(self, decoder):
# print self.aa_plant.aa_xy_ix: [0, 1]
# print self.aa_plant.aa_psi_ix: [2]
# print self.rh_plant.rh_pfings: [0, 1, 2]
# print self.rh_plant.rh_pron_ix: [3]
vel = decoder['qdot']
vel_bl = vel.copy()
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
self.drive_velocity_raw = vel_bl.copy()
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl[0:3].copy()
vel_bl_rh0 = vel_bl[3:7].copy()
        ### Accel Limit Velocities ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
if self.safety_grid is not None:
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
else:
vel_bl_aa = vel_bl_aa0
vel_bl_rh = vel_bl_rh0
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
current_pos = current_state[self.aa_plant.aa_xy_ix]
pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix]
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
if global_ok:
psi_ok = True
if psi_ok == False:
# Move psi back to attractor pos:
psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix]
vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix]
prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix]
vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05
# Assure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
tmp_ = pos_pred_rh[ix]
neutral = attractor_point_rh[ix]
vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep', 'baseline_check']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
decoder['q'] = self.get_pos()
class IsMorePlantUDP(BasePlantIsMore):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ArmAssist+ReHand.
'''
aa_plant_cls = ArmAssistPlantUDP
rh_plant_cls = ReHandPlantUDP
def write_feedback(self):
self.aa_plant.feedback_str = self.both_feedback_str
self.aa_plant.write_feedback()
#self.rh_plant.write_feedback()
class IsMorePlantEMGControl(IsMorePlantUDP): # Plant used for the pure EMG control task
def drive(self):
vel_bl = self.drive_velocity_raw
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl[0:3].copy()
vel_bl_rh0 = vel_bl[3:7].copy()
        ### Accel Limit Velocities ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
current_pos = current_state[self.aa_plant.aa_xy_ix]
pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix]
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
if global_ok:
psi_ok = True
if psi_ok == False:
# Move psi back to attractor pos:
psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix]
vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix]
prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix]
vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05
# Assure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
tmp_ = pos_pred_rh[ix]
neutral = attractor_point_rh[ix]
vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
class IsMorePlantHybridBMI(IsMorePlantUDP): # Plant used for the hybrid (EMG + brain) BMI task.
def __init__(self, *args, **kwargs):
self.drive_velocity_raw_brain = np.zeros((7,))
self.emg_vel_raw_scaled = np.zeros((7,))
super(IsMorePlantHybridBMI, self).__init__(*args, **kwargs)
def drive(self, decoder):
vel = decoder['qdot']
vel_brain = vel.copy()
vel_brain_aa = vel_brain[[0, 1, 2]]
vel_brain_fingers = vel_brain[[3, 4, 5]]
vel_brain_prono = vel_brain[[6]]
self.drive_velocity_raw_brain = vel_brain.copy()
# Use EMG scaled array to scale the output:
vel_emg = self.emg_vel.copy()
vel_emg_scaled = []
for i in range(7):
vel_emg_scaled.append(vel_emg[i]*self.scale_emg_pred_arr[i])
vel_emg_scaled = np.hstack((vel_emg_scaled))
self.emg_vel_raw_scaled = vel_emg_scaled.copy()
vel_emg_aa = vel_emg_scaled[[0, 1, 2]]
vel_emg_fingers = vel_emg_scaled[[3, 4, 5]]
vel_emg_prono = vel_emg_scaled[[6]]
vel_bl_aa = vel_emg_aa*self.emg_weight_aa + vel_brain_aa*(1-self.emg_weight_aa)
vel_bl_fingers = vel_emg_fingers*self.emg_weight_fingers + vel_brain_fingers*(1-self.emg_weight_fingers)
vel_bl_prono = vel_emg_prono*self.emg_weight_prono + vel_brain_prono*(1-self.emg_weight_prono)
vel_bl = np.hstack((vel_bl_aa, vel_bl_fingers, vel_bl_prono))
# Fuse velocities from EMG and neural decoders
#vel_bl = vel_emg*self.emg_weight + vel_brain*(1-self.emg_weight)
self.drive_velocity_raw = vel_bl.copy()
vel_bl_fb_gain = []
for i in range(7):
vel_bl_fb_gain.append(vel_bl[i]*self.fb_vel_gain[i])
vel_bl_fb_gain = np.hstack((vel_bl_fb_gain))
self.drive_velocity_raw_fb_gain = vel_bl_fb_gain.copy()
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
if self.blocking_joints is not None:
print ('self.blocking_joints --> ', self.blocking_joints)
vel_bl_fb_gain[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl_fb_gain[0:3].copy()
vel_bl_rh0 = vel_bl_fb_gain[3:7].copy()
### Accel Limit Velocitites ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
print ('false position')
current_pos = current_state[self.aa_plant.aa_xy_ix]
pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix]
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
#global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
global_ok = True
if global_ok:
psi_ok = True
if psi_ok == False:
# Move psi back to attractor pos:
psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix]
vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix]
prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix]
vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05
# Assure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
tmp_ = pos_pred_rh[ix]
neutral = attractor_point_rh[ix]
vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05
# print 'safely adjusting fingers! ', nm, 'min: ', mn, ' max: ', mx, ' pred: ', pos_pred_rh[ix]
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep', 'baseline_check']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
elif self.task_state == 'rest_back':
vel_bl_aa = vel_bl_aa_pull/self.attractor_speed_const*self.rest_back_attractor_speed
vel_bl_rh = vel_bl_rh_pull/self.attractor_speed_const*self.rest_back_attractor_speed
elif self.task_state in ['drive_to_start', 'drive_to_rest']:
vel_bl_aa = self.back_to_target_speed*(self.drive_to_start_target[:3] - current_state[:3])/0.05
vel_bl_rh = self.back_to_target_speed*(self.drive_to_start_target[3:] - current_state[3:])/0.05
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
decoder['q'] = self.get_pos()
class IsMorePlantHybridBMISoftSafety(IsMorePlantHybridBMI):
def drive(self, decoder):
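        """Same blending pipeline as IsMorePlantHybridBMI.drive, but with a
        softer safety policy: commands that would leave the safety grid are
        rescaled in 5% steps (instead of being redirected towards the
        attractor point) until the predicted position becomes valid again.
        """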
vel = decoder['qdot']
vel_brain = vel.copy()
vel_brain_aa = vel_brain[[0, 1, 2]]
vel_brain_fingers = vel_brain[[3, 4, 5]]
vel_brain_prono = vel_brain[[6]]
self.drive_velocity_raw_brain = vel_brain.copy()
# Use EMG scaled array to scale the output:
vel_emg = self.emg_vel.copy()
vel_emg_scaled = []
for i in range(7):
vel_emg_scaled.append(vel_emg[i]*self.scale_emg_pred_arr[i])
vel_emg_scaled = np.hstack((vel_emg_scaled))
self.emg_vel_raw_scaled = vel_emg_scaled.copy()
vel_emg_aa = vel_emg_scaled[[0, 1, 2]]
vel_emg_fingers = vel_emg_scaled[[3, 4, 5]]
vel_emg_prono = vel_emg_scaled[[6]]
vel_bl_aa = vel_emg_aa*self.emg_weight_aa + vel_brain_aa*(1-self.emg_weight_aa)
vel_bl_fingers = vel_emg_fingers*self.emg_weight_fingers + vel_brain_fingers*(1-self.emg_weight_fingers)
vel_bl_prono = vel_emg_prono*self.emg_weight_prono + vel_brain_prono*(1-self.emg_weight_prono)
vel_bl = np.hstack((vel_bl_aa, vel_bl_fingers, vel_bl_prono))
# Fuse velocities from EMG and neural decoders
#vel_bl = vel_emg*self.emg_weight + vel_brain*(1-self.emg_weight)
self.drive_velocity_raw = vel_bl.copy()
vel_bl_fb_gain = []
for i in range(7):
vel_bl_fb_gain.append(vel_bl[i]*self.fb_vel_gain[i])
vel_bl_fb_gain = np.hstack((vel_bl_fb_gain))
self.drive_velocity_raw_fb_gain = vel_bl_fb_gain.copy()
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
if self.blocking_joints is not None:
vel_bl_fb_gain[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl_fb_gain[0:3].copy()
vel_bl_rh0 = vel_bl_fb_gain[3:7].copy()
### Accel Limit Velocitites ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to limit:
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
both_feedback_str = ''
if self.safety_grid is not None:
if len(self.aa_plant.aa_xy_ix) > 0:
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix])
if x_tmp == False:
# Find the closest point on the boundary of the safety grid and set velocity in same
# direction, but at 90% of way to get to the edge of the safety grid:
current_pos = current_state[self.aa_plant.aa_xy_ix]
### loop through percentages of velocity and check validity of point:
valid_scale = False
scale = 1.0
while valid_scale is False:
scale -= 0.05
pos_pred_xy = current_pos + 0.05*(vel_bl_aa[self.aa_plant.aa_xy_ix]*scale)
valid_scale = self.safety_grid.is_valid_pos(pos_pred_xy)
if scale < -1.0:
scale = 0.0
break
#d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[self.aa_plant.aa_xy_ix] = vel_bl_aa[self.aa_plant.aa_xy_ix]*scale
pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
if len(self.aa_plant.aa_psi_ix) > 0:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix])
predx, predy= pos_pred_aa[[0, 1]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx):
# Test if globally ok:
#global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0
global_ok = True
if global_ok:
psi_ok = True
if psi_ok == False:
valid_scale_psi = False
scale = 1.0
while valid_scale_psi is False:
scale -= 0.05
psi_pred = current_state[self.aa_plant.aa_psi_ix] + 0.05*(scale*vel_bl_aa[self.aa_plant.aa_psi_ix])
if np.logical_and(psi_pred >= mn, psi_pred <= mx):
valid_scale_psi = True
if scale < -1.0:
scale = 0.0
break
vel_bl_aa[self.aa_plant.aa_psi_ix] = scale*vel_bl_aa[self.aa_plant.aa_psi_ix]
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
if len(self.rh_plant.rh_pron_ix) > 0:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix])
# Set prono velocity :
if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx):
pass
else:
valid_scale_prono = False
scale = 1.0
while valid_scale_prono is False:
scale -= 0.05
pron_pred = pos_pred_rh[self.rh_plant.rh_pron_ix] + 0.05*(scale*vel_bl_rh[self.rh_plant.rh_pron_ix])
if np.logical_and(pron_pred >= mn, pron_pred <= mx):
valid_scale_prono = True
if scale < -1.0:
scale = 0.
break
vel_bl_rh[self.rh_plant.rh_pron_ix] = scale*vel_bl_rh[self.rh_plant.rh_pron_ix]
# Assure RH fingers are within range:
if len(self.rh_plant.rh_pfings) > 0:
for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
finger_scale = False
scale = 1.0
while finger_scale is False:
scale -= 0.05
fing_pred = pos_pred_rh[ix] + 0.05*(scale*vel_bl_rh[ix])
if np.logical_and(fing_pred >= mn, fing_pred<= mx):
finger_scale = True
if scale < -1.0:
scale = 0.0
break
vel_bl_rh[ix] = scale*vel_bl_rh[ix]
# If in the rest state -- block the arm:
if self.task_state in ['rest', 'prep', 'baseline_check', 'wait']:
vel_bl_aa[:] = 0
vel_bl_rh[:] = 0
elif self.task_state == 'emg_rest':
scaling = self.rest_emg_output
if scaling <= 0.5:
scaling = 0
else:
scaling = 0.5*scaling
vel_bl_aa = scaling*vel_bl_aa
vel_bl_rh = scaling*vel_bl_rh
elif self.task_state == 'rest_back':
vel_bl_aa = vel_bl_aa_pull/self.attractor_speed_const*self.rest_back_attractor_speed
vel_bl_rh = vel_bl_rh_pull/self.attractor_speed_const*self.rest_back_attractor_speed
elif self.task_state in ['drive_to_start', 'drive_to_rest']:
vel_bl_aa = self.back_to_target_speed*(self.drive_to_start_target[:3] - current_state[:3])/0.05
vel_bl_rh = self.back_to_target_speed*(self.drive_to_start_target[3:] - current_state[3:])/0.05
max_vel_xy = 10.
vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy
vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy
max_vel_ang = 2.
if vel_bl_aa[2] > max_vel_ang:
vel_bl_aa[2] = max_vel_ang
elif vel_bl_aa[2] < -1*max_vel_ang:
vel_bl_aa[2] = -1*max_vel_ang
vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang
vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang
if self.blocking_joints is not None:
for j in [0, 1, 2]:
if j in self.blocking_joints:
vel_bl_aa[j] = 0
#print 'blocking vel_bl_aa: ', j
for j in [3, 4, 5, 6]:
if j in self.blocking_joints:
vel_bl_rh[j-3] = 0
#print 'blocking vel_bl_rh: ', j-3
self.both_feedback_str = both_feedback_str
self.aa_plant.send_vel(vel_bl_aa)
self.rh_plant.send_vel(vel_bl_rh)
self.prev_vel_bl_aa = vel_bl_aa.copy()
self.prev_vel_bl_rh = vel_bl_rh.copy()
self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
decoder['q'] = self.get_pos()
UDP_PLANT_CLS_DICT = {
'ArmAssist': ArmAssistPlantUDP,
'ReHand': ReHandPlantUDP,
'IsMore': IsMorePlantUDP,
'IsMoreEMGControl': IsMorePlantEMGControl,
'IsMoreHybridControl': IsMorePlantHybridBMI,
'IsMorePlantHybridBMISoftSafety': IsMorePlantHybridBMISoftSafety,
'DummyPlant': DummyPlantUDP,
}
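# A minimal usage sketch (assumption: callers pick the plant class by the name
# configured for the task; constructor arguments are device-specific and not
# shown here):
def get_plant_cls(plant_type):
    """Return the UDP plant class registered under `plant_type`."""
    try:
        return UDP_PLANT_CLS_DICT[plant_type]
    except KeyError:
        raise ValueError('unknown plant type: %s' % plant_type)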
###########################
##### Deprecated code #####
###########################
class BasePlant(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError('Implement in subclasses!')
def init(self):
raise NotImplementedError('Implement in subclasses!')
def start(self):
raise NotImplementedError('Implement in subclasses!')
def stop(self):
raise NotImplementedError('Implement in subclasses!')
def last_data_ts_arrival(self):
raise NotImplementedError('Implement in subclasses!')
def send_vel(self, vel):
raise NotImplementedError('Implement in subclasses!')
def get_pos(self):
raise NotImplementedError('Implement in subclasses!')
def get_vel(self):
raise NotImplementedError('Implement in subclasses!')
def enable(self):
        '''Enable the device's motor drivers.'''
raise NotImplementedError('Implement in subclasses!')
def disable(self):
'''Disable the device's motor drivers.'''
raise NotImplementedError('Implement in subclasses!')
def enable_watchdog(self, timeout_ms):
raise NotImplementedError('Implement in subclasses!')
def get_intrinsic_coordinates(self):
return self.get_pos()
|
[
"numpy.abs",
"time.ctime",
"socket.socket",
"numpy.hstack",
"os.path.expandvars",
"ismore.filter.Filter",
"numpy.logical_and",
"scipy.signal.butter",
"riglib.sink.sinks.register",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.sign",
"numpy.nonzero",
"time.time",
"riglib.source.DataSource"
] |
[((12073, 12115), 'numpy.array', 'np.array', (['[cm_to_mm, cm_to_mm, rad_to_deg]'], {}), '([cm_to_mm, cm_to_mm, rad_to_deg])\n', (12081, 12115), True, 'import numpy as np\n'), ((12180, 12214), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (12188, 12214), True, 'import numpy as np\n'), ((12239, 12276), 'numpy.array', 'np.array', (['[-np.inf, -np.inf, -np.inf]'], {}), '([-np.inf, -np.inf, -np.inf])\n', (12247, 12276), True, 'import numpy as np\n'), ((12301, 12335), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (12309, 12335), True, 'import numpy as np\n'), ((12847, 12883), 'scipy.signal.butter', 'butter', (['(2)', 'cuttoff_freq'], {'btype': '"""low"""'}), "(2, cuttoff_freq, btype='low')\n", (12853, 12883), False, 'from scipy.signal import butter, lfilter\n'), ((18816, 18835), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18824, 18835), True, 'import numpy as np\n'), ((18860, 18879), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18868, 18879), True, 'import numpy as np\n'), ((18915, 18934), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18923, 18934), True, 'import numpy as np\n'), ((18955, 18974), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18963, 18974), True, 'import numpy as np\n'), ((19843, 19901), 'numpy.array', 'np.array', (['[rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg]'], {}), '([rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg])\n', (19851, 19901), True, 'import numpy as np\n'), ((19926, 19970), 'numpy.array', 'np.array', (['[60, 60, 60, 90]'], {'dtype': 'np.float64'}), '([60, 60, 60, 90], dtype=np.float64)\n', (19934, 19970), True, 'import numpy as np\n'), ((20005, 20049), 'numpy.array', 'np.array', (['[25, 25, 25, 25]'], {'dtype': 'np.float64'}), '([25, 25, 25, 25], dtype=np.float64)\n', (20013, 20049), True, 'import numpy as np\n'), ((20084, 20144), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf, np.inf]'], {'dtype': 'np.float64'}), '([np.inf, np.inf, np.inf, np.inf], dtype=np.float64)\n', (20092, 20144), True, 'import numpy as np\n'), ((1022, 1109), 'riglib.source.DataSource', 'source.DataSource', (['self.feedback_data_cls'], {'bufferlen': '(5)', 'name': 'self.data_source_name'}), '(self.feedback_data_cls, bufferlen=5, name=self.\n data_source_name)\n', (1039, 1109), False, 'from riglib import source\n'), ((1125, 1173), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1138, 1173), False, 'import socket\n'), ((3234, 3266), 'riglib.sink.sinks.register', 'sink.sinks.register', (['self.source'], {}), '(self.source)\n', (3253, 3266), False, 'from riglib import sink\n'), ((3489, 3500), 'time.time', 'time.time', ([], {}), '()\n', (3498, 3500), False, 'import time\n'), ((5076, 5135), 'numpy.nonzero', 'np.nonzero', (['((projected_pos > self.max_pos_vals) * (vel > 0))'], {}), '((projected_pos > self.max_pos_vals) * (vel > 0))\n', (5086, 5135), True, 'import numpy as np\n'), ((5159, 5218), 'numpy.nonzero', 'np.nonzero', (['((projected_pos < self.min_pos_vals) * (vel < 0))'], {}), '((projected_pos < self.min_pos_vals) * (vel < 0))\n', (5169, 5218), True, 'import numpy as np\n'), ((12365, 12421), 'os.path.expandvars', 'os.path.expandvars', (['"""$HOME/code/bmi3d/log/armassist.txt"""'], {}), "('$HOME/code/bmi3d/log/armassist.txt')\n", (12383, 12421), False, 'import os\n'), ((12993, 13037), 'ismore.filter.Filter', 'Filter', 
(['bpf_kin_coeffs[0]', 'bpf_kin_coeffs[1]'], {}), '(bpf_kin_coeffs[0], bpf_kin_coeffs[1])\n', (12999, 13037), False, 'from ismore.filter import Filter\n'), ((17267, 17278), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (17275, 17278), True, 'import numpy as np\n'), ((18404, 18415), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (18412, 18415), True, 'import numpy as np\n'), ((19200, 19219), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19208, 19219), True, 'import numpy as np\n'), ((19257, 19276), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19265, 19276), True, 'import numpy as np\n'), ((19318, 19337), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19326, 19337), True, 'import numpy as np\n'), ((19375, 19394), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19383, 19394), True, 'import numpy as np\n'), ((20276, 20329), 'os.path.expandvars', 'os.path.expandvars', (['"""$HOME/code/bmi3d/log/rehand.txt"""'], {}), "('$HOME/code/bmi3d/log/rehand.txt')\n", (20294, 20329), False, 'import os\n'), ((23006, 23020), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23014, 23020), True, 'import numpy as np\n'), ((23055, 23069), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23063, 23069), True, 'import numpy as np\n'), ((23116, 23130), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23124, 23130), True, 'import numpy as np\n'), ((23163, 23177), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23171, 23177), True, 'import numpy as np\n'), ((23613, 23624), 'time.time', 'time.time', ([], {}), '()\n', (23622, 23624), False, 'import time\n'), ((24140, 24167), 'numpy.hstack', 'np.hstack', (['[aa_pos, rh_pos]'], {}), '([aa_pos, rh_pos])\n', (24149, 24167), True, 'import numpy as np\n'), ((24289, 24316), 'numpy.hstack', 'np.hstack', (['[aa_pos, rh_pos]'], {}), '([aa_pos, rh_pos])\n', (24298, 24316), True, 'import numpy as np\n'), ((24450, 24477), 'numpy.hstack', 'np.hstack', (['[aa_vel, rh_vel]'], {}), '([aa_vel, rh_vel])\n', (24459, 24477), True, 'import numpy as np\n'), ((24599, 24626), 'numpy.hstack', 'np.hstack', (['[aa_vel, rh_vel]'], {}), '([aa_vel, rh_vel])\n', (24608, 24626), True, 'import numpy as np\n'), ((41559, 41573), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (41567, 41573), True, 'import numpy as np\n'), ((41608, 41622), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (41616, 41622), True, 'import numpy as np\n'), ((42229, 42254), 'numpy.hstack', 'np.hstack', (['vel_emg_scaled'], {}), '(vel_emg_scaled)\n', (42238, 42254), True, 'import numpy as np\n'), ((42779, 42831), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_fingers, vel_bl_prono)'], {}), '((vel_bl_aa, vel_bl_fingers, vel_bl_prono))\n', (42788, 42831), True, 'import numpy as np\n'), ((43155, 43180), 'numpy.hstack', 'np.hstack', (['vel_bl_fb_gain'], {}), '(vel_bl_fb_gain)\n', (43164, 43180), True, 'import numpy as np\n'), ((52437, 52462), 'numpy.hstack', 'np.hstack', (['vel_emg_scaled'], {}), '(vel_emg_scaled)\n', (52446, 52462), True, 'import numpy as np\n'), ((52987, 53039), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_fingers, vel_bl_prono)'], {}), '((vel_bl_aa, vel_bl_fingers, vel_bl_prono))\n', (52996, 53039), True, 'import numpy as np\n'), ((53364, 53389), 'numpy.hstack', 'np.hstack', (['vel_bl_fb_gain'], {}), '(vel_bl_fb_gain)\n', (53373, 53389), True, 'import numpy as np\n'), ((5557, 5592), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', 
(5564, 5592), True, 'import numpy as np\n'), ((6052, 6087), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (6059, 6087), True, 'import numpy as np\n'), ((6726, 6785), 'numpy.nonzero', 'np.nonzero', (['((projected_pos > self.max_pos_vals) * (vel > 0))'], {}), '((projected_pos > self.max_pos_vals) * (vel > 0))\n', (6736, 6785), True, 'import numpy as np\n'), ((6813, 6872), 'numpy.nonzero', 'np.nonzero', (['((projected_pos < self.min_pos_vals) * (vel < 0))'], {}), '((projected_pos < self.min_pos_vals) * (vel < 0))\n', (6823, 6872), True, 'import numpy as np\n'), ((14128, 14163), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (14135, 14163), True, 'import numpy as np\n'), ((16568, 16581), 'numpy.isnan', 'np.isnan', (['vel'], {}), '(vel)\n', (16576, 16581), True, 'import numpy as np\n'), ((20874, 20909), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (20881, 20909), True, 'import numpy as np\n'), ((23209, 23223), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (23217, 23223), True, 'import numpy as np\n'), ((23262, 23276), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (23270, 23276), True, 'import numpy as np\n'), ((27532, 27554), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (27540, 27554), True, 'import numpy as np\n'), ((27760, 27782), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (27768, 27782), True, 'import numpy as np\n'), ((35907, 35929), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (35915, 35929), True, 'import numpy as np\n'), ((36135, 36157), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (36143, 36157), True, 'import numpy as np\n'), ((45598, 45620), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (45606, 45620), True, 'import numpy as np\n'), ((45826, 45848), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (45834, 45848), True, 'import numpy as np\n'), ((55736, 55758), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (55744, 55758), True, 'import numpy as np\n'), ((55964, 55986), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (55972, 55986), True, 'import numpy as np\n'), ((5450, 5461), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (5456, 5461), True, 'import numpy as np\n'), ((5945, 5956), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (5951, 5956), True, 'import numpy as np\n'), ((7146, 7181), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (7153, 7181), True, 'import numpy as np\n'), ((9469, 9547), 'numpy.logical_and', 'np.logical_and', (['(pos_pred[self.aa_psi_ix] >= mn)', '(pos_pred[self.aa_psi_ix] <= mx)'], {}), '(pos_pred[self.aa_psi_ix] >= mn, pos_pred[self.aa_psi_ix] <= mx)\n', (9483, 9547), True, 'import numpy as np\n'), ((10463, 10548), 'numpy.logical_and', 'np.logical_and', (['(pos_pred[self.rh_pron_ix] >= mn)', '(pos_pred[self.rh_pron_ix] <= mx)'], {}), '(pos_pred[self.rh_pron_ix] >= mn, pos_pred[self.rh_pron_ix] <= mx\n )\n', (10477, 10548), True, 'import numpy as np\n'), ((14021, 14032), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (14027, 14032), True, 'import numpy as np\n'), ((15725, 15741), 'numpy.isnan', 'np.isnan', (['vel[i]'], {}), '(vel[i])\n', (15733, 15741), True, 'import numpy as np\n'), ((16344, 16360), 'numpy.isnan', 'np.isnan', (['vel[i]'], {}), 
'(vel[i])\n', (16352, 16360), True, 'import numpy as np\n'), ((20767, 20778), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (20773, 20778), True, 'import numpy as np\n'), ((28046, 28079), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (28055, 28079), True, 'import numpy as np\n'), ((29389, 29496), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (29403, 29496), True, 'import numpy as np\n'), ((30359, 30468), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (30373, 30468), True, 'import numpy as np\n'), ((36421, 36454), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (36430, 36454), True, 'import numpy as np\n'), ((37764, 37871), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (37778, 37871), True, 'import numpy as np\n'), ((38734, 38843), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (38748, 38843), True, 'import numpy as np\n'), ((46112, 46145), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (46121, 46145), True, 'import numpy as np\n'), ((47478, 47585), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (47492, 47585), True, 'import numpy as np\n'), ((48486, 48595), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (48500, 48595), True, 'import numpy as np\n'), ((56250, 56283), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (56259, 56283), True, 'import numpy as np\n'), ((58245, 58352), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (58259, 58352), True, 'import numpy as np\n'), ((59616, 59725), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (59630, 59725), True, 'import numpy as np\n'), ((4665, 4676), 'time.time', 'time.time', ([], {}), '()\n', (4674, 4676), False, 'import time\n'), ((7035, 7046), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (7041, 7046), True, 'import numpy as np\n'), ((10947, 11001), 'numpy.logical_and', 'np.logical_and', (['(pos_pred[ix] >= mn)', '(pos_pred[ix] 
<= mx)'], {}), '(pos_pred[ix] >= mn, pos_pred[ix] <= mx)\n', (10961, 11001), True, 'import numpy as np\n'), ((15574, 15585), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (15582, 15585), True, 'import numpy as np\n'), ((16193, 16204), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (16201, 16204), True, 'import numpy as np\n'), ((31045, 31105), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (31059, 31105), True, 'import numpy as np\n'), ((39420, 39480), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (39434, 39480), True, 'import numpy as np\n'), ((49172, 49232), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (49186, 49232), True, 'import numpy as np\n'), ((60686, 60746), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (60700, 60746), True, 'import numpy as np\n'), ((3641, 3661), 'numpy.zeros', 'np.zeros', (['self.n_dof'], {}), '(self.n_dof)\n', (3649, 3661), True, 'import numpy as np\n'), ((11581, 11593), 'time.ctime', 'time.ctime', ([], {}), '()\n', (11591, 11593), False, 'import time\n'), ((58985, 59031), 'numpy.logical_and', 'np.logical_and', (['(psi_pred >= mn)', '(psi_pred <= mx)'], {}), '(psi_pred >= mn, psi_pred <= mx)\n', (58999, 59031), True, 'import numpy as np\n'), ((60108, 60156), 'numpy.logical_and', 'np.logical_and', (['(pron_pred >= mn)', '(pron_pred <= mx)'], {}), '(pron_pred >= mn, pron_pred <= mx)\n', (60122, 60156), True, 'import numpy as np\n'), ((29618, 29658), 'numpy.array', 'np.array', (['[predx, predy, pos_pred_aa[2]]'], {}), '([predx, predy, pos_pred_aa[2]])\n', (29626, 29658), True, 'import numpy as np\n'), ((37993, 38033), 'numpy.array', 'np.array', (['[predx, predy, pos_pred_aa[2]]'], {}), '([predx, predy, pos_pred_aa[2]])\n', (38001, 38033), True, 'import numpy as np\n'), ((61095, 61143), 'numpy.logical_and', 'np.logical_and', (['(fing_pred >= mn)', '(fing_pred <= mx)'], {}), '(fing_pred >= mn, fing_pred <= mx)\n', (61109, 61143), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
import glob
import re
from log import print_to_file
from scipy.fftpack import fftn, ifftn
from skimage.feature import peak_local_max, canny
from skimage.transform import hough_circle
import pickle as pickle
import matplotlib.pyplot as plt
from matplotlib import animation
from paths import TRAIN_DATA_PATH, LOGS_PATH, PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH
from paths import TEST_DATA_PATH
def orthogonal_projection_on_slice(percentual_coordinate, source_metadata, target_metadata):
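    """Project a point given in percentual (row, column) coordinates of the
    source slice onto the target slice, returned again as percentual
    coordinates.

    The homogeneous transforms below go: percent -> pixels -> mm
    (PixelSpacing) -> patient space (source ImageOrientationPatient and
    ImagePositionPatient) -> target slice frame (shift by the target position
    and project onto its normalised orientation rows) -> mm -> pixels ->
    percent.
    """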
point = np.array([[percentual_coordinate[0]],
[percentual_coordinate[1]],
[0],
[1]])
image_size = [source_metadata["Rows"], source_metadata["Columns"]]
point = np.dot(np.array( [[image_size[0],0,0,0],
[0,image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = source_metadata["PixelSpacing"]
point = np.dot(np.array( [[pixel_spacing[0],0,0,0],
[0,pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
Fa = np.array(source_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
posa = source_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[Fa[0,0],Fa[1,0],0,posa[0]],
[Fa[0,1],Fa[1,1],0,posa[1]],
[Fa[0,2],Fa[1,2],0,posa[2]],
[0,0,0,1]]), point)
posb = target_metadata["ImagePositionPatient"]
point = np.dot(np.array( [[1,0,0,-posb[0]],
[0,1,0,-posb[1]],
[0,0,1,-posb[2]],
[0,0,0,1]]), point)
Fb = np.array(target_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:]
ff0 = np.sqrt(np.sum(Fb[0,:]*Fb[0,:]))
ff1 = np.sqrt(np.sum(Fb[1,:]*Fb[1,:]))
point = np.dot(np.array( [[Fb[0,0]/ff0,Fb[0,1]/ff0,Fb[0,2]/ff0,0],
[Fb[1,0]/ff1,Fb[1,1]/ff1,Fb[1,2]/ff1,0],
[0,0,0,0],
[0,0,0,1]]), point)
pixel_spacing = target_metadata["PixelSpacing"]
point = np.dot(np.array( [[1./pixel_spacing[0],0,0,0],
[0,1./pixel_spacing[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
image_size = [target_metadata["Rows"], target_metadata["Columns"]]
point = np.dot(np.array( [[1./image_size[0],0,0,0],
[0,1./image_size[1],0,0],
[0,0,0,0],
[0,0,0,1]]), point)
return point[:2,0] # percentual coordinate as well
#joni
minradius = 15
maxradius = 65
kernel_width = 5
center_margin = 8
num_peaks = 10
num_circles = 10 # 20
radstep = 2
#ira
minradius_mm=25
maxradius_mm=45
kernel_width=5
center_margin=8
num_peaks=10
num_circles=20
radstep=2
def extract_roi(data, pixel_spacing, minradius_mm=15, maxradius_mm=65, kernel_width=5, center_margin=8, num_peaks=10,
num_circles=10, radstep=2):
"""
Returns center and radii of ROI region in (i,j) format
"""
# radius of the smallest and largest circles in mm estimated from the train set
# convert to pixel counts
minradius = int(minradius_mm / pixel_spacing)
maxradius = int(maxradius_mm / pixel_spacing)
ximagesize = data[0]['data'].shape[1]
yimagesize = data[0]['data'].shape[2]
xsurface = np.tile(list(range(ximagesize)), (yimagesize, 1)).T
ysurface = np.tile(list(range(yimagesize)), (ximagesize, 1))
lsurface = np.zeros((ximagesize, yimagesize))
allcenters = []
allaccums = []
allradii = []
for dslice in data:
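        # The first temporal Fourier harmonic emphasises structures that move
        # periodically over the cardiac cycle (the beating ventricle), which
        # makes the roughly circular ROI stand out for the Hough transform below.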
ff1 = fftn(dslice['data'])
fh = np.absolute(ifftn(ff1[1, :, :]))
fh[fh < 0.1 * np.max(fh)] = 0.0
image = 1. * fh / np.max(fh)
# find hough circles and detect two radii
edges = canny(image, sigma=3)
hough_radii = np.arange(minradius, maxradius, radstep)
hough_res = hough_circle(edges, hough_radii)
if hough_res.any():
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
# For each radius, extract num_peaks circles
peaks = peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius] * num_peaks)
# Keep the most prominent num_circles circles
sorted_circles_idxs = np.argsort(accums)[::-1][:num_circles]
for idx in sorted_circles_idxs:
center_x, center_y = centers[idx]
allcenters.append(centers[idx])
allradii.append(radii[idx])
allaccums.append(accums[idx])
brightness = accums[idx]
lsurface = lsurface + brightness * np.exp(
-((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2)
lsurface = lsurface / lsurface.max()
# select most likely ROI center
roi_center = np.unravel_index(lsurface.argmax(), lsurface.shape)
# determine ROI radius
roi_x_radius = 0
roi_y_radius = 0
for idx in range(len(allcenters)):
xshift = np.abs(allcenters[idx][0] - roi_center[0])
yshift = np.abs(allcenters[idx][1] - roi_center[1])
if (xshift <= center_margin) & (yshift <= center_margin):
roi_x_radius = np.max((roi_x_radius, allradii[idx] + xshift))
roi_y_radius = np.max((roi_y_radius, allradii[idx] + yshift))
if roi_x_radius > 0 and roi_y_radius > 0:
roi_radii = roi_x_radius, roi_y_radius
else:
roi_radii = None
return roi_center, roi_radii
def read_slice(path):
    return pickle.load(open(path, 'rb'))['data']
def read_metadata(path):
    d = pickle.load(open(path, 'rb'))['metadata'][0]
metadata = {k: d[k] for k in ['PixelSpacing', 'ImageOrientationPatient', 'ImagePositionPatient', 'SliceLocation',
'PatientSex', 'PatientAge', 'Rows', 'Columns']}
metadata['PixelSpacing'] = np.float32(metadata['PixelSpacing'])
metadata['ImageOrientationPatient'] = np.float32(metadata['ImageOrientationPatient'])
metadata['SliceLocation'] = np.float32(metadata['SliceLocation'])
metadata['ImagePositionPatient'] = np.float32(metadata['ImagePositionPatient'])
metadata['PatientSex'] = 1 if metadata['PatientSex'] == 'F' else 0
metadata['PatientAge'] = int(metadata['PatientAge'][1:3])
metadata['Rows'] = int(metadata['Rows'])
metadata['Columns'] = int(metadata['Columns'])
return metadata
def get_patient_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + '/sax_*.pkl'),
key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.search(r'/(sax_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
d = read_slice(s)
patient_data.append({'data': d, 'metadata': metadata,
'slice_id': slice_id, 'patient_id': pid})
return patient_data
def get_patient_ch_data(patient_data_path):
patient_data = []
spaths = sorted(glob.glob(patient_data_path + '/*ch_*.pkl'),
key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1)))
pid = re.search(r'/(\d+)/study$', patient_data_path).group(1)
for s in spaths:
slice_id = re.search(r'/(\d+ch_\d+\.pkl)$', s).group(1)
metadata = read_metadata(s)
d = read_slice(s)
patient_data.append({'data': d, 'metadata': metadata,
'slice_id': slice_id, 'patient_id': pid})
return patient_data
def sort_slices(slices):
nslices = len(slices)
positions = np.zeros((nslices,))
for i in range(nslices):
positions[i] = slices[i]['metadata']['SliceLocation']
sorted_slices = [s for pos, s in sorted(zip(positions.tolist(), slices),
key=lambda x: x[0], reverse=True)]
return sorted_slices
def group_slices(slice_stack):
"""
Groups slices into stacks with the same image orientation
:param slice_stack:
:return: list of slice stacks
"""
img_orientations = []
for s in slice_stack:
img_orientations.append(tuple(s['metadata']['ImageOrientationPatient']))
img_orientations = list(set(img_orientations))
if len(img_orientations) == 1:
return [slice_stack]
else:
slice_groups = [[] for _ in range(len(img_orientations))]
for s in slice_stack:
group = img_orientations.index(tuple(s['metadata']['ImageOrientationPatient']))
slice_groups[group].append(s)
return slice_groups
def plot_roi(slice_group, roi_center, roi_radii):
x_roi_center, y_roi_center = roi_center[0], roi_center[1]
x_roi_radius, y_roi_radius = roi_radii[0], roi_radii[1]
print('nslices', len(slice_group))
    for dslice in [slice_group[len(slice_group) // 2]]:
outdata = dslice['data']
# print dslice['slice_id']
# print dslice['metadata']['SliceLocation']
# print dslice['metadata']['ImageOrientationPatient']
# print dslice['metadata']['PixelSpacing']
# print dslice['data'].shape
# print '--------------------------------------'
roi_mask = np.zeros_like(outdata[0])
roi_mask[x_roi_center - x_roi_radius:x_roi_center + x_roi_radius,
y_roi_center - y_roi_radius:y_roi_center + y_roi_radius] = 1
outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]
outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5]
fig = plt.figure(1)
fig.canvas.set_window_title(dslice['patient_id'] + dslice['slice_id'])
def init_out():
im.set_data(outdata[0])
def animate_out(i):
im.set_data(outdata[i])
return im
im = fig.gca().imshow(outdata[0], cmap='gist_gray_r', vmin=0, vmax=255)
anim = animation.FuncAnimation(fig, animate_out, init_func=init_out, frames=30, interval=50)
plt.show()
def get_slice2roi(data_path, plot=False):
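    """For each patient: sort and group the SAX slices, estimate an ROI
    (center and radii) per slice group with the Hough-circle heuristic above,
    project the found centers onto the 2ch/4ch slices, and pickle the
    resulting {patient_id: {slice_id: {'roi_center', 'roi_radii'}}} dict."""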
patient_paths = sorted(glob.glob(data_path + '*/study'))
slice2roi = {}
for p in patient_paths:
patient_data = get_patient_data(p)
sorted_slices = sort_slices(patient_data)
grouped_slices = group_slices(sorted_slices)
ch_data = get_patient_ch_data(p)
ch4, ch2 = None,None
for data in ch_data:
if data['slice_id'].startswith("4"):
ch4 = data
elif data['slice_id'].startswith("2"):
ch2 = data
# init patient dict
pid = sorted_slices[0]['patient_id']
print("processing patient %s" % pid)
# print pid
slice2roi[pid] = {}
# pixel spacing doesn't change within one patient
pixel_spacing = sorted_slices[0]['metadata']['PixelSpacing'][0]
for slice_group in grouped_slices:
try:
roi_center, roi_radii = extract_roi(slice_group, pixel_spacing)
except:
print('Could not find ROI')
roi_center, roi_radii = None, None
print(roi_center, roi_radii)
if plot and roi_center and roi_radii:
pass
#plot_roi(slice_group, roi_center, roi_radii)
for s in slice_group:
sid = s['slice_id']
slice2roi[pid][sid] = {'roi_center': roi_center, 'roi_radii': roi_radii}
# project found roi_centers on the 4ch and 2ch slice
ch4_centers = []
ch2_centers = []
for slice in sorted_slices:
sid = slice['slice_id']
roi_center = slice2roi[pid][sid]['roi_center']
metadata_source = slice['metadata']
hough_roi_center = (float(roi_center[0]) / metadata_source['Rows'],
float(roi_center[1]) / metadata_source['Columns'])
if ch4 is not None:
metadata_target = ch4['metadata']
result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
ch_roi_center = [float(result[0]) * metadata_target['Rows'],
float(result[1]) * metadata_target['Columns']]
ch4_centers.append(ch_roi_center)
if ch2 is not None:
metadata_target = ch2['metadata']
result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target)
ch_roi_center = [float(result[0]) * metadata_target['Rows'],
float(result[1]) * metadata_target['Columns']]
ch2_centers.append(ch_roi_center)
if ch4 is not None:
centers = np.array(ch4_centers)
ch4_result_center = np.mean(centers, axis=0)
ch4_result_radius = np.max(np.sqrt((centers - ch4_result_center)**2))
sid = ch4['slice_id']
slice2roi[pid][sid] = {'roi_center': tuple(ch4_result_center), 'roi_radii': (ch4_result_radius, ch4_result_radius)}
if ch2 is not None:
centers = np.array(ch2_centers)
ch2_result_center = np.mean(centers, axis=0)
ch2_result_radius = np.max(np.sqrt((centers - ch2_result_center)**2))
sid = ch2['slice_id']
slice2roi[pid][sid] = {'roi_center': tuple(ch2_result_center), 'roi_radii': (ch2_result_radius, ch2_result_radius)}
filename = data_path.split('/')[-1] + '_slice2roi_joni.pkl'
    with open(filename, 'wb') as f:
pickle.dump(slice2roi, f)
print('saved to ', filename)
return slice2roi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
required = parser.add_argument_group('required arguments')
#required.add_argument('-c', '--config',
# help='configuration to run',
# required=True)
args = parser.parse_args()
data_paths = [PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH]
log_path = LOGS_PATH + "generate_roi.log"
with print_to_file(log_path):
for d in data_paths:
get_slice2roi(d, plot=True)
print("log saved to '%s'" % log_path)
|
[
"numpy.sqrt",
"scipy.fftpack.fftn",
"numpy.array",
"numpy.argsort",
"numpy.arange",
"re.search",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.max",
"numpy.exp",
"log.print_to_file",
"glob.glob",
"numpy.abs",
"numpy.float32",
"pickle.dump",
"numpy.sum",
"numpy.zeros",
"skimage.feature.canny",
"skimage.transform.hough_circle",
"scipy.fftpack.ifftn",
"numpy.zeros_like",
"skimage.feature.peak_local_max"
] |
[((482, 558), 'numpy.array', 'np.array', (['[[percentual_coordinate[0]], [percentual_coordinate[1]], [0], [1]]'], {}), '([[percentual_coordinate[0]], [percentual_coordinate[1]], [0], [1]])\n', (490, 558), True, 'import numpy as np\n'), ((3780, 3814), 'numpy.zeros', 'np.zeros', (['(ximagesize, yimagesize)'], {}), '((ximagesize, yimagesize))\n', (3788, 3814), True, 'import numpy as np\n'), ((6458, 6494), 'numpy.float32', 'np.float32', (["metadata['PixelSpacing']"], {}), "(metadata['PixelSpacing'])\n", (6468, 6494), True, 'import numpy as np\n'), ((6538, 6585), 'numpy.float32', 'np.float32', (["metadata['ImageOrientationPatient']"], {}), "(metadata['ImageOrientationPatient'])\n", (6548, 6585), True, 'import numpy as np\n'), ((6619, 6656), 'numpy.float32', 'np.float32', (["metadata['SliceLocation']"], {}), "(metadata['SliceLocation'])\n", (6629, 6656), True, 'import numpy as np\n'), ((6697, 6741), 'numpy.float32', 'np.float32', (["metadata['ImagePositionPatient']"], {}), "(metadata['ImagePositionPatient'])\n", (6707, 6741), True, 'import numpy as np\n'), ((8271, 8291), 'numpy.zeros', 'np.zeros', (['(nslices,)'], {}), '((nslices,))\n', (8279, 8291), True, 'import numpy as np\n'), ((14462, 14506), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (14485, 14506), False, 'import argparse\n'), ((720, 814), 'numpy.array', 'np.array', (['[[image_size[0], 0, 0, 0], [0, image_size[1], 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]\n ]'], {}), '([[image_size[0], 0, 0, 0], [0, image_size[1], 0, 0], [0, 0, 0, 0],\n [0, 0, 0, 1]])\n', (728, 814), True, 'import numpy as np\n'), ((978, 1079), 'numpy.array', 'np.array', (['[[pixel_spacing[0], 0, 0, 0], [0, pixel_spacing[1], 0, 0], [0, 0, 0, 0], [0,\n 0, 0, 1]]'], {}), '([[pixel_spacing[0], 0, 0, 0], [0, pixel_spacing[1], 0, 0], [0, 0, \n 0, 0], [0, 0, 0, 1]])\n', (986, 1079), True, 'import numpy as np\n'), ((1329, 1460), 'numpy.array', 'np.array', (['[[Fa[0, 0], Fa[1, 0], 0, posa[0]], [Fa[0, 1], Fa[1, 1], 0, posa[1]], [Fa[0,\n 2], Fa[1, 2], 0, posa[2]], [0, 0, 0, 1]]'], {}), '([[Fa[0, 0], Fa[1, 0], 0, posa[0]], [Fa[0, 1], Fa[1, 1], 0, posa[1]\n ], [Fa[0, 2], Fa[1, 2], 0, posa[2]], [0, 0, 0, 1]])\n', (1337, 1460), True, 'import numpy as np\n'), ((1616, 1707), 'numpy.array', 'np.array', (['[[1, 0, 0, -posb[0]], [0, 1, 0, -posb[1]], [0, 0, 1, -posb[2]], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, -posb[0]], [0, 1, 0, -posb[1]], [0, 0, 1, -posb[2]], [0,\n 0, 0, 1]])\n', (1624, 1707), True, 'import numpy as np\n'), ((1905, 1932), 'numpy.sum', 'np.sum', (['(Fb[0, :] * Fb[0, :])'], {}), '(Fb[0, :] * Fb[0, :])\n', (1911, 1932), True, 'import numpy as np\n'), ((1949, 1976), 'numpy.sum', 'np.sum', (['(Fb[1, :] * Fb[1, :])'], {}), '(Fb[1, :] * Fb[1, :])\n', (1955, 1976), True, 'import numpy as np\n'), ((1996, 2144), 'numpy.array', 'np.array', (['[[Fb[0, 0] / ff0, Fb[0, 1] / ff0, Fb[0, 2] / ff0, 0], [Fb[1, 0] / ff1, Fb[1,\n 1] / ff1, Fb[1, 2] / ff1, 0], [0, 0, 0, 0], [0, 0, 0, 1]]'], {}), '([[Fb[0, 0] / ff0, Fb[0, 1] / ff0, Fb[0, 2] / ff0, 0], [Fb[1, 0] /\n ff1, Fb[1, 1] / ff1, Fb[1, 2] / ff1, 0], [0, 0, 0, 0], [0, 0, 0, 1]])\n', (2004, 2144), True, 'import numpy as np\n'), ((2290, 2402), 'numpy.array', 'np.array', (['[[1.0 / pixel_spacing[0], 0, 0, 0], [0, 1.0 / pixel_spacing[1], 0, 0], [0, \n 0, 0, 0], [0, 0, 0, 1]]'], {}), '([[1.0 / pixel_spacing[0], 0, 0, 0], [0, 1.0 / pixel_spacing[1], 0,\n 0], [0, 0, 0, 0], [0, 0, 0, 1]])\n', (2298, 2402), True, 'import numpy as np\n'), ((2579, 2686), 'numpy.array', 'np.array', (['[[1.0 / 
image_size[0], 0, 0, 0], [0, 1.0 / image_size[1], 0, 0], [0, 0, 0, \n 0], [0, 0, 0, 1]]'], {}), '([[1.0 / image_size[0], 0, 0, 0], [0, 1.0 / image_size[1], 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 1]])\n', (2587, 2686), True, 'import numpy as np\n'), ((3919, 3939), 'scipy.fftpack.fftn', 'fftn', (["dslice['data']"], {}), "(dslice['data'])\n", (3923, 3939), False, 'from scipy.fftpack import fftn, ifftn\n'), ((4138, 4159), 'skimage.feature.canny', 'canny', (['image'], {'sigma': '(3)'}), '(image, sigma=3)\n', (4143, 4159), False, 'from skimage.feature import peak_local_max, canny\n'), ((4183, 4223), 'numpy.arange', 'np.arange', (['minradius', 'maxradius', 'radstep'], {}), '(minradius, maxradius, radstep)\n', (4192, 4223), True, 'import numpy as np\n'), ((4245, 4277), 'skimage.transform.hough_circle', 'hough_circle', (['edges', 'hough_radii'], {}), '(edges, hough_radii)\n', (4257, 4277), False, 'from skimage.transform import hough_circle\n'), ((5584, 5626), 'numpy.abs', 'np.abs', (['(allcenters[idx][0] - roi_center[0])'], {}), '(allcenters[idx][0] - roi_center[0])\n', (5590, 5626), True, 'import numpy as np\n'), ((5645, 5687), 'numpy.abs', 'np.abs', (['(allcenters[idx][1] - roi_center[1])'], {}), '(allcenters[idx][1] - roi_center[1])\n', (5651, 5687), True, 'import numpy as np\n'), ((7086, 7129), 'glob.glob', 'glob.glob', (["(patient_data_path + '/sax_*.pkl')"], {}), "(patient_data_path + '/sax_*.pkl')\n", (7095, 7129), False, 'import glob\n'), ((7685, 7728), 'glob.glob', 'glob.glob', (["(patient_data_path + '/*ch_*.pkl')"], {}), "(patient_data_path + '/*ch_*.pkl')\n", (7694, 7728), False, 'import glob\n'), ((9907, 9932), 'numpy.zeros_like', 'np.zeros_like', (['outdata[0]'], {}), '(outdata[0])\n', (9920, 9932), True, 'import numpy as np\n'), ((10768, 10800), 'glob.glob', 'glob.glob', (["(data_path + '*/study')"], {}), "(data_path + '*/study')\n", (10777, 10800), False, 'import glob\n'), ((14334, 14359), 'pickle.dump', 'pickle.dump', (['slice2roi', 'f'], {}), '(slice2roi, f)\n', (14345, 14359), True, 'import pickle as pickle\n'), ((14868, 14891), 'log.print_to_file', 'print_to_file', (['log_path'], {}), '(log_path)\n', (14881, 14891), False, 'from log import print_to_file\n'), ((3966, 3985), 'scipy.fftpack.ifftn', 'ifftn', (['ff1[1, :, :]'], {}), '(ff1[1, :, :])\n', (3971, 3985), False, 'from scipy.fftpack import fftn, ifftn\n'), ((4055, 4065), 'numpy.max', 'np.max', (['fh'], {}), '(fh)\n', (4061, 4065), True, 'import numpy as np\n'), ((5783, 5829), 'numpy.max', 'np.max', (['(roi_x_radius, allradii[idx] + xshift)'], {}), '((roi_x_radius, allradii[idx] + xshift))\n', (5789, 5829), True, 'import numpy as np\n'), ((5858, 5904), 'numpy.max', 'np.max', (['(roi_y_radius, allradii[idx] + yshift)'], {}), '((roi_y_radius, allradii[idx] + yshift))\n', (5864, 5904), True, 'import numpy as np\n'), ((7227, 7273), 're.search', 're.search', (['"""/(\\\\d+)/study$"""', 'patient_data_path'], {}), "('/(\\\\d+)/study$', patient_data_path)\n", (7236, 7273), False, 'import re\n'), ((7826, 7872), 're.search', 're.search', (['"""/(\\\\d+)/study$"""', 'patient_data_path'], {}), "('/(\\\\d+)/study$', patient_data_path)\n", (7835, 7872), False, 'import re\n'), ((13512, 13533), 'numpy.array', 'np.array', (['ch4_centers'], {}), '(ch4_centers)\n', (13520, 13533), True, 'import numpy as np\n'), ((13567, 13591), 'numpy.mean', 'np.mean', (['centers'], {'axis': '(0)'}), '(centers, axis=0)\n', (13574, 13591), True, 'import numpy as np\n'), ((13893, 13914), 'numpy.array', 'np.array', (['ch2_centers'], {}), '(ch2_centers)\n', (13901, 
13914), True, 'import numpy as np\n'), ((13948, 13972), 'numpy.mean', 'np.mean', (['centers'], {'axis': '(0)'}), '(centers, axis=0)\n', (13955, 13972), True, 'import numpy as np\n'), ((1179, 1231), 'numpy.array', 'np.array', (["source_metadata['ImageOrientationPatient']"], {}), "(source_metadata['ImageOrientationPatient'])\n", (1187, 1231), True, 'import numpy as np\n'), ((1808, 1860), 'numpy.array', 'np.array', (["target_metadata['ImageOrientationPatient']"], {}), "(target_metadata['ImageOrientationPatient'])\n", (1816, 1860), True, 'import numpy as np\n'), ((4532, 4570), 'skimage.feature.peak_local_max', 'peak_local_max', (['h'], {'num_peaks': 'num_peaks'}), '(h, num_peaks=num_peaks)\n', (4546, 4570), False, 'from skimage.feature import peak_local_max, canny\n'), ((7325, 7359), 're.search', 're.search', (['"""/(sax_\\\\d+\\\\.pkl)$"""', 's'], {}), "('/(sax_\\\\d+\\\\.pkl)$', s)\n", (7334, 7359), False, 'import re\n'), ((7924, 7961), 're.search', 're.search', (['"""/(\\\\d+ch_\\\\d+\\\\.pkl)$"""', 's'], {}), "('/(\\\\d+ch_\\\\d+\\\\.pkl)$', s)\n", (7933, 7961), False, 'import re\n'), ((13632, 13675), 'numpy.sqrt', 'np.sqrt', (['((centers - ch4_result_center) ** 2)'], {}), '((centers - ch4_result_center) ** 2)\n', (13639, 13675), True, 'import numpy as np\n'), ((14013, 14056), 'numpy.sqrt', 'np.sqrt', (['((centers - ch2_result_center) ** 2)'], {}), '((centers - ch2_result_center) ** 2)\n', (14020, 14056), True, 'import numpy as np\n'), ((4010, 4020), 'numpy.max', 'np.max', (['fh'], {}), '(fh)\n', (4016, 4020), True, 'import numpy as np\n'), ((4818, 4836), 'numpy.argsort', 'np.argsort', (['accums'], {}), '(accums)\n', (4828, 4836), True, 'import numpy as np\n'), ((5190, 5281), 'numpy.exp', 'np.exp', (['(-((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2\n )'], {}), '(-((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / \n kernel_width ** 2)\n', (5196, 5281), True, 'import numpy as np\n'), ((7170, 7206), 're.search', 're.search', (['"""/\\\\w*_(\\\\d+)*\\\\.pkl$"""', 'x'], {}), "('/\\\\w*_(\\\\d+)*\\\\.pkl$', x)\n", (7179, 7206), False, 'import re\n'), ((7769, 7805), 're.search', 're.search', (['"""/\\\\w*_(\\\\d+)*\\\\.pkl$"""', 'x'], {}), "('/\\\\w*_(\\\\d+)*\\\\.pkl$', x)\n", (7778, 7805), False, 'import re\n')]
|
import numpy as np
pos = []
normals = []
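# Each block below appends one contact: p is a 1x3 position and n its 1x3
# surface normal; both lists are converted to 3x1 column vectors at the end.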
p = [[-0.4722227, -0.24517583, -0.6370031]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.2549828, -0.24587737, -0.63704705]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.25787751, -0.38255749, -0.63705089]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
p = [[-0.47206733, -0.38317576, -0.6370076]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)
#Contact lgripper/handrail
#Left
p = [[0.3651077, 0.33419711, 0.63609439]]
n = [[-3.39491173e-05, 9.99999875e-01, 4.99472000e-04]]
pos.append(p)
normals.append(n)
#Right
#p = [[0.36510907, 0.29419711, 0.63607441]]
#p = [[0.3651077, 0.33419711, 0.63609439]]
#n = [[3.44761855e-05, -9.99999874e-01, -5.00077386e-04]]
#pos.append(p)
#normals.append(n)
#Bottom
#p = [[0.34212609, 0.31418314, 0.66248165]]
#n = [[-6.56636734e-01, -3.99160434e-04, 7.54206895e-01]]
#pos.append(p)
#normals.append(n)
#Top
p = [[0.38480749, 0.31420908, 0.61345819]]
n = [[6.56636734e-01, 4.00439950e-04, -7.54206894e-01]]
pos.append(p)
normals.append(n)
pos = [np.array(px).T for px in pos]
#for p in pos:
# p[2, 0] = 0.0
normals = [np.array(nx).T for nx in normals]
|
[
"numpy.array"
] |
[((1222, 1234), 'numpy.array', 'np.array', (['px'], {}), '(px)\n', (1230, 1234), True, 'import numpy as np\n'), ((1295, 1307), 'numpy.array', 'np.array', (['nx'], {}), '(nx)\n', (1303, 1307), True, 'import numpy as np\n')]
|
import numpy
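# Writes JSON-style '"value": "bucket",' pairs: dic.txt maps each integer x in
# range(5, 790) to a bin id (20-wide bins give ids 1-35 above x == 92, roughly
# 10-wide bins give ids 36-44 below), and time.txt maps durations from
# numpy.arange(0, 1.7, 0.01) to 0.1 s wide bins (ids 80-96). Note that y == 0.0
# matches none of the branches, and str(y) on arange floats can yield keys such
# as "0.30000000000000004".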
with open ("dic.txt", "w", encoding="utf-8") as dic:
for x in range(5, 790, 1):
if 92 < x <= 113:
dic.write('"'+str(x)+'"'+":"+ '"'+'1'+'",')
elif 113 < x <= 133:
dic.write('"'+str(x)+'"'+":"+ '"'+'2'+'",')
elif 133 < x <= 153:
dic.write('"'+str(x)+'"'+":"+ '"'+'3'+'",')
elif 153 < x <= 173:
dic.write('"'+str(x)+'"'+":"+ '"'+'4'+'",')
elif 173 < x <= 193:
dic.write('"'+str(x)+'"'+":"+ '"'+'5'+'",')
elif 193 < x <= 213:
dic.write('"'+str(x)+'"'+":"+ '"'+'6'+'",')
elif 213 < x <= 233:
dic.write('"'+str(x)+'"'+":"+ '"'+'7'+'",')
elif 233 < x <= 253:
dic.write('"'+str(x)+'"'+":"+ '"'+'8'+'",')
elif 253 < x <= 273:
dic.write('"'+str(x)+'"'+":"+ '"'+'9'+'",')
elif 273 < x <= 293:
dic.write('"'+str(x)+'"'+":"+ '"'+'10'+'",')
elif 293 < x <= 313:
dic.write('"'+str(x)+'"'+":"+ '"'+'11'+'",')
elif 313 < x <= 333:
dic.write('"'+str(x)+'"'+":"+ '"'+'12'+'",')
elif 333 < x <= 353:
dic.write('"'+str(x)+'"'+":"+ '"'+'13'+'",')
elif 353 < x <= 373:
dic.write('"'+str(x)+'"'+":"+ '"'+'14'+'",')
elif 373 < x <= 393:
dic.write('"'+str(x)+'"'+":"+ '"'+'15'+'",')
elif 393 < x <= 413:
dic.write('"'+str(x)+'"'+":"+ '"'+'16'+'",')
elif 413 < x <= 433:
dic.write('"'+str(x)+'"'+":"+ '"'+'17'+'",')
elif 433 < x <= 453:
dic.write('"'+str(x)+'"'+":"+ '"'+'18'+'",')
elif 453 < x <= 473:
dic.write('"'+str(x)+'"'+":"+ '"'+'19'+'",')
elif 473 < x <= 493:
dic.write('"'+str(x)+'"'+":"+ '"'+'20'+'",')
elif 493 < x <= 513:
dic.write('"'+str(x)+'"'+":"+ '"'+'21'+'",')
elif 513 < x <= 533:
dic.write('"'+str(x)+'"'+":"+ '"'+'22'+'",')
elif 533 < x <= 553:
dic.write('"'+str(x)+'"'+":"+ '"'+'23'+'",')
elif 553 < x <= 573:
dic.write('"'+str(x)+'"'+":"+ '"'+'24'+'",')
elif 573 < x <= 593:
dic.write('"'+str(x)+'"'+":"+ '"'+'25'+'",')
elif 593 < x <= 613:
dic.write('"'+str(x)+'"'+":"+ '"'+'26'+'",')
elif 613 < x <= 633:
dic.write('"'+str(x)+'"'+":"+ '"'+'27'+'",')
elif 633 < x <= 653:
dic.write('"'+str(x)+'"'+":"+ '"'+'28'+'",')
elif 653 < x <= 673:
dic.write('"'+str(x)+'"'+":"+ '"'+'29'+'",')
elif 673 < x <= 693:
dic.write('"'+str(x)+'"'+":"+ '"'+'30'+'",')
elif 693 < x <= 713:
dic.write('"'+str(x)+'"'+":"+ '"'+'31'+'",')
elif 713 < x <= 733:
dic.write('"'+str(x)+'"'+":"+ '"'+'32'+'",')
elif 733 < x <= 753:
dic.write('"'+str(x)+'"'+":"+ '"'+'33'+'",')
elif 753 < x <= 773:
dic.write('"'+str(x)+'"'+":"+ '"'+'34'+'",')
elif 773 < x <= 793:
dic.write('"'+str(x)+'"'+":"+ '"'+'35'+'",')
elif 4 < x <= 15:
dic.write('"'+str(x)+'"'+":"+ '"'+'36'+'",')
elif 15 < x <= 25:
dic.write('"'+str(x)+'"'+":"+ '"'+'37'+'",')
elif 25 < x <= 35:
dic.write('"'+str(x)+'"'+":"+ '"'+'38'+'",')
elif 35 < x <= 45:
dic.write('"'+str(x)+'"'+":"+ '"'+'39'+'",')
elif 45 < x <= 55:
dic.write('"'+str(x)+'"'+":"+ '"'+'40'+'",')
elif 55 < x <= 65:
dic.write('"'+str(x)+'"'+":"+ '"'+'41'+'",')
elif 65 < x <= 75:
dic.write('"'+str(x)+'"'+":"+ '"'+'42'+'",')
elif 75 < x <= 85:
dic.write('"'+str(x)+'"'+":"+ '"'+'43'+'",')
elif 85 < x <= 92:
dic.write('"'+str(x)+'"'+":"+ '"'+'44'+'",')
with open ("time.txt", "w", encoding="utf-8") as duree:
for y in numpy.arange(0, 1.7, 0.01):
if 0 < y <= 0.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'80'+'",')
elif 0.1 < y <= 0.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'81'+'",')
elif 0.2 < y <= 0.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'82'+'",')
elif 0.3 < y <= 0.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'83'+'",')
elif 0.4 < y <= 0.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'84'+'",')
elif 0.5 < y <= 0.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'85'+'",')
elif 0.6 < y <= 0.7:
duree.write('"'+str(y)+'"'+":"+ '"'+'86'+'",')
elif 0.7 < y <= 0.8:
duree.write('"'+str(y)+'"'+":"+ '"'+'87'+'",')
elif 0.8 < y <= 0.9:
duree.write('"'+str(y)+'"'+":"+ '"'+'88'+'",')
elif 0.9 < y <= 1:
duree.write('"'+str(y)+'"'+":"+ '"'+'89'+'",')
elif 1 < y <= 1.1:
duree.write('"'+str(y)+'"'+":"+ '"'+'90'+'",')
elif 1.1 < y <= 1.2:
duree.write('"'+str(y)+'"'+":"+ '"'+'91'+'",')
elif 1.2 < y <= 1.3:
duree.write('"'+str(y)+'"'+":"+ '"'+'92'+'",')
elif 1.3 < y <= 1.4:
duree.write('"'+str(y)+'"'+":"+ '"'+'93'+'",')
elif 1.4 < y <= 1.5:
duree.write('"'+str(y)+'"'+":"+ '"'+'94'+'",')
elif 1.5 < y <= 1.6:
duree.write('"'+str(y)+'"'+":"+ '"'+'95'+'",')
elif 1.6 < y <= 1.7:
duree.write('"'+str(y)+'"'+":"+ '"'+'96'+'",')
|
[
"numpy.arange"
] |
[((4023, 4049), 'numpy.arange', 'numpy.arange', (['(0)', '(1.7)', '(0.01)'], {}), '(0, 1.7, 0.01)\n', (4035, 4049), False, 'import numpy\n')]
|
#!/usr/bin/env python
# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Lidar/BB check for CARLA
This script obtains the LiDAR's point cloud corresponding to all the vehicles
of the scene and makes sure that the points are inside the bounding box of the
corresponding actor.
This is done on a predefined route in Town03 at high speed and with several aggressive
turns.
In a nutshell, the script has a queue that is filled in each frame with a lidar point
cloud and a structure storing the Bounding Boxes. The latter is emulated as a
sensor filling the queue in the on_tick callback of the carla.world. In this way, we make
sure that we are correctly synchronizing the lidar point cloud and BB/actor transformations.
Then, we select the points corresponding to each actor (car) in the scene and check that
they are inside the bounding box of that actor, all in each vehicle's frame of reference.
Important Data structure description:
+ Lidar data structure: four element tuple with:
- [0] Frame
- [1] Sensor name: 'semlidar'
- [2] Point cloud in the form of a numpy dictionary with all semantic lidar information
- [3] Global transformation of the sensor
+ Bounding box data structure: four element tuple with:
- [0] Frame
- [1] Sensor name: 'bb'
- [2] List of actor information: each a tuple with:
- [0] Actor id
- [1] Actor type (blueprint's name)
      - [2] Actor's global transformation
      - [3] Actor's bounding box
+ ActorTrace class: takes the Lidar data structure and one actor's information and
  checks whether all the data points related to this actor are inside its BB.
This is done in the local coordinate frame of the actor and should be done like:
trace = ActorTrace(actor_info, lidar_data)
trace.process()
trace.check_lidar_data()
"""
import glob
import os
import sys
import numpy as np
from queue import Queue
from queue import Empty
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
class ActorTrace(object):
"""Class that store and process information about an actor at certain moment."""
def __init__(self, actor, lidar):
self.set_lidar(lidar)
self.set_actor(actor)
self._lidar_pc_local = np.array([])
self._bb_vertices = np.array([])
self._bb_minlimits = [0, 0, 0]
self._bb_maxlimits = [0, 0, 0]
def set_lidar(self, lidar):
self._frame = lidar[0]
self._lidar_data = lidar[2]
self._lidar_transf = lidar[3]
def set_actor(self, actor):
self._actor_id = actor[0]
self._actor_type = actor[1]
self._actor_transf = actor[2]
self._actor_bb = actor[3]
def process(self):
# Filter lidar points that correspond to my actor id
data_actor = self._lidar_data[self._lidar_data['ObjIdx'] == self._actor_id]
# Take the xyz point cloud data and transform it to actor's frame
points = np.array([data_actor['x'], data_actor['y'], data_actor['z']]).T
points = np.append(points, np.ones((points.shape[0], 1)), axis=1)
points = np.dot(self._lidar_transf.get_matrix(), points.T).T # sensor -> world
points = np.dot(self._actor_transf.get_inverse_matrix(), points.T).T # world -> actor
points = points[:, :-1]
# Saving the points in 'local' coordinates
self._lidar_pc_local = points
# We compute the limits in the local frame of reference using the
# vertices of the bounding box
vertices = self._actor_bb.get_local_vertices()
ver_py = []
for v in vertices:
ver_py.append([v.x, v.y, v.z])
ver_np = np.array(ver_py)
self._bb_vertices = ver_np
self._bb_minlimits = ver_np.min(axis=0) - 0.001
self._bb_maxlimits = ver_np.max(axis=0) + 0.001
def print(self, print_if_empty = False):
if self._lidar_pc_local.shape[0] > 0 or print_if_empty:
np.savetxt("veh_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._lidar_pc_local)
np.savetxt("bb_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._bb_vertices)
def lidar_is_outside_bb(self, check_axis = [True, True, True]):
lidar_pc = self._lidar_pc_local
if check_axis[0]:
xmin = self._bb_minlimits[0]
xmax = self._bb_maxlimits[0]
out = np.any((lidar_pc[:,0] > xmax) | (lidar_pc[:,0] < xmin))
if out:
print("Problem with x axis")
return True
if check_axis[1]:
ymin = self._bb_minlimits[1]
ymax = self._bb_maxlimits[1]
out = np.any((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))
if out:
print("Problem with y axis")
return True
if check_axis[2]:
zmin = self._bb_minlimits[2]
zmax = self._bb_maxlimits[2]
out = np.any((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))
if out:
print("Problem with z axis")
return True
return False
def check_lidar_data(self):
if self.lidar_is_outside_bb():
print("Error!!! Points of lidar point cloud are outside its BB for car %d: %s " % (self._actor_id, self._actor_type))
self.print()
return False
else:
return True
def wait(world, frames=100, queue = None, slist = None):
for i in range(0, frames):
world.tick()
if queue != None and slist != None:
try:
for _i in range (0, len(slist)):
s_frame = queue.get(True, 1.0)
except Empty:
print(" Some of the sensor information is missed")
# Sensor callback.
# This is where you receive the sensor data and
# process it as you like; the important part is that,
# at the end, it must put an element into the sensor queue.
def lidar_callback(sensor_data, sensor_queue, sensor_name):
sensor_pc_local = np.frombuffer(sensor_data.raw_data, dtype=np.dtype([
('x', np.float32), ('y', np.float32), ('z', np.float32),
('CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]))
sensor_transf = sensor_data.transform
sensor_queue.put((sensor_data.frame, sensor_name, sensor_pc_local, sensor_transf))
def bb_callback(snapshot, world, sensor_queue, sensor_name):
data_array = []
vehicles = world.get_actors().filter('vehicle.*')
for actor in vehicles:
data_array.append((actor.id, actor.type_id, actor.get_transform(), actor.bounding_box))
sensor_queue.put((snapshot.frame, sensor_name, data_array))
def move_spectator(world, actor):
actor_tr = actor.get_transform()
spectator_transform = carla.Transform(actor_tr.location, actor_tr.rotation)
spectator_transform.location -= actor_tr.get_forward_vector() * 5
spectator_transform.location -= actor_tr.get_up_vector() * 3
spectator = world.get_spectator()
spectator.set_transform(spectator_transform)
def world_callback(snapshot, world, sensor_queue, sensor_name, actor):
move_spectator(world, actor)
bb_callback(snapshot, world, sensor_queue, sensor_name)
def process_sensors(w_frame, sensor_queue, sensor_number):
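    # Pull one 'semlidar' and one 'bb' item for world frame w_frame from the queue,
    # discarding stale entries from earlier frames, then check every lidar point of
    # each vehicle against that vehicle's bounding box.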
if sensor_number != 2:
print("Error!!! Sensor number should be two")
sl_data = None
bb_data = None
try:
for i in range (0, sensor_number):
s_frame = sensor_queue.get(True, 1.0)
while s_frame[0] != w_frame:
print("Warning! Missmatch for sensor %s in the frame timestamp (w: %d, s: %d)" % (s_frame[1], w_frame, s_frame[0]))
print("This could be due to accumulated data for previous steps")
s_frame = sensor_queue.get(True, 1.0)
if s_frame[1] == "semlidar":
sl_data = s_frame
elif s_frame[1] == "bb":
bb_data = s_frame
#print(" Frame: %d Sensor: %s Len: %d " % (s_frame[0], s_frame[1], len(s_frame[2])))
except Empty:
print("Error!!! The needeinformation is not here!!!")
return
    if sl_data is None or bb_data is None:
        print("Error!!! Mismatch for sensor %s in the frame timestamp (w: %d, s: %d)" % (s_frame[1], w_frame, s_frame[0]))
for actor_data in bb_data[2]:
trace_vehicle = ActorTrace(actor_data, sl_data)
trace_vehicle.process()
trace_vehicle.check_lidar_data()
class SpawnCar(object):
def __init__(self, location, rotation, filter="vehicle.*", autopilot = False, velocity = None):
self._filter = filter
self._transform = carla.Transform(location, rotation)
self._autopilot = autopilot
self._velocity = velocity
self._actor = None
self._world = None
def spawn(self, world):
self._world = world
actor_BP = world.get_blueprint_library().filter(self._filter)[0]
self._actor = world.spawn_actor(actor_BP, self._transform)
self._actor.set_autopilot(True)
return self._actor
def destroy(self):
if self._actor != None:
self._actor.destroy()
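# Prop vehicles spawned around the ego route so the semantic lidar has plenty of
# actors to hit; each entry gives a spawn location/rotation and a blueprint filter.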
CarPropList = [
SpawnCar(carla.Location(x=83, y= -40, z=5), carla.Rotation(yaw=-90), filter= "*lincoln*", autopilot=True),
SpawnCar(carla.Location(x=83, y= -30, z=3), carla.Rotation(yaw=-90), filter= "*ambulance*", autopilot=True),
SpawnCar(carla.Location(x=83, y= -20, z=3), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=120, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=100, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=140, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*model3*", autopilot=True),
SpawnCar(carla.Location(x=160, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*impala*", autopilot=False),
SpawnCar(carla.Location(x=180, y= -3.5, z=2), carla.Rotation(yaw=+180), filter= "*a2*", autopilot=True),
SpawnCar(carla.Location(x=60, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*model3*", autopilot=True),
SpawnCar(carla.Location(x=80, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=100, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*mustan*", autopilot=True),
SpawnCar(carla.Location(x=120, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=140, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*impala*", autopilot=True),
SpawnCar(carla.Location(x=160, y= +6, z=2), carla.Rotation(yaw=+00), filter= "*prius*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +20,z=2), carla.Rotation(yaw=+90), filter= "*dodge*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +40,z=2), carla.Rotation(yaw=+90), filter= "*isetta*", autopilot=True),
SpawnCar(carla.Location(x=234, y= +80,z=2), carla.Rotation(yaw=+90), filter= "*tt*", autopilot=True),
SpawnCar(carla.Location(x=243, y= -40,z=2), carla.Rotation(yaw=-90), filter= "*etron*", autopilot=True),
SpawnCar(carla.Location(x=243, y= -20,z=2), carla.Rotation(yaw=-90), filter= "*mkz2017*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +00,z=2), carla.Rotation(yaw=-90), filter= "*mustan*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +20,z=2), carla.Rotation(yaw=-90), filter= "*dodge*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +40,z=2), carla.Rotation(yaw=-90), filter= "*charger2020*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +60,z=2), carla.Rotation(yaw=-90), filter= "*lincoln2020*", autopilot=True),
SpawnCar(carla.Location(x=243, y= +80,z=2), carla.Rotation(yaw=-90), filter= "*tt*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+100,z=2), carla.Rotation(yaw=-90), filter= "*a2*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+120,z=2), carla.Rotation(yaw=-90), filter= "*wrangler_rubicon*", autopilot=True),
SpawnCar(carla.Location(x=243, y=+140,z=2), carla.Rotation(yaw=-90), filter= "*c3*", autopilot=True)
]
def spawn_prop_vehicles(world):
for car in CarPropList:
car.spawn(world)
def destroy_prop_vehicles():
for car in CarPropList:
car.destroy()
def main():
# We start creating the client
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
world = client.get_world()
try:
# We need to save the settings to be able to recover them at the end
# of the script to leave the server in the same state that we found it.
original_settings = world.get_settings()
settings = world.get_settings()
        # We set CARLA synchronous mode
settings.fixed_delta_seconds = 0.05
settings.synchronous_mode = True
world.apply_settings(settings)
traffic_manager = client.get_trafficmanager(8000)
traffic_manager.set_synchronous_mode(True)
# We create the sensor queue in which we keep track of the information
# already received. This structure is thread safe and can be
# accessed by all the sensors callback concurrently without problem.
sensor_queue = Queue()
# Spawning ego vehicle
actor_BP = world.get_blueprint_library().filter("vehicle.lincoln.mkz2017")[0]
car_tr = carla.Transform(carla.Location(x=239, y=125, z=0.9), carla.Rotation(yaw=-88.5))
actor = world.spawn_actor(actor_BP, car_tr)
world.tick()
move_spectator(world, actor)
spawn_prop_vehicles(world)
wait(world, 10)
# We create all the sensors and keep them in a list for convenience.
sensor_list = []
lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic')
lidar_bp.set_attribute('channels', '64')
lidar_bp.set_attribute('points_per_second', '500000')
lidar_bp.set_attribute('range', '300')
lidar_bp.set_attribute('upper_fov', '10.0')
lidar_bp.set_attribute('lower_fov', '-90.0')
lidar_tr = carla.Transform(carla.Location(z=3), carla.Rotation(yaw=0))
lidar = world.spawn_actor(lidar_bp, lidar_tr, attach_to=actor)
lidar.listen(lambda data: lidar_callback(data, sensor_queue, "semlidar"))
world.on_tick(lambda snapshot: world_callback(snapshot, world, sensor_queue, "bb", actor))
sensor_list.append(lidar)
sensor_list.append(actor) # actor acts as a 'sensor' to simplify bb-lidar data comparison
# Set autopilot for main vehicle
actor.enable_constant_velocity(carla.Vector3D(20, 0, 0))
for _i in range(0, 100):
# Tick the server
world.tick()
w_frame = world.get_snapshot().frame
process_sensors(w_frame, sensor_queue, len(sensor_list))
actor.disable_constant_velocity()
finally:
world.apply_settings(original_settings)
# Destroy all the actors
destroy_prop_vehicles()
for sensor in sensor_list:
sensor.destroy()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print(' - Exited by user.')
|
[
"numpy.dtype",
"numpy.ones",
"carla.Transform",
"carla.Vector3D",
"carla.Location",
"numpy.any",
"numpy.array",
"carla.Client",
"numpy.savetxt",
"queue.Queue",
"carla.Rotation",
"glob.glob"
] |
[((7225, 7278), 'carla.Transform', 'carla.Transform', (['actor_tr.location', 'actor_tr.rotation'], {}), '(actor_tr.location, actor_tr.rotation)\n', (7240, 7278), False, 'import carla\n'), ((12931, 12962), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (12943, 12962), False, 'import carla\n'), ((2629, 2641), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2637, 2641), True, 'import numpy as np\n'), ((2670, 2682), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2678, 2682), True, 'import numpy as np\n'), ((4060, 4076), 'numpy.array', 'np.array', (['ver_py'], {}), '(ver_py)\n', (4068, 4076), True, 'import numpy as np\n'), ((9115, 9150), 'carla.Transform', 'carla.Transform', (['location', 'rotation'], {}), '(location, rotation)\n', (9130, 9150), False, 'import carla\n'), ((9665, 9697), 'carla.Location', 'carla.Location', ([], {'x': '(83)', 'y': '(-40)', 'z': '(5)'}), '(x=83, y=-40, z=5)\n', (9679, 9697), False, 'import carla\n'), ((9702, 9725), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (9716, 9725), False, 'import carla\n'), ((9779, 9811), 'carla.Location', 'carla.Location', ([], {'x': '(83)', 'y': '(-30)', 'z': '(3)'}), '(x=83, y=-30, z=3)\n', (9793, 9811), False, 'import carla\n'), ((9816, 9839), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (9830, 9839), False, 'import carla\n'), ((9895, 9927), 'carla.Location', 'carla.Location', ([], {'x': '(83)', 'y': '(-20)', 'z': '(3)'}), '(x=83, y=-20, z=3)\n', (9909, 9927), False, 'import carla\n'), ((9932, 9955), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (9946, 9955), False, 'import carla\n'), ((10007, 10041), 'carla.Location', 'carla.Location', ([], {'x': '(120)', 'y': '(-3.5)', 'z': '(2)'}), '(x=120, y=-3.5, z=2)\n', (10021, 10041), False, 'import carla\n'), ((10044, 10068), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10058, 10068), False, 'import carla\n'), ((10120, 10154), 'carla.Location', 'carla.Location', ([], {'x': '(100)', 'y': '(-3.5)', 'z': '(2)'}), '(x=100, y=-3.5, z=2)\n', (10134, 10154), False, 'import carla\n'), ((10157, 10181), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10171, 10181), False, 'import carla\n'), ((10232, 10266), 'carla.Location', 'carla.Location', ([], {'x': '(140)', 'y': '(-3.5)', 'z': '(2)'}), '(x=140, y=-3.5, z=2)\n', (10246, 10266), False, 'import carla\n'), ((10269, 10293), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10283, 10293), False, 'import carla\n'), ((10345, 10379), 'carla.Location', 'carla.Location', ([], {'x': '(160)', 'y': '(-3.5)', 'z': '(2)'}), '(x=160, y=-3.5, z=2)\n', (10359, 10379), False, 'import carla\n'), ((10382, 10406), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10396, 10406), False, 'import carla\n'), ((10459, 10493), 'carla.Location', 'carla.Location', ([], {'x': '(180)', 'y': '(-3.5)', 'z': '(2)'}), '(x=180, y=-3.5, z=2)\n', (10473, 10493), False, 'import carla\n'), ((10496, 10520), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10510, 10520), False, 'import carla\n'), ((10568, 10599), 'carla.Location', 'carla.Location', ([], {'x': '(60)', 'y': '(+6)', 'z': '(2)'}), '(x=60, y=+6, z=2)\n', (10582, 10599), False, 'import carla\n'), ((10605, 10627), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10619, 10627), False, 'import carla\n'), ((10681, 10712), 
'carla.Location', 'carla.Location', ([], {'x': '(80)', 'y': '(+6)', 'z': '(2)'}), '(x=80, y=+6, z=2)\n', (10695, 10712), False, 'import carla\n'), ((10718, 10740), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10732, 10740), False, 'import carla\n'), ((10793, 10825), 'carla.Location', 'carla.Location', ([], {'x': '(100)', 'y': '(+6)', 'z': '(2)'}), '(x=100, y=+6, z=2)\n', (10807, 10825), False, 'import carla\n'), ((10830, 10852), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10844, 10852), False, 'import carla\n'), ((10906, 10938), 'carla.Location', 'carla.Location', ([], {'x': '(120)', 'y': '(+6)', 'z': '(2)'}), '(x=120, y=+6, z=2)\n', (10920, 10938), False, 'import carla\n'), ((10943, 10965), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10957, 10965), False, 'import carla\n'), ((11019, 11051), 'carla.Location', 'carla.Location', ([], {'x': '(140)', 'y': '(+6)', 'z': '(2)'}), '(x=140, y=+6, z=2)\n', (11033, 11051), False, 'import carla\n'), ((11056, 11078), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (11070, 11078), False, 'import carla\n'), ((11132, 11164), 'carla.Location', 'carla.Location', ([], {'x': '(160)', 'y': '(+6)', 'z': '(2)'}), '(x=160, y=+6, z=2)\n', (11146, 11164), False, 'import carla\n'), ((11169, 11191), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (11183, 11191), False, 'import carla\n'), ((11244, 11277), 'carla.Location', 'carla.Location', ([], {'x': '(234)', 'y': '(+20)', 'z': '(2)'}), '(x=234, y=+20, z=2)\n', (11258, 11277), False, 'import carla\n'), ((11281, 11304), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+90)'}), '(yaw=+90)\n', (11295, 11304), False, 'import carla\n'), ((11356, 11389), 'carla.Location', 'carla.Location', ([], {'x': '(234)', 'y': '(+40)', 'z': '(2)'}), '(x=234, y=+40, z=2)\n', (11370, 11389), False, 'import carla\n'), ((11393, 11416), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+90)'}), '(yaw=+90)\n', (11407, 11416), False, 'import carla\n'), ((11469, 11502), 'carla.Location', 'carla.Location', ([], {'x': '(234)', 'y': '(+80)', 'z': '(2)'}), '(x=234, y=+80, z=2)\n', (11483, 11502), False, 'import carla\n'), ((11506, 11529), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+90)'}), '(yaw=+90)\n', (11520, 11529), False, 'import carla\n'), ((11578, 11611), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(-40)', 'z': '(2)'}), '(x=243, y=-40, z=2)\n', (11592, 11611), False, 'import carla\n'), ((11615, 11638), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11629, 11638), False, 'import carla\n'), ((11690, 11723), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(-20)', 'z': '(2)'}), '(x=243, y=-20, z=2)\n', (11704, 11723), False, 'import carla\n'), ((11727, 11750), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11741, 11750), False, 'import carla\n'), ((11804, 11836), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+0)', 'z': '(2)'}), '(x=243, y=+0, z=2)\n', (11818, 11836), False, 'import carla\n'), ((11841, 11864), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11855, 11864), False, 'import carla\n'), ((11917, 11950), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+20)', 'z': '(2)'}), '(x=243, y=+20, z=2)\n', (11931, 11950), False, 'import carla\n'), ((11954, 11977), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11968, 
11977), False, 'import carla\n'), ((12029, 12062), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+40)', 'z': '(2)'}), '(x=243, y=+40, z=2)\n', (12043, 12062), False, 'import carla\n'), ((12066, 12089), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12080, 12089), False, 'import carla\n'), ((12147, 12180), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+60)', 'z': '(2)'}), '(x=243, y=+60, z=2)\n', (12161, 12180), False, 'import carla\n'), ((12184, 12207), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12198, 12207), False, 'import carla\n'), ((12265, 12298), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+80)', 'z': '(2)'}), '(x=243, y=+80, z=2)\n', (12279, 12298), False, 'import carla\n'), ((12302, 12325), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12316, 12325), False, 'import carla\n'), ((12374, 12408), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+100)', 'z': '(2)'}), '(x=243, y=+100, z=2)\n', (12388, 12408), False, 'import carla\n'), ((12411, 12434), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12425, 12434), False, 'import carla\n'), ((12483, 12517), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+120)', 'z': '(2)'}), '(x=243, y=+120, z=2)\n', (12497, 12517), False, 'import carla\n'), ((12520, 12543), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12534, 12543), False, 'import carla\n'), ((12606, 12640), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+140)', 'z': '(2)'}), '(x=243, y=+140, z=2)\n', (12620, 12640), False, 'import carla\n'), ((12643, 12666), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12657, 12666), False, 'import carla\n'), ((13801, 13808), 'queue.Queue', 'Queue', ([], {}), '()\n', (13806, 13808), False, 'from queue import Queue\n'), ((2166, 2324), 'glob.glob', 'glob.glob', (["('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major, sys.\n version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))"], {}), "('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major,\n sys.version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64')\n )\n", (2175, 2324), False, 'import glob\n'), ((3335, 3396), 'numpy.array', 'np.array', (["[data_actor['x'], data_actor['y'], data_actor['z']]"], {}), "([data_actor['x'], data_actor['y'], data_actor['z']])\n", (3343, 3396), True, 'import numpy as np\n'), ((3434, 3463), 'numpy.ones', 'np.ones', (['(points.shape[0], 1)'], {}), '((points.shape[0], 1))\n', (3441, 3463), True, 'import numpy as np\n'), ((4348, 4460), 'numpy.savetxt', 'np.savetxt', (["('veh_data_%d_%s_%d.out' % (self._frame, self._actor_type, self._actor_id))", 'self._lidar_pc_local'], {}), "('veh_data_%d_%s_%d.out' % (self._frame, self._actor_type, self.\n _actor_id), self._lidar_pc_local)\n", (4358, 4460), True, 'import numpy as np\n'), ((4468, 4576), 'numpy.savetxt', 'np.savetxt', (["('bb_data_%d_%s_%d.out' % (self._frame, self._actor_type, self._actor_id))", 'self._bb_vertices'], {}), "('bb_data_%d_%s_%d.out' % (self._frame, self._actor_type, self.\n _actor_id), self._bb_vertices)\n", (4478, 4576), True, 'import numpy as np\n'), ((4809, 4866), 'numpy.any', 'np.any', (['((lidar_pc[:, 0] > xmax) | (lidar_pc[:, 0] < xmin))'], {}), '((lidar_pc[:, 0] > xmax) | (lidar_pc[:, 0] < xmin))\n', (4815, 4866), True, 'import numpy as np\n'), ((5085, 5142), 'numpy.any', 'np.any', 
(['((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))'], {}), '((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))\n', (5091, 5142), True, 'import numpy as np\n'), ((5363, 5420), 'numpy.any', 'np.any', (['((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))'], {}), '((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))\n', (5369, 5420), True, 'import numpy as np\n'), ((6515, 6659), 'numpy.dtype', 'np.dtype', (["[('x', np.float32), ('y', np.float32), ('z', np.float32), ('CosAngle', np.\n float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]"], {}), "([('x', np.float32), ('y', np.float32), ('z', np.float32), (\n 'CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)])\n", (6523, 6659), True, 'import numpy as np\n'), ((13960, 13995), 'carla.Location', 'carla.Location', ([], {'x': '(239)', 'y': '(125)', 'z': '(0.9)'}), '(x=239, y=125, z=0.9)\n', (13974, 13995), False, 'import carla\n'), ((13997, 14022), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-88.5)'}), '(yaw=-88.5)\n', (14011, 14022), False, 'import carla\n'), ((14686, 14705), 'carla.Location', 'carla.Location', ([], {'z': '(3)'}), '(z=3)\n', (14700, 14705), False, 'import carla\n'), ((14707, 14728), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(0)'}), '(yaw=0)\n', (14721, 14728), False, 'import carla\n'), ((15204, 15228), 'carla.Vector3D', 'carla.Vector3D', (['(20)', '(0)', '(0)'], {}), '(20, 0, 0)\n', (15218, 15228), False, 'import carla\n')]
|
"""Loading MNIST dataset.
"""
import struct
import numpy as np
class MNIST:
"""
Loading MNIST dataset.
In the directory of MNIST dataset, there should be the following files:
- Training set:
- train-images-idx3-ubyte
- train-labels-idx1-ubyte
- Test set:
- t10k-images-idx3-ubyte
- t10k-labels-idx1-ubyte
Functions
---------
next_batch()
image_pair(index: int)
sample_batch(batch_index: int)
to_ndarray()
Attributes
----------
data_type: Can be either `"test"` or `"train"`.
path: Path for MNIST data.
data_size: Size of the dataset. Default value `None` means using all data in MNIST.
batch_size: Size of the mini-batch. Default value `None` means using the whole dataset as
a mini-batch.
binarize: Whether to binarize the images (using 0 and 1 values). Default value is True.
reshape: Whether to reshape the images into 2D arrays. Default value is False.
one_hot: whether to use one-hot encoding for labels (e.g. using vector
`[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]` for 0). Default value is False.
"""
IMAGE_SIZE = 784
LABEL_SIZE = 1
_IMAGE_SIZE_FMT = ">784B"
_LABEL_SIZE_FMT = ">B"
IMAGE_SHAPE = (28, 28)
batch_index = 0
def __init__(self, data_type: str, path: str,
data_size: int = None,
batch_size: int = None,
binarize=True,
reshape=False,
one_hot=False):
self.data_type = data_type
self.path = path
# Options
self.binarize = binarize
self.reshape = reshape
self.one_hot = one_hot
# Data buffer
# `data_size` will be updated according to the actual data
image_buf, label_buf = self._read_file()
# Size
if data_size is None:
            # `len(image_buf)` may not be exactly divisible by 784 (the buffer also contains a 16-byte header)
self.data_size = len(image_buf) // self.IMAGE_SIZE
else:
self.data_size = data_size
if batch_size is None:
self.batch_size = self.data_size
else:
if batch_size <= self.data_size:
self.batch_size = batch_size
else:
raise ValueError("batch size larger than data size")
self.batch_num = self.data_size // self.batch_size
# Data
self._images = self._get_image(image_buf)
self._labels = self._get_label(label_buf)
def _read_file(self):
if self.data_type == "test":
image_file_name = self.path + "t10k-images-idx3-ubyte"
label_file_name = self.path + "t10k-labels-idx1-ubyte"
elif self.data_type == "train":
image_file_name = self.path + "train-images-idx3-ubyte"
label_file_name = self.path + "train-labels-idx1-ubyte"
else:
raise ValueError("only type \"test\" and \"train\" are available")
# "rb" means reading + binary mode
with open(image_file_name, "rb") as image_file:
image_buf = image_file.read()
with open(label_file_name, "rb") as label_file:
label_buf = label_file.read()
return image_buf, label_buf
def _get_image(self, image_buf):
"""Get an image array from `image_buf`.
This is the structure of the image file (training set):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 60000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
"""
image_buf_len = self.data_size * self.IMAGE_SIZE + 16
image_offset = 16
image_arr = []
while image_offset < image_buf_len:
temp = struct.unpack_from(self._IMAGE_SIZE_FMT, image_buf, image_offset)
if self.binarize:
temp = np.vectorize(lambda x: 0 if x <= 127 else 1)(temp)
if self.reshape:
temp = np.reshape(temp, self.IMAGE_SHAPE)
image_arr.append(temp)
image_offset += self.IMAGE_SIZE
return image_arr
def _get_label(self, label_buf):
"""Get an label array from `label_buf`.
This is the structure of the label file (training set):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 60000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
"""
label_buf_len = self.data_size * self.LABEL_SIZE + 8
label_offset = 8
label_arr = []
while label_offset < label_buf_len:
temp = struct.unpack_from(self._LABEL_SIZE_FMT, label_buf, label_offset)[0]
if self.one_hot:
vec = np.zeros(10)
vec[temp] = 1
label_arr.append(vec)
else:
label_arr.append(temp)
label_offset += self.LABEL_SIZE
return label_arr
def next_batch(self):
"""Increase `batch_index` by 1, then return a mini-batch of (image, label) tuples."""
this_batch = self.batch(self.batch_index)
self.batch_index = (self.batch_index + 1) % self.batch_num
return this_batch
def image_pair(self, index: int):
"""Return a (image, label) tuple at `index`."""
if index < self.data_size:
return self._images[index], self._labels[index]
raise IndexError("image index out of range")
def batch(self, batch_index: int):
"""Return a mini-batch of (image, label) tuples at `batch_index`."""
if batch_index < self.batch_num:
begin = batch_index * self.batch_size
end = (batch_index + 1) * self.batch_size
return self._images[begin:end], self._labels[begin:end]
raise IndexError("batch index out of range")
def to_ndarray(self):
"""Return the raw data tuple `(images, labels)` as `np.ndarray`.
"""
images = []
labels = []
for i in range(self.batch_num):
image, label = self.batch(i)
images.append(image)
labels.append(label)
return np.asarray(images), np.asarray(labels)
def _test():
data = MNIST("train", MNIST_PATH,
data_size=200, batch_size=8,
reshape=True, one_hot=False, binarize=False)
print("Meta-data:")
print("\tDataset size:", data.data_size)
print("\tBatch size:", data.batch_size)
col_num = 4
row_num = data.batch_size // col_num + 1
_test_random_images(data, col_num, row_num)
_test_random_batch(data, col_num, row_num)
_test_next_batch(data, col_num, row_num)
def _test_random_images(data, col_num, row_num):
images = []
labels = []
for _ in range(10):
index = random.randrange(data.data_size)
image, label = data.image_pair(index)
images.append(image)
labels.append(label)
_plot(images, labels, col_num=col_num, row_num=row_num)
def _test_random_batch(data, col_num, row_num):
index = random.randrange(data.batch_num)
images, labels = data.batch(index)
_plot(images, labels, col_num=col_num, row_num=row_num)
def _test_next_batch(data, col_num, row_num):
for _ in range(3):
images, labels = data.next_batch()
_plot(images, labels, col_num=col_num, row_num=row_num)
def _plot(images, labels, col_num, row_num):
for i, (image, label) in enumerate(zip(images, labels)):
plt.subplot(row_num, col_num, i + 1)
plt.imshow(image, cmap="gray")
plt.axis('off')
plt.title(str(label))
plt.show()
def _test_numpy():
images, labels = MNIST("train", MNIST_PATH,
data_size=200, batch_size=8,
reshape=False, one_hot=False, binarize=False).to_ndarray()
print(images.shape) # shape = (num_batches, batch_size, num_visible)
print(np.moveaxis(images, 0, -1).shape) # shape = (batch_size, num_visible, num_batches)
print(labels.shape) # shape = (num_batches, batch_size)
if __name__ == "__main__":
import random
import matplotlib.pyplot as plt
# Local MNIST data
MNIST_PATH = "../../machine-learning/data/mnist/"
_test()
_test_numpy()
|
[
"matplotlib.pyplot.imshow",
"numpy.reshape",
"random.randrange",
"struct.unpack_from",
"numpy.asarray",
"numpy.zeros",
"numpy.vectorize",
"numpy.moveaxis",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((7734, 7766), 'random.randrange', 'random.randrange', (['data.batch_num'], {}), '(data.batch_num)\n', (7750, 7766), False, 'import random\n'), ((8294, 8304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8302, 8304), True, 'import matplotlib.pyplot as plt\n'), ((7475, 7507), 'random.randrange', 'random.randrange', (['data.data_size'], {}), '(data.data_size)\n', (7491, 7507), False, 'import random\n'), ((8160, 8196), 'matplotlib.pyplot.subplot', 'plt.subplot', (['row_num', 'col_num', '(i + 1)'], {}), '(row_num, col_num, i + 1)\n', (8171, 8196), True, 'import matplotlib.pyplot as plt\n'), ((8205, 8235), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (8215, 8235), True, 'import matplotlib.pyplot as plt\n'), ((8244, 8259), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8252, 8259), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4255), 'struct.unpack_from', 'struct.unpack_from', (['self._IMAGE_SIZE_FMT', 'image_buf', 'image_offset'], {}), '(self._IMAGE_SIZE_FMT, image_buf, image_offset)\n', (4208, 4255), False, 'import struct\n'), ((6836, 6854), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (6846, 6854), True, 'import numpy as np\n'), ((6856, 6874), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (6866, 6874), True, 'import numpy as np\n'), ((8620, 8646), 'numpy.moveaxis', 'np.moveaxis', (['images', '(0)', '(-1)'], {}), '(images, 0, -1)\n', (8631, 8646), True, 'import numpy as np\n'), ((4412, 4446), 'numpy.reshape', 'np.reshape', (['temp', 'self.IMAGE_SHAPE'], {}), '(temp, self.IMAGE_SHAPE)\n', (4422, 4446), True, 'import numpy as np\n'), ((5305, 5370), 'struct.unpack_from', 'struct.unpack_from', (['self._LABEL_SIZE_FMT', 'label_buf', 'label_offset'], {}), '(self._LABEL_SIZE_FMT, label_buf, label_offset)\n', (5323, 5370), False, 'import struct\n'), ((5425, 5437), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (5433, 5437), True, 'import numpy as np\n'), ((4309, 4353), 'numpy.vectorize', 'np.vectorize', (['(lambda x: 0 if x <= 127 else 1)'], {}), '(lambda x: 0 if x <= 127 else 1)\n', (4321, 4353), True, 'import numpy as np\n')]
|
import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
lock = threading.Lock()
class MultiTrainTD3(threading.Thread):
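    # Worker thread that serves requests arriving on input_queue as
    # (action, p_index, data) tuples -- "get_action", "get_qdebug", "get_loss",
    # buffer and bookkeeping operations -- and pushes any result to output_queue.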
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_end_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)]
# self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_end_rewards = []
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
self.save_n_ep = self.num_env * 10
self.print_step = -int(self.save_n_ep / self.num_env)
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action is "None": # If you send `None`, the thread will exit.
return
elif action is "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action is "get_qdebug":
out = self.get_qdebug(data, p_index)
self.output_queue.put(out)
elif action is "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action is "write_tboard":
self.write_tboard(data)
elif action is "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action is "save_rew_info":
self.save_rew_info(data)
elif action is "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action is "reset_rew_info":
self.reset_rew_info()
elif action is "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
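    # Usage sketch (mirrors get_gputhreads/close_gputhreads at the bottom of this
    # file): submit work with thread.input_queue.put(("get_action", agent_idx, data)),
    # read the result from thread.output_queue.get(), and put ("None", None, None)
    # to make the thread return from run().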
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train)
# print(np.shape(obs_n_t))
act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM" or self.args.encoder_model != "DDPG":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t
def get_qdebug(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index])
_, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input))
if self.args.td3:
q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index])
_, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input))
else:
q2_h_j_t1 = []
return q1_h_j_t1, q2_h_j_t1
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
train_step = data
loss = agent.update(self.trainers, self.buffer_op, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, ep_step = data
# rew_n (num_env, num_agents)
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step rewards
self.ep_end_rewards[j][-1] += rew
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[j][i][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
self.ep_end_rewards[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
for i in range(self.num_agents):
self.agent_info[j][i].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
# pickle_info = [self.agent_info[j] for j in range(self.num_env)]
with open(file_name, 'wb') as fp:
# Dump files as [num_env, [# agents, [#ep, [#stps, [dim]]]]
pickle.dump(self.agent_info, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
if num_episodes % (self.save_n_ep) == 0:
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
# episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards
if self.args.env_type == "mpe":
# print statement depends on whether or not there are adversaries
if self.num_adversaries == 0:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10.
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)))
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n")
else:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards)
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
# Keep track of final episode reward
self.final_ep_rewards.append(episode_b_rewards)
self.final_ep_end_rewards.append(ep_end_b_rewards)
for rew in ep_ag_b_rewards:
self.final_ep_ag_rewards.append(rew)
self.time_prev = time.time()
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl'
with open(rew_ep_end_file_name, 'wb') as fp:
pickle.dump(self.final_ep_end_rewards, fp)
agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_ag_rewards, fp)
"""
REINFORCE Threads
"""
class MultiTrainVPG(threading.Thread):
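    # REINFORCE counterpart of MultiTrainTD3: same (action, p_index, data) queue
    # protocol, but get_action also returns the soft action output and a value
    # estimate alongside the sampled action.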
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
if self.args.env_type == "mpe":
self.print_step = -int(self.save_rate / self.num_env)
else: # print for episode end only (success rate)
self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len))
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action is "None": # If you send `None`, the thread will exit.
return
elif action is "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action is "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action is "write_tboard":
self.write_tboard(data)
elif action is "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action is "add_to_buffer_reinforce":
self.buffer_op.collect_exp(data)
elif action is "save_rew_info":
self.save_rew_info(data)
elif action is "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action is "reset_rew_info":
self.reset_rew_info()
elif action is "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2)
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, is_train)
act_j_t, act_soft_j_t, state_j_t1, mem_j_t1, attn_j_t, value_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, act_soft_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t, value_j_t
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
train_step, buffer_data = data
agent = self.trainers[p_index]
loss = agent.update(self.trainers, buffer_data, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, terminal = data
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[-1][i].append(info_n[0]['n'])
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
self.agent_info[j].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(self.ep_success, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
episode_b_success = []
for j in range(self.num_env):
episode_b_success.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_success = np.mean(np.array(episode_b_success)) / self.args.max_episode_len
print("steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
self.final_ep_rewards.append(episode_b_success)
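    # Pickle the accumulated reward curve into the experiment's plots directory.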
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
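# Spawn one trainer thread per GPU worker, each with its own input/output queue.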
def get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv):
threads = []
sess = tf.compat.v1.get_default_session()
for t in range(args.num_gpu_threads):
input_q = queue.Queue()
output_q = queue.Queue()
if args.policy_grad == "maddpg":
threads.append(MultiTrainTD3(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
elif args.policy_grad == "reinforce":
threads.append(
MultiTrainVPG(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
threads[t].start()
time.sleep(1)
return threads
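# Send a stop sentinel to every trainer thread and wait for them to join.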
def close_gputhreads(threads):
for t in threads:
t.input_queue.put(("None", None, None))
for t in threads:
t.join()
print('GPU trainers cancelled')
return
|
[
"threading.Thread.__init__",
"os.path.exists",
"numpy.mean",
"pickle.dump",
"sarnet_td3.common.tf_util.save_state",
"threading.Lock",
"tensorflow.compat.v1.get_default_session",
"os.path.join",
"time.sleep",
"numpy.stack",
"numpy.zeros",
"numpy.array",
"os.mkdir",
"numpy.expand_dims",
"queue.Queue",
"time.time",
"sarnet_td3.common.tf_util.get_session"
] |
[((212, 228), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (226, 228), False, 'import threading, queue, time, os, pickle\n'), ((23871, 23905), 'tensorflow.compat.v1.get_default_session', 'tf.compat.v1.get_default_session', ([], {}), '()\n', (23903, 23905), True, 'import tensorflow as tf\n'), ((351, 404), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {'args': '()', 'kwargs': 'None'}), '(self, args=(), kwargs=None)\n', (376, 404), False, 'import threading, queue, time, os, pickle\n'), ((1582, 1636), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.critic_units)'}), '(shape=(self.num_env, self.args.critic_units))\n', (1590, 1636), True, 'import numpy as np\n'), ((1661, 1714), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (1669, 1714), True, 'import numpy as np\n'), ((1740, 1751), 'time.time', 'time.time', ([], {}), '()\n', (1749, 1751), False, 'import threading, queue, time, os, pickle\n'), ((14661, 14714), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {'args': '()', 'kwargs': 'None'}), '(self, args=(), kwargs=None)\n', (14686, 14714), False, 'import threading, queue, time, os, pickle\n'), ((15844, 15898), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.critic_units)'}), '(shape=(self.num_env, self.args.critic_units))\n', (15852, 15898), True, 'import numpy as np\n'), ((15923, 15976), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (15931, 15976), True, 'import numpy as np\n'), ((16003, 16014), 'time.time', 'time.time', ([], {}), '()\n', (16012, 16014), False, 'import threading, queue, time, os, pickle\n'), ((23966, 23979), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (23977, 23979), False, 'import threading, queue, time, os, pickle\n'), ((23999, 24012), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (24010, 24012), False, 'import threading, queue, time, os, pickle\n'), ((24414, 24427), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (24424, 24427), False, 'import threading, queue, time, os, pickle\n'), ((3740, 3766), 'numpy.stack', 'np.stack', (['obs_n_t'], {'axis': '(-2)'}), '(obs_n_t, axis=-2)\n', (3748, 3766), True, 'import numpy as np\n'), ((3825, 3856), 'numpy.expand_dims', 'np.expand_dims', (['obs_n_t'], {'axis': '(1)'}), '(obs_n_t, axis=1)\n', (3839, 3856), True, 'import numpy as np\n'), ((4779, 4805), 'numpy.stack', 'np.stack', (['obs_n_t'], {'axis': '(-2)'}), '(obs_n_t, axis=-2)\n', (4787, 4805), True, 'import numpy as np\n'), ((4864, 4895), 'numpy.expand_dims', 'np.expand_dims', (['obs_n_t'], {'axis': '(1)'}), '(obs_n_t, axis=1)\n', (4878, 4895), True, 'import numpy as np\n'), ((8566, 8636), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.benchmark_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)\n", (8578, 8636), False, 'import threading, queue, time, os, pickle\n'), ((13622, 13688), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.plots_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.plots_dir)\n", (13634, 13688), False, 'import threading, queue, time, os, pickle\n'), ((17953, 17979), 'numpy.stack', 'np.stack', (['obs_n_t'], {'axis': '(-2)'}), '(obs_n_t, axis=-2)\n', (17961, 17979), True, 'import numpy as np\n'), ((18002, 18033), 'numpy.expand_dims', 'np.expand_dims', (['obs_n_t'], {'axis': 
'(1)'}), '(obs_n_t, axis=1)\n', (18016, 18033), True, 'import numpy as np\n'), ((21579, 21649), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.benchmark_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)\n", (21591, 21649), False, 'import threading, queue, time, os, pickle\n'), ((22385, 22431), 'sarnet_td3.common.tf_util.save_state', 'U.save_state', (['save_dir', 'self.sess'], {'saver': 'saver'}), '(save_dir, self.sess, saver=saver)\n', (22397, 22431), True, 'import sarnet_td3.common.tf_util as U\n'), ((23381, 23447), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.plots_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.plots_dir)\n", (23393, 23447), False, 'import threading, queue, time, os, pickle\n'), ((4438, 4491), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (4446, 4491), True, 'import numpy as np\n'), ((6170, 6197), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (6178, 6197), True, 'import numpy as np\n'), ((8656, 8685), 'os.path.exists', 'os.path.exists', (['benchmark_dir'], {}), '(benchmark_dir)\n', (8670, 8685), False, 'import threading, queue, time, os, pickle\n'), ((8703, 8726), 'os.mkdir', 'os.mkdir', (['benchmark_dir'], {}), '(benchmark_dir)\n', (8711, 8726), False, 'import threading, queue, time, os, pickle\n'), ((9124, 9156), 'pickle.dump', 'pickle.dump', (['self.agent_info', 'fp'], {}), '(self.agent_info, fp)\n', (9135, 9156), False, 'import threading, queue, time, os, pickle\n'), ((9587, 9633), 'sarnet_td3.common.tf_util.save_state', 'U.save_state', (['save_dir', 'self.sess'], {'saver': 'saver'}), '(save_dir, self.sess, saver=saver)\n', (9599, 9633), True, 'import sarnet_td3.common.tf_util as U\n'), ((13443, 13454), 'time.time', 'time.time', ([], {}), '()\n', (13452, 13454), False, 'import threading, queue, time, os, pickle\n'), ((13708, 13732), 'os.path.exists', 'os.path.exists', (['plot_dir'], {}), '(plot_dir)\n', (13722, 13732), False, 'import threading, queue, time, os, pickle\n'), ((13750, 13768), 'os.mkdir', 'os.mkdir', (['plot_dir'], {}), '(plot_dir)\n', (13758, 13768), False, 'import threading, queue, time, os, pickle\n'), ((13966, 14004), 'pickle.dump', 'pickle.dump', (['self.final_ep_rewards', 'fp'], {}), '(self.final_ep_rewards, fp)\n', (13977, 14004), False, 'import threading, queue, time, os, pickle\n'), ((14223, 14265), 'pickle.dump', 'pickle.dump', (['self.final_ep_end_rewards', 'fp'], {}), '(self.final_ep_end_rewards, fp)\n', (14234, 14265), False, 'import threading, queue, time, os, pickle\n'), ((14469, 14510), 'pickle.dump', 'pickle.dump', (['self.final_ep_ag_rewards', 'fp'], {}), '(self.final_ep_ag_rewards, fp)\n', (14480, 14510), False, 'import threading, queue, time, os, pickle\n'), ((18547, 18600), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (18555, 18600), True, 'import numpy as np\n'), ((19504, 19531), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (19512, 19531), True, 'import numpy as np\n'), ((21669, 21698), 'os.path.exists', 'os.path.exists', (['benchmark_dir'], {}), '(benchmark_dir)\n', (21683, 21698), False, 'import threading, queue, time, os, pickle\n'), ((21716, 21739), 'os.mkdir', 'os.mkdir', (['benchmark_dir'], {}), '(benchmark_dir)\n', (21724, 21739), False, 'import threading, queue, 
time, os, pickle\n'), ((21983, 22015), 'pickle.dump', 'pickle.dump', (['self.ep_success', 'fp'], {}), '(self.ep_success, fp)\n', (21994, 22015), False, 'import threading, queue, time, os, pickle\n'), ((23467, 23491), 'os.path.exists', 'os.path.exists', (['plot_dir'], {}), '(plot_dir)\n', (23481, 23491), False, 'import threading, queue, time, os, pickle\n'), ((23509, 23527), 'os.mkdir', 'os.mkdir', (['plot_dir'], {}), '(plot_dir)\n', (23517, 23527), False, 'import threading, queue, time, os, pickle\n'), ((23725, 23763), 'pickle.dump', 'pickle.dump', (['self.final_ep_rewards', 'fp'], {}), '(self.final_ep_rewards, fp)\n', (23736, 23763), False, 'import threading, queue, time, os, pickle\n'), ((22550, 22595), 'numpy.mean', 'np.mean', (['self.ep_success[j][self.print_step:]'], {}), '(self.ep_success[j][self.print_step:])\n', (22557, 22595), True, 'import numpy as np\n'), ((22637, 22664), 'numpy.array', 'np.array', (['episode_b_success'], {}), '(episode_b_success)\n', (22645, 22664), True, 'import numpy as np\n'), ((5969, 6014), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (5976, 6014), True, 'import numpy as np\n'), ((6083, 6128), 'numpy.mean', 'np.mean', (['self.ep_success[j][self.print_step:]'], {}), '(self.ep_success[j][self.print_step:])\n', (6090, 6128), True, 'import numpy as np\n'), ((6817, 6832), 'sarnet_td3.common.tf_util.get_session', 'U.get_session', ([], {}), '()\n', (6830, 6832), True, 'import sarnet_td3.common.tf_util as U\n'), ((11951, 11976), 'numpy.mean', 'np.mean', (['ep_end_b_rewards'], {}), '(ep_end_b_rewards)\n', (11958, 11976), True, 'import numpy as np\n'), ((19303, 19348), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (19310, 19348), True, 'import numpy as np\n'), ((19417, 19462), 'numpy.mean', 'np.mean', (['self.ep_success[j][self.print_step:]'], {}), '(self.ep_success[j][self.print_step:])\n', (19424, 19462), True, 'import numpy as np\n'), ((20151, 20166), 'sarnet_td3.common.tf_util.get_session', 'U.get_session', ([], {}), '()\n', (20164, 20166), True, 'import sarnet_td3.common.tf_util as U\n'), ((10363, 10390), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (10371, 10390), True, 'import numpy as np\n'), ((10435, 10460), 'numpy.mean', 'np.mean', (['ep_end_b_rewards'], {}), '(ep_end_b_rewards)\n', (10442, 10460), True, 'import numpy as np\n'), ((11879, 11906), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (11887, 11906), True, 'import numpy as np\n'), ((10161, 10206), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (10168, 10206), True, 'import numpy as np\n'), ((10260, 10309), 'numpy.mean', 'np.mean', (['self.ep_end_rewards[j][self.print_step:]'], {}), '(self.ep_end_rewards[j][self.print_step:])\n', (10267, 10309), True, 'import numpy as np\n'), ((11677, 11722), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (11684, 11722), True, 'import numpy as np\n'), ((11776, 11825), 'numpy.mean', 'np.mean', (['self.ep_end_rewards[j][self.print_step:]'], {}), '(self.ep_end_rewards[j][self.print_step:])\n', (11783, 11825), True, 'import numpy as np\n'), ((22849, 22860), 'time.time', 'time.time', ([], {}), '()\n', (22858, 22860), False, 'import threading, queue, time, os, pickle\n'), ((10684, 10735), 'numpy.mean', 'np.mean', 
(['self.agent_rewards[j][i][self.print_step:]'], {}), '(self.agent_rewards[j][i][self.print_step:])\n', (10691, 10735), True, 'import numpy as np\n'), ((10796, 10820), 'numpy.array', 'np.array', (['temp_ag_reward'], {}), '(temp_ag_reward)\n', (10804, 10820), True, 'import numpy as np\n'), ((12194, 12245), 'numpy.mean', 'np.mean', (['self.agent_rewards[j][i][self.print_step:]'], {}), '(self.agent_rewards[j][i][self.print_step:])\n', (12201, 12245), True, 'import numpy as np\n'), ((12306, 12330), 'numpy.array', 'np.array', (['temp_ag_reward'], {}), '(temp_ag_reward)\n', (12314, 12330), True, 'import numpy as np\n'), ((11041, 11052), 'time.time', 'time.time', ([], {}), '()\n', (11050, 11052), False, 'import threading, queue, time, os, pickle\n'), ((23108, 23119), 'time.time', 'time.time', ([], {}), '()\n', (23117, 23119), False, 'import threading, queue, time, os, pickle\n'), ((12640, 12651), 'time.time', 'time.time', ([], {}), '()\n', (12649, 12651), False, 'import threading, queue, time, os, pickle\n'), ((11364, 11375), 'time.time', 'time.time', ([], {}), '()\n', (11373, 11375), False, 'import threading, queue, time, os, pickle\n'), ((13058, 13069), 'time.time', 'time.time', ([], {}), '()\n', (13067, 13069), False, 'import threading, queue, time, os, pickle\n')]
|
#
# Copyright 2018, 2020 <NAME>
# 2019-2020 <NAME>
# 2015-2016 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Tests to understand the difficulties in extracting the Hurst exponent from noisy data
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import PyCo.Tools as Tools
import SurfaceTopography as Surf
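# Naive estimate: fit C(q) ~ alpha * q^(-2-2H) to the full periodic PSD and plot the fit.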
def plot_naive(surface, lam_max):
fig = plt.figure()
ax=fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xscale('log')
surf = Tools.CharacterisePeriodicSurface(surface)
q = surf.q
C = surf.C
H, alpha = surf.estimate_hurst_naive(lambda_max=lam_max, full_output=True)
print("H = {}, alpha = {}".format(H, alpha))
ax.loglog(q, C, alpha=.1)
mean, err, q_g = surf.grouped_stats(100)
mask = np.isfinite(mean)
mean = mean[mask]
err = err[:, mask]
q_g = q_g[mask]
ax.errorbar(q_g, mean, yerr=err)
ax.set_title("Naive: H={:.2f}, h_rms={:.2e}".format(H, np.sqrt((surface.heights() ** 2).mean())))
a, b = np.polyfit(np.log(q), np.log(C), 1)
ax.plot(q, q**(-2-2*H)*alpha, label="{}, H={:.2f}".format('fit', H))
ax.legend(loc='best')
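# Scan the fit objective over the prefactor C0 at fixed H to check that C0_of_H returns the minimiser.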
def plot_grad_C0(surface, H_in, lam_max):
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q[sl]
C = surf.C[sl]
dim = 2
def C0_of_H(H):
return ((q**(-3-2*H)).sum() /
(q**(-5-4*H)/C).sum())
def objective(H, C0):
return ((1 - C0*q**(-2*H-2)/C)**2 /
q**(dim-1)).sum()
C0 = C0_of_H(H_in)
O0 = objective(H_in, C0)
c_s = np.linspace(0, 2*C0, 51)
o_s = np.zeros_like(c_s)
for i, c in enumerate(c_s):
o_s[i] = objective(H_in, c)
fig = plt.figure()
ax=fig.add_subplot(111)
fig.suptitle('grad(C0)')
ax.plot(c_s, o_s, marker= '+')
ax.scatter(C0, O0, marker='x', label = 'root', c='r')
ax.grid(True)
print("C0 = {}, obj0 = {}".format(C0, O0))
return C0
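# Scan the objective and its H-gradient, then locate the optimal H with a bounded 1-D search.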
def plot_grad_H(surface, lam_max):
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q[sl]# np.array(surf.q[sl][0], surf.q[sl][-1])
C = surf.C[sl]# np.array(surf.C[sl][0], surf.C[sl][-1])
dim = 2
def C0_of_H(H):
return ((C**2/q**(-5-dim-4*H)).sum() /
(C/q**(-3-dim-2*H)).sum())
def grad_h(H, C0):
return (4*C0/C*np.log(q)*q**(-1-2*H-dim)*(1 - C0*q**(-2-2*H)/C)).sum()
def objective(H, C0):
return ((c/q**(-2*H-2) - C0)**2 /
q**(dim-1)).sum()
def full_obj(H):
C0 = C0_of_H(H)
return ((1 - C0/C*q**(-2*H-2))**2 /
q**(dim-1)).sum()
h_s = np.linspace(.0, 2., 51)
o_s = np.zeros_like(h_s)
g_s = np.zeros_like(h_s)
for i, h in enumerate(h_s):
c = C0_of_H(h)
o_s[i] = objective(h, c)
g_s[i] = grad_h(h, c)
H_opt, obj_opt, err, nfeq = scipy.optimize.fminbound(full_obj, 0, 2, full_output=True)
if err != 0:
raise Exception()
fig = plt.figure()
ax=fig.add_subplot(211)
ax.set_xlim(h_s[0], h_s[-1])
fig.suptitle('grad(H)')
ax.plot(h_s, o_s, marker= '+')
ax.grid(True)
ax.scatter(H_opt, obj_opt, marker='x', label = 'root', c='r')
ax=fig.add_subplot(212)
ax.set_xlim(h_s[0], h_s[-1])
ax.plot(h_s, g_s, marker= '+')
grad_opt = grad_h(H_opt, C0_of_H(H_opt))
ax.scatter(H_opt, grad_opt, marker='x', label = 'root', c='r')
#res = scipy.optimize.fmin
#print("H_out = {}, obj0 = {}".format(C0, O0))
ax.grid(True)
return H_opt, C0_of_H(H_opt)
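# Compare the alternative Hurst estimate against the reference fit (H_ref, C0_ref) on the same PSD.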
def compare_to_PyPy(surface, lam_max, H_ref, C0_ref):
fig = plt.figure()
ax=fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xscale('log')
surf = Tools.CharacterisePeriodicSurface(surface)
q_min = 2*np.pi/lam_max
sl = surf.q > q_min
q = surf.q
C = surf.C
H, alpha, res = surf.estimate_hurst_alt(lambda_max=lam_max, full_output=True)
print("H = {}, alpha = {}".format(H, alpha))
ax.loglog(q, C, alpha=.1)
mean, err, q_g = surf.grouped_stats(100)
mask = np.isfinite(mean)
mean = mean[mask]
err = err[:, mask]
q_g = q_g[mask]
ax.errorbar(q_g, mean, yerr=err)
ax.set_title("New: H_pypy={:.2f}, H_ref = {:.2f}, h_rms={:.2e}".format(H, H_ref, np.sqrt((surface.heights() ** 2).mean())))
ax.plot(q[sl], q[sl]**(-2-2*H)*alpha, label="{}, H={:.4f}".format('fit', H), lw = 3)
ax.plot(q[sl], q[sl]**(-2-2*H_ref)*C0_ref, label="{}, H={:.4f}".format('ref_fit', H_ref), lw = 3)
ax.legend(loc='best')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.loglog(q[sl], C[sl]/(q[sl]**(-2-2*H_ref)*C0_ref), alpha=.1)
ax.errorbar(q_g, mean/(q_g**(-2-2*H_ref)*C0_ref), yerr=err/(q_g**(-2-2*H_ref)*C0_ref))
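# Generate a random self-affine surface with known Hurst exponent and run all estimators on it.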
def main():
siz = 2000e-9
lam_max = .2*siz
size = (siz, siz)
hurst = .75
h_rms = 3.24e-8
res = 128
nb_grid_pts = (res, res)
seed = 2
surface = Tools.RandomSurfaceGaussian(
nb_grid_pts, size, hurst, h_rms, lambda_max=lam_max, seed=seed).get_surface()
plot_naive(surface, lam_max)
plot_grad_C0(surface, hurst, lam_max)
H, C0 = plot_grad_H(surface, lam_max)
print("H_ref = {}, C0_ref = {}".format(H, C0))
compare_to_PyPy(surface, lam_max, H, C0)
if __name__ == "__main__":
main()
plt.show()
|
[
"PyCo.Tools.CharacterisePeriodicSurface",
"scipy.optimize.fminbound",
"numpy.log",
"PyCo.Tools.RandomSurfaceGaussian",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.isfinite",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] |
[((1421, 1433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1431, 1433), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1566), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (1557, 1566), True, 'import PyCo.Tools as Tools\n'), ((1811, 1828), 'numpy.isfinite', 'np.isfinite', (['mean'], {}), '(mean)\n', (1822, 1828), True, 'import numpy as np\n'), ((2234, 2276), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (2267, 2276), True, 'import PyCo.Tools as Tools\n'), ((2641, 2667), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * C0)', '(51)'], {}), '(0, 2 * C0, 51)\n', (2652, 2667), True, 'import numpy as np\n'), ((2676, 2694), 'numpy.zeros_like', 'np.zeros_like', (['c_s'], {}), '(c_s)\n', (2689, 2694), True, 'import numpy as np\n'), ((2775, 2787), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2785, 2787), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3106), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (3097, 3106), True, 'import PyCo.Tools as Tools\n'), ((3739, 3764), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0)', '(51)'], {}), '(0.0, 2.0, 51)\n', (3750, 3764), True, 'import numpy as np\n'), ((3773, 3791), 'numpy.zeros_like', 'np.zeros_like', (['h_s'], {}), '(h_s)\n', (3786, 3791), True, 'import numpy as np\n'), ((3802, 3820), 'numpy.zeros_like', 'np.zeros_like', (['h_s'], {}), '(h_s)\n', (3815, 3820), True, 'import numpy as np\n'), ((3973, 4031), 'scipy.optimize.fminbound', 'scipy.optimize.fminbound', (['full_obj', '(0)', '(2)'], {'full_output': '(True)'}), '(full_obj, 0, 2, full_output=True)\n', (3997, 4031), False, 'import scipy\n'), ((4086, 4098), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4096, 4098), True, 'import matplotlib.pyplot as plt\n'), ((4715, 4727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4725, 4727), True, 'import matplotlib.pyplot as plt\n'), ((4818, 4860), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (4851, 4860), True, 'import PyCo.Tools as Tools\n'), ((5160, 5177), 'numpy.isfinite', 'np.isfinite', (['mean'], {}), '(mean)\n', (5171, 5177), True, 'import numpy as np\n'), ((5642, 5654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5652, 5654), True, 'import matplotlib.pyplot as plt\n'), ((6401, 6411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6409, 6411), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2065), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (2062, 2065), True, 'import numpy as np\n'), ((2067, 2076), 'numpy.log', 'np.log', (['C'], {}), '(C)\n', (2073, 2076), True, 'import numpy as np\n'), ((6026, 6122), 'PyCo.Tools.RandomSurfaceGaussian', 'Tools.RandomSurfaceGaussian', (['nb_grid_pts', 'size', 'hurst', 'h_rms'], {'lambda_max': 'lam_max', 'seed': 'seed'}), '(nb_grid_pts, size, hurst, h_rms, lambda_max=\n lam_max, seed=seed)\n', (6053, 6122), True, 'import PyCo.Tools as Tools\n'), ((3445, 3454), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (3451, 3454), True, 'import numpy as np\n')]
|
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.policies import CnnPolicy
from stable_baselines.a2c.utils import conv, linear, conv_to_fc
from src.envs import CMDP, FrozenLakeEnvCustomMap
from src.envs.frozen_lake.frozen_maps import MAPS
from src.students import LagrangianStudent, identity_transfer
from src.online_learning import ExponetiatedGradient
from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, \
create_intervention, SmallFrozenTeacherEnv
from src.teacher.frozen_lake_env import SmallFrozenTrainingObservation, SmallFrozenNonStationaryBandits
from src.envs.frozen_lake.utils import create_intervention_from_map, \
OptimalAgent, add_teacher
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
__all__ = ['create_teacher_env', 'small_base_cenv_fn']
def constraint(info=None, **kwargs):
return {'g': float(info['next_state_type'] in 'H')}
def small_base_env_fn():
# Base MDP
world_map = MAPS['small']
not_slipping_prob = 0.8
env_kwargs = dict(desc=world_map,
not_slipping_prob=not_slipping_prob,
base_r_mapping=None,
timeout=200)
return FrozenLakeEnvCustomMap(**env_kwargs)
# Base CMDP
def small_base_cenv_fn():
return CMDP(small_base_env_fn(), constraint,
constraints_values=[0],
n_constraints=1,
avg_constraint=True)
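# Build the teacher's intervention CMDPs (one per teacher distance) plus the final test environment.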
def make_base_small_cenvs():
# Base MDP
world_map = MAPS['small']
# # 2 interventions
# dist = [1, 1]
# tau = [0.1, 0]
# buff_size = [1, 0]
# avg_constraint = [True, True]
# 3 Interventions
dist = [2, 1, 1]
tau = [0.1, 0.1, 0]
buff_size = [1, 1, 0]
avg_constraint = [True, True, True]
interventions = []
for d, t, b, avg in zip(dist, tau, buff_size, avg_constraint):
interventions.append(
create_intervention(
small_base_cenv_fn,
create_intervention_from_map(add_teacher(world_map, d)),
[t], b, use_vec=True, avg_constraint=avg)
)
assert callable(interventions[0])
test_env = create_intervention(
small_base_cenv_fn(), create_intervention_from_map(add_teacher(
world_map)),
[0.0], 0, avg_constraint=True)
return interventions, test_env
############################## TEACHER ENV ###################################
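# Small CNN feature extractor used by the student's CnnPolicy.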
def my_small_cnn(scaled_images, **kwargs):
activ = tf.nn.relu
layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=3,
stride=1, **kwargs))
layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=3,
stride=1, **kwargs))
layer_3 = conv_to_fc(layer_2)
return activ(
linear(layer_3, 'fc1', n_hidden=32, init_scale=np.sqrt(2)))
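# Assemble the teacher environment: a Lagrangian PPO2 student (with exponentiated-gradient dual updates),
# the intervention set, and the evaluation settings.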
def create_teacher_env(new_br_kwargs={}, new_online_kwargs={},
original=False, obs_from_training=False,
non_stationary_bandit=False):
# Student definition
br_kwargs = dict(policy=CnnPolicy, verbose=0, n_steps=128,
ent_coef=0.05, cliprange=0.2, learning_rate=1e-3,
noptepochs=9,
policy_kwargs={'cnn_extractor': my_small_cnn})
br_kwargs.update(new_br_kwargs)
# Define online kwargs
online_kwargs = dict(B=0.5, eta=1.0)
online_kwargs.update(new_online_kwargs)
student_cls = LagrangianStudent
n_envs = 4
use_sub_proc_env = False
student_default_kwargs = {'env': None,
'br_algo': PPO2,
'online_algo': ExponetiatedGradient,
'br_kwargs': br_kwargs,
'online_kwargs': online_kwargs,
'lagrangian_ronuds': 2,
'curriculum_transfer': identity_transfer,
'br_uses_vec_env': True,
'use_sub_proc_env': use_sub_proc_env,
'n_envs': n_envs,
}
student_ranges_dict = {}
# Teacher interventions
if original:
# To preserve the teacher env interface while training in the
# original environment, we introduce a dummy intervention
# condition that is always False.
def dummy_intervention(**kwargs):
return 0
_, test_env = make_base_small_cenvs()
intervention = create_intervention(
base_cenv=small_base_cenv_fn,
interventions=[dummy_intervention], taus=[0], buf_size=0,
use_vec=True, avg_constraint=True)
interventions = [intervention]
else:
interventions, test_env = make_base_small_cenvs()
learning_steps = 4800 * 2
time_steps_lim = learning_steps * 10
test_episode_timeout = 200
test_episode_number = 5
if obs_from_training:
env_cls = SmallFrozenTrainingObservation
elif non_stationary_bandit:
env_cls = SmallFrozenNonStationaryBandits
else:
env_cls = SmallFrozenTeacherEnv
return env_cls(student_cls=student_cls,
student_default_kwargs=student_default_kwargs,
interventions=interventions,
final_env=test_env,
logger_cls=FrozenLakeEvaluationLogger,
student_ranges_dict=student_ranges_dict,
learning_steps=learning_steps,
test_episode_number=test_episode_number,
test_episode_timeout=test_episode_timeout,
time_steps_lim=time_steps_lim,
normalize_obs=False)
|
[
"stable_baselines.a2c.utils.conv_to_fc",
"numpy.sqrt",
"src.envs.FrozenLakeEnvCustomMap",
"stable_baselines.a2c.utils.conv",
"src.teacher.create_intervention",
"tensorflow.compat.v1.logging.set_verbosity",
"src.envs.frozen_lake.utils.add_teacher"
] |
[((741, 803), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (775, 803), True, 'import tensorflow as tf\n'), ((1242, 1278), 'src.envs.FrozenLakeEnvCustomMap', 'FrozenLakeEnvCustomMap', ([], {}), '(**env_kwargs)\n', (1264, 1278), False, 'from src.envs import CMDP, FrozenLakeEnvCustomMap\n'), ((2792, 2811), 'stable_baselines.a2c.utils.conv_to_fc', 'conv_to_fc', (['layer_2'], {}), '(layer_2)\n', (2802, 2811), False, 'from stable_baselines.a2c.utils import conv, linear, conv_to_fc\n'), ((2562, 2636), 'stable_baselines.a2c.utils.conv', 'conv', (['scaled_images', '"""c1"""'], {'n_filters': '(32)', 'filter_size': '(3)', 'stride': '(1)'}), "(scaled_images, 'c1', n_filters=32, filter_size=3, stride=1, **kwargs)\n", (2566, 2636), False, 'from stable_baselines.a2c.utils import conv, linear, conv_to_fc\n'), ((2683, 2751), 'stable_baselines.a2c.utils.conv', 'conv', (['layer_1', '"""c2"""'], {'n_filters': '(64)', 'filter_size': '(3)', 'stride': '(1)'}), "(layer_1, 'c2', n_filters=64, filter_size=3, stride=1, **kwargs)\n", (2687, 2751), False, 'from stable_baselines.a2c.utils import conv, linear, conv_to_fc\n'), ((4559, 4711), 'src.teacher.create_intervention', 'create_intervention', ([], {'base_cenv': 'small_base_cenv_fn', 'interventions': '[dummy_intervention]', 'taus': '[0]', 'buf_size': '(0)', 'use_vec': '(True)', 'avg_constraint': '(True)'}), '(base_cenv=small_base_cenv_fn, interventions=[\n dummy_intervention], taus=[0], buf_size=0, use_vec=True, avg_constraint\n =True)\n', (4578, 4711), False, 'from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, create_intervention, SmallFrozenTeacherEnv\n'), ((2280, 2302), 'src.envs.frozen_lake.utils.add_teacher', 'add_teacher', (['world_map'], {}), '(world_map)\n', (2291, 2302), False, 'from src.envs.frozen_lake.utils import create_intervention_from_map, OptimalAgent, add_teacher\n'), ((2885, 2895), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2892, 2895), True, 'import numpy as np\n'), ((2050, 2075), 'src.envs.frozen_lake.utils.add_teacher', 'add_teacher', (['world_map', 'd'], {}), '(world_map, d)\n', (2061, 2075), False, 'from src.envs.frozen_lake.utils import create_intervention_from_map, OptimalAgent, add_teacher\n')]
|
import numpy as np
import os
from scanorama import *
from scipy.sparse import vstack
from process import load_names
from experiments import *
from utils import *
NAMESPACE = 'zeisel'
METHOD = 'svd'
DIMRED = 100
data_names = [
'data/mouse_brain/zeisel/amygdala',
'data/mouse_brain/zeisel/cerebellum',
'data/mouse_brain/zeisel/cortex1',
'data/mouse_brain/zeisel/cortex2',
'data/mouse_brain/zeisel/cortex3',
'data/mouse_brain/zeisel/enteric',
'data/mouse_brain/zeisel/hippocampus',
'data/mouse_brain/zeisel/hypothalamus',
'data/mouse_brain/zeisel/medulla',
'data/mouse_brain/zeisel/midbraindorsal',
'data/mouse_brain/zeisel/midbrainventral',
'data/mouse_brain/zeisel/olfactory',
'data/mouse_brain/zeisel/pons',
'data/mouse_brain/zeisel/spinalcord',
'data/mouse_brain/zeisel/striatumdorsal',
'data/mouse_brain/zeisel/striatumventral',
'data/mouse_brain/zeisel/sympathetic',
'data/mouse_brain/zeisel/thalamus',
]
if __name__ == '__main__':
datasets, genes_list, n_cells = load_names(data_names, norm=False)
datasets, genes = merge_datasets(datasets, genes_list)
X = vstack(datasets)
if not os.path.isfile('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE)):
log('Dimension reduction with {}...'.format(METHOD))
X_dimred = reduce_dimensionality(
normalize(X), method=METHOD, dimred=DIMRED
)
log('Dimensionality = {}'.format(X_dimred.shape[1]))
np.savetxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE), X_dimred)
else:
X_dimred = np.loadtxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE))
from ample import gs, uniform, srs
#samp_idx = gs(X_dimred, 20000, replace=False)
#samp_idx = uniform(X_dimred, 20000, replace=False)
samp_idx = srs(X_dimred, 20000, replace=False)
#from anndata import AnnData
#import scanpy.api as sc
#adata = AnnData(X=X_dimred[samp_idx, :])
#sc.pp.neighbors(adata, use_rep='X')
#sc.tl.louvain(adata, resolution=1.5, key_added='louvain')
#
#louv_labels = np.array(adata.obs['louvain'].tolist())
#le = LabelEncoder().fit(louv_labels)
#cell_labels = le.transform(louv_labels)
#
#np.savetxt('data/cell_labels/zeisel_louvain.txt', cell_labels)
labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(labels)
cell_labels = le.transform(labels)
experiments(
X_dimred, NAMESPACE, n_seeds=2,
cell_labels=cell_labels,
kmeans_ami=True, louvain_ami=True,
rare=True,
rare_label=le.transform(['Ependymal'])[0],
)
exit()
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_srs{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
exit()
cell_labels = (
open('data/cell_labels/zeisel_louvain.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
astro = set([ 32, 38, 40, ])
oligo = set([ 2, 5, 12, 20, 23, 33, 37, ])
focus = set([ 15, 36, 41 ])
labels = []
aob_labels = []
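    # Tag each cell by cluster: 0 = focus (both), 1 = astrocyte/oligodendrocyte, 2 = everything else.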
for cl in cell_labels:
if cl in focus:
labels.append(0)
aob_labels.append('both')
elif cl in astro or cl in oligo:
labels.append(1)
if cl in astro:
aob_labels.append('astro')
else:
aob_labels.append('oligo')
else:
labels.append(2)
aob_labels.append('none')
labels = np.array(labels)
aob_labels = np.array(aob_labels)
X = np.log1p(normalize(X[samp_idx, :]))
from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'both', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'astro', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'oligo', NAMESPACE)
#astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'both', NAMESPACE)
astro_oligo_violin(X, genes, 'GJA1', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'MBP', aob_labels, NAMESPACE)
astro_oligo_violin(X, genes, 'PLP1', aob_labels, NAMESPACE)
viz_genes = [
#'GJA1', 'MBP', 'PLP1', 'TRF',
#'CST3', 'CPE', 'FTH1', 'APOE', 'MT1', 'NDRG2', 'TSPAN7',
#'PLP1', 'MAL', 'PTGDS', 'CLDN11', 'APOD', 'QDPR', 'MAG', 'ERMN',
#'PLP1', 'MAL', 'PTGDS', 'MAG', 'CLDN11', 'APOD', 'FTH1',
#'ERMN', 'MBP', 'ENPP2', 'QDPR', 'MOBP', 'TRF',
#'CST3', 'SPARCL1', 'PTN', 'CD81', 'APOE', 'ATP1A2', 'ITM2B'
]
cell_labels = (
open('data/cell_labels/zeisel_cluster.txt')
.read().rstrip().split('\n')
)
le = LabelEncoder().fit(cell_labels)
cell_labels = le.transform(cell_labels)
embedding = visualize(
[ X_dimred[samp_idx, :] ], cell_labels[samp_idx],
NAMESPACE + '_astro{}'.format(len(samp_idx)),
[ str(ct) for ct in sorted(set(cell_labels)) ],
gene_names=viz_genes, gene_expr=X, genes=genes,
perplexity=100, n_iter=500, image_suffix='.png',
viz_cluster=True
)
#visualize_dropout(X, embedding, image_suffix='.png',
# viz_prefix=NAMESPACE + '_dropout')
from differential_entropies import differential_entropies
differential_entropies(X_dimred, labels)
|
[
"scipy.sparse.vstack",
"ample.srs",
"process.load_names",
"numpy.array",
"differential_entropies.differential_entropies",
"mouse_brain_astrocyte.astro_oligo_violin"
] |
[((1047, 1081), 'process.load_names', 'load_names', (['data_names'], {'norm': '(False)'}), '(data_names, norm=False)\n', (1057, 1081), False, 'from process import load_names\n'), ((1149, 1165), 'scipy.sparse.vstack', 'vstack', (['datasets'], {}), '(datasets)\n', (1155, 1165), False, 'from scipy.sparse import vstack\n'), ((1811, 1846), 'ample.srs', 'srs', (['X_dimred', '(20000)'], {'replace': '(False)'}), '(X_dimred, 20000, replace=False)\n', (1814, 1846), False, 'from ample import gs, uniform, srs\n'), ((3771, 3787), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3779, 3787), True, 'import numpy as np\n'), ((3805, 3825), 'numpy.array', 'np.array', (['aob_labels'], {}), '(aob_labels)\n', (3813, 3825), True, 'import numpy as np\n'), ((4438, 4497), 'mouse_brain_astrocyte.astro_oligo_violin', 'astro_oligo_violin', (['X', 'genes', '"""GJA1"""', 'aob_labels', 'NAMESPACE'], {}), "(X, genes, 'GJA1', aob_labels, NAMESPACE)\n", (4456, 4497), False, 'from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin\n'), ((4502, 4560), 'mouse_brain_astrocyte.astro_oligo_violin', 'astro_oligo_violin', (['X', 'genes', '"""MBP"""', 'aob_labels', 'NAMESPACE'], {}), "(X, genes, 'MBP', aob_labels, NAMESPACE)\n", (4520, 4560), False, 'from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin\n'), ((4565, 4624), 'mouse_brain_astrocyte.astro_oligo_violin', 'astro_oligo_violin', (['X', 'genes', '"""PLP1"""', 'aob_labels', 'NAMESPACE'], {}), "(X, genes, 'PLP1', aob_labels, NAMESPACE)\n", (4583, 4624), False, 'from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin\n'), ((5765, 5805), 'differential_entropies.differential_entropies', 'differential_entropies', (['X_dimred', 'labels'], {}), '(X_dimred, labels)\n', (5787, 5805), False, 'from differential_entropies import differential_entropies\n')]
|
import numpy
import pytest
import os
from shutil import rmtree
from numpy.testing import assert_allclose
import scipy.stats
import scipy.integrate
import scipy.special
from fgivenx.mass import PMF, compute_pmf
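# Analytic PMF of a Gaussian: the two-sided tail mass beyond |y - mu|.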
def gaussian_pmf(y, mu=0, sigma=1):
return scipy.special.erfc(numpy.abs(y-mu)/numpy.sqrt(2)/sigma)
def test_gaussian():
numpy.random.seed(0)
nsamp = 5000
samples = numpy.random.randn(nsamp)
y = numpy.random.uniform(-3, 3, 10)
m = PMF(samples, y)
m_ = gaussian_pmf(y)
assert_allclose(m, m_, rtol=3e-1)
def test_PMF():
# Compute samples
numpy.random.seed(0)
nsamp = 100
samples = numpy.concatenate((-5+numpy.random.randn(nsamp//2),
5+numpy.random.randn(nsamp//2)))
# Compute PMF
y = numpy.random.uniform(-10, 10, 10)
m = PMF(samples, y)
# Compute PMF via monte carlo
N = 100000
kernel = scipy.stats.gaussian_kde(samples)
s = kernel.resample(N)[0]
m_ = [sum(kernel(s) <= kernel(y_i))/float(N) for y_i in y]
assert_allclose(m, m_, atol=3*N**-0.5)
# Compute PMF via quadrature
m_ = [scipy.integrate.quad(lambda x: kernel(x)*(kernel(x) <= kernel(y_i)),
-numpy.inf, numpy.inf, limit=500)[0]
for y_i in y]
assert_allclose(m, m_, atol=1e-4)
assert_allclose([0, 0], PMF(samples, [-1e3, 1e3]))
samples = [0, 0]
m = PMF(samples, y)
assert_allclose(m, numpy.zeros_like(y))
def test_compute_pmf():
with pytest.raises(TypeError):
compute_pmf(None, None, wrong_argument=None)
cache = '.test_cache/test'
numpy.random.seed(0)
nsamp = 5000
a, b, e, f = 0, 1, 0, 1
m = numpy.random.normal(a, b, nsamp)
c = numpy.random.normal(e, f, nsamp)
nx = 100
x = numpy.linspace(-1, 1, nx)
fsamps = (numpy.outer(x, m) + c)
ny = 100
y = numpy.linspace(-3, 3, ny)
assert(not os.path.isfile(cache + '_masses.pkl'))
m = compute_pmf(fsamps, y, cache=cache)
assert(os.path.isfile(cache + '_masses.pkl'))
m_ = [gaussian_pmf(y, a*xi+e, numpy.sqrt(b**2*xi**2+f**2)) for xi in x]
assert_allclose(m.transpose(), m_, atol=3e-1)
m = compute_pmf(fsamps, y, cache=cache)
assert_allclose(m.transpose(), m_, atol=3e-1)
rmtree('.test_cache')
|
[
"numpy.random.normal",
"numpy.abs",
"fgivenx.mass.compute_pmf",
"fgivenx.mass.PMF",
"numpy.sqrt",
"numpy.testing.assert_allclose",
"numpy.zeros_like",
"shutil.rmtree",
"os.path.isfile",
"numpy.linspace",
"numpy.outer",
"pytest.raises",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.random.randn"
] |
[((342, 362), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (359, 362), False, 'import numpy\n'), ((394, 419), 'numpy.random.randn', 'numpy.random.randn', (['nsamp'], {}), '(nsamp)\n', (412, 419), False, 'import numpy\n'), ((428, 459), 'numpy.random.uniform', 'numpy.random.uniform', (['(-3)', '(3)', '(10)'], {}), '(-3, 3, 10)\n', (448, 459), False, 'import numpy\n'), ((468, 483), 'fgivenx.mass.PMF', 'PMF', (['samples', 'y'], {}), '(samples, y)\n', (471, 483), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((513, 545), 'numpy.testing.assert_allclose', 'assert_allclose', (['m', 'm_'], {'rtol': '(0.3)'}), '(m, m_, rtol=0.3)\n', (528, 545), False, 'from numpy.testing import assert_allclose\n'), ((591, 611), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (608, 611), False, 'import numpy\n'), ((787, 820), 'numpy.random.uniform', 'numpy.random.uniform', (['(-10)', '(10)', '(10)'], {}), '(-10, 10, 10)\n', (807, 820), False, 'import numpy\n'), ((829, 844), 'fgivenx.mass.PMF', 'PMF', (['samples', 'y'], {}), '(samples, y)\n', (832, 844), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1039, 1081), 'numpy.testing.assert_allclose', 'assert_allclose', (['m', 'm_'], {'atol': '(3 * N ** -0.5)'}), '(m, m_, atol=3 * N ** -0.5)\n', (1054, 1081), False, 'from numpy.testing import assert_allclose\n'), ((1287, 1322), 'numpy.testing.assert_allclose', 'assert_allclose', (['m', 'm_'], {'atol': '(0.0001)'}), '(m, m_, atol=0.0001)\n', (1302, 1322), False, 'from numpy.testing import assert_allclose\n'), ((1407, 1422), 'fgivenx.mass.PMF', 'PMF', (['samples', 'y'], {}), '(samples, y)\n', (1410, 1422), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1618, 1638), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (1635, 1638), False, 'import numpy\n'), ((1692, 1724), 'numpy.random.normal', 'numpy.random.normal', (['a', 'b', 'nsamp'], {}), '(a, b, nsamp)\n', (1711, 1724), False, 'import numpy\n'), ((1733, 1765), 'numpy.random.normal', 'numpy.random.normal', (['e', 'f', 'nsamp'], {}), '(e, f, nsamp)\n', (1752, 1765), False, 'import numpy\n'), ((1787, 1812), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', 'nx'], {}), '(-1, 1, nx)\n', (1801, 1812), False, 'import numpy\n'), ((1871, 1896), 'numpy.linspace', 'numpy.linspace', (['(-3)', '(3)', 'ny'], {}), '(-3, 3, ny)\n', (1885, 1896), False, 'import numpy\n'), ((1960, 1995), 'fgivenx.mass.compute_pmf', 'compute_pmf', (['fsamps', 'y'], {'cache': 'cache'}), '(fsamps, y, cache=cache)\n', (1971, 1995), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((2007, 2044), 'os.path.isfile', 'os.path.isfile', (["(cache + '_masses.pkl')"], {}), "(cache + '_masses.pkl')\n", (2021, 2044), False, 'import os\n'), ((2182, 2217), 'fgivenx.mass.compute_pmf', 'compute_pmf', (['fsamps', 'y'], {'cache': 'cache'}), '(fsamps, y, cache=cache)\n', (2193, 2217), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((2273, 2294), 'shutil.rmtree', 'rmtree', (['""".test_cache"""'], {}), "('.test_cache')\n", (2279, 2294), False, 'from shutil import rmtree\n'), ((1350, 1381), 'fgivenx.mass.PMF', 'PMF', (['samples', '[-1000.0, 1000.0]'], {}), '(samples, [-1000.0, 1000.0])\n', (1353, 1381), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1446, 1465), 'numpy.zeros_like', 'numpy.zeros_like', (['y'], {}), '(y)\n', (1462, 1465), False, 'import numpy\n'), ((1503, 1527), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1516, 1527), False, 'import pytest\n'), ((1537, 1581), 
'fgivenx.mass.compute_pmf', 'compute_pmf', (['None', 'None'], {'wrong_argument': 'None'}), '(None, None, wrong_argument=None)\n', (1548, 1581), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1827, 1844), 'numpy.outer', 'numpy.outer', (['x', 'm'], {}), '(x, m)\n', (1838, 1844), False, 'import numpy\n'), ((1913, 1950), 'os.path.isfile', 'os.path.isfile', (["(cache + '_masses.pkl')"], {}), "(cache + '_masses.pkl')\n", (1927, 1950), False, 'import os\n'), ((2081, 2118), 'numpy.sqrt', 'numpy.sqrt', (['(b ** 2 * xi ** 2 + f ** 2)'], {}), '(b ** 2 * xi ** 2 + f ** 2)\n', (2091, 2118), False, 'import numpy\n'), ((278, 295), 'numpy.abs', 'numpy.abs', (['(y - mu)'], {}), '(y - mu)\n', (287, 295), False, 'import numpy\n'), ((294, 307), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (304, 307), False, 'import numpy\n'), ((664, 694), 'numpy.random.randn', 'numpy.random.randn', (['(nsamp // 2)'], {}), '(nsamp // 2)\n', (682, 694), False, 'import numpy\n'), ((729, 759), 'numpy.random.randn', 'numpy.random.randn', (['(nsamp // 2)'], {}), '(nsamp // 2)\n', (747, 759), False, 'import numpy\n')]
|
from builtins import str
from builtins import range
from robust.simulations.simulate import filter_gamma_result_dict
from SimPleAC_save import load_obj
import pickle as pickle
import numpy as np
import matplotlib.pyplot as plt
from SimPleAC_pof_simulate import pof_parameters
if __name__ == "__main__":
    # Retrieve the probability-of-failure (PoF) simulation parameters
[model, methods, gammas, number_of_iterations,
min_num_of_linear_sections, max_num_of_linear_sections, verbosity, linearization_tolerance,
number_of_time_average_solves, uncertainty_sets, nominal_solution, directly_uncertain_vars_subs, parallel,
nominal_number_of_constraints, nominal_solve_time] = pof_parameters()
method = methods[0] # only care about Best Pairs
# Loading results
margin = {}
nGammas = nmargins = len(gammas)
margins = gammas
margin['solutions'] = {}
for i in range(nmargins):
margin['solutions'][margins[i]] = pickle.load(open("marginResults/" +
str(margins[i]), 'rb'))
margin['number_of_constraints'] = load_obj('marginnumber_of_constraints', 'marginResults')
margin['simulation_results'] = load_obj('marginsimulation_results', 'marginResults')
gamma = {}
gamma['solutions'] = {}
for i in range(nGammas):
for j in range(len(methods)):
for k in range((len(uncertainty_sets))):
gamma['solutions'][gammas[i], methods[j]['name'], uncertainty_sets[k]] = pickle.load(open(
"gammaResults\\" + str((gammas[i], methods[j]['name'], uncertainty_sets[k])), 'rb'))
gamma['solve_times'] = load_obj('gammasolve_times', 'gammaResults')
gamma['simulation_results'] = load_obj('gammasimulation_results', 'gammaResults')
gamma['number_of_constraints'] = load_obj('gammanumber_of_constraints', 'gammaResults')
# Plotting of cost and probability of failure
objective_name = 'Total fuel weight'
objective_units = 'N'
title = ''
filteredResults = [margin['solutions'],
filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'box'),
filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'ellipsoidal')]
filteredSimulations = [margin['simulation_results'],
filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'box'),
filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'ellipsoidal')]
objective_varkey = 'W_{f_m}'
legend_keys = ['margins', 'box', 'ellipsoidal']
edgecolors = ['#FFBF00', '#CC0000', '#008000']
facecolors = ['#FFE135','#FF2052', '#8DB600']
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
lines = []
mincost = 1e10
maxcost = 0
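    # For each formulation (margins, box, ellipsoidal): mean cost with a +/- one-std band on ax1, PoF on ax2.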
for i in range(len(legend_keys)):
sims = list(filteredSimulations[i].items())
pofs = []
objective_costs = []
objective_stddev = []
for j in sims:
pofs.append(j[1][0])
objective_costs.append(j[1][1])
objective_stddev.append(j[1][2])
mincost = np.min([mincost] + objective_costs)
maxcost = np.max([maxcost] + objective_costs)
lines.append(ax1.plot(gammas, objective_costs, color=edgecolors[i], label=legend_keys[i] + ', cost'))
inds = np.nonzero(np.ones(len(gammas)) - pofs)[0]
uppers = [objective_costs[ind] + objective_stddev[ind] for ind in inds]
lowers = [objective_costs[ind] - objective_stddev[ind] for ind in inds]
x = [gammas[ind] for ind in inds]
ax1.fill_between(x, lowers, uppers,
alpha=0.5, edgecolor = edgecolors[i], facecolor = facecolors[i])
lines.append(ax2.plot(gammas, pofs, color=edgecolors[i], label=legend_keys[i] + ', PoF'))
ax1.set_xlabel(r'Uncertainty Set Scaling Factor $\Gamma$', fontsize=12)
ax1.set_ylabel('Cost [' + objective_name + ' (' + objective_units.capitalize() + ')]', fontsize=12)
ax2.set_ylabel("Probability of Failure", fontsize=12)
ax1.set_ylim([mincost, maxcost])
ax2.set_ylim([0, 1])
plt.title(title, fontsize=12)
labs = [lines[l][0].get_label() for l in [1,3,5,0,2,4]]
ax1.legend(labs, loc="lower right", fontsize=9, numpoints=1)
# ax1.legend(loc="lower right", fontsize=10, numpoints=1)
# fig.legend(loc="lower right", fontsize=10, numpoints=1)
plt.show()
|
[
"matplotlib.pyplot.show",
"SimPleAC_pof_simulate.pof_parameters",
"builtins.str",
"numpy.max",
"builtins.range",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"robust.simulations.simulate.filter_gamma_result_dict",
"SimPleAC_save.load_obj"
] |
[((659, 675), 'SimPleAC_pof_simulate.pof_parameters', 'pof_parameters', ([], {}), '()\n', (673, 675), False, 'from SimPleAC_pof_simulate import pof_parameters\n'), ((868, 883), 'builtins.range', 'range', (['nmargins'], {}), '(nmargins)\n', (873, 883), False, 'from builtins import range\n'), ((1093, 1149), 'SimPleAC_save.load_obj', 'load_obj', (['"""marginnumber_of_constraints"""', '"""marginResults"""'], {}), "('marginnumber_of_constraints', 'marginResults')\n", (1101, 1149), False, 'from SimPleAC_save import load_obj\n'), ((1185, 1238), 'SimPleAC_save.load_obj', 'load_obj', (['"""marginsimulation_results"""', '"""marginResults"""'], {}), "('marginsimulation_results', 'marginResults')\n", (1193, 1238), False, 'from SimPleAC_save import load_obj\n'), ((1296, 1310), 'builtins.range', 'range', (['nGammas'], {}), '(nGammas)\n', (1301, 1310), False, 'from builtins import range\n'), ((1662, 1706), 'SimPleAC_save.load_obj', 'load_obj', (['"""gammasolve_times"""', '"""gammaResults"""'], {}), "('gammasolve_times', 'gammaResults')\n", (1670, 1706), False, 'from SimPleAC_save import load_obj\n'), ((1741, 1792), 'SimPleAC_save.load_obj', 'load_obj', (['"""gammasimulation_results"""', '"""gammaResults"""'], {}), "('gammasimulation_results', 'gammaResults')\n", (1749, 1792), False, 'from SimPleAC_save import load_obj\n'), ((1830, 1884), 'SimPleAC_save.load_obj', 'load_obj', (['"""gammanumber_of_constraints"""', '"""gammaResults"""'], {}), "('gammanumber_of_constraints', 'gammaResults')\n", (1838, 1884), False, 'from SimPleAC_save import load_obj\n'), ((2756, 2770), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2768, 2770), True, 'import matplotlib.pyplot as plt\n'), ((4170, 4199), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(12)'}), '(title, fontsize=12)\n', (4179, 4199), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4461, 4463), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2158), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['solutions']", '(1)', "method['name']", '(2)', '"""box"""'], {}), "(gamma['solutions'], 1, method['name'], 2, 'box')\n", (2109, 2158), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((2183, 2268), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['solutions']", '(1)', "method['name']", '(2)', '"""ellipsoidal"""'], {}), "(gamma['solutions'], 1, method['name'], 2,\n 'ellipsoidal')\n", (2207, 2268), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((2350, 2436), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['simulation_results']", '(1)', "method['name']", '(2)', '"""box"""'], {}), "(gamma['simulation_results'], 1, method['name'], 2,\n 'box')\n", (2374, 2436), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((2462, 2556), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['simulation_results']", '(1)', "method['name']", '(2)', '"""ellipsoidal"""'], {}), "(gamma['simulation_results'], 1, method['name'], 2,\n 'ellipsoidal')\n", (2486, 2556), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((3173, 3208), 'numpy.min', 'np.min', (['([mincost] + objective_costs)'], {}), '([mincost] + objective_costs)\n', (3179, 3208), True, 'import numpy as np\n'), ((3227, 3262), 'numpy.max', 
'np.max', (['([maxcost] + objective_costs)'], {}), '([maxcost] + objective_costs)\n', (3233, 3262), True, 'import numpy as np\n'), ((1031, 1046), 'builtins.str', 'str', (['margins[i]'], {}), '(margins[i])\n', (1034, 1046), False, 'from builtins import str\n'), ((1569, 1626), 'builtins.str', 'str', (["(gammas[i], methods[j]['name'], uncertainty_sets[k])"], {}), "((gammas[i], methods[j]['name'], uncertainty_sets[k]))\n", (1572, 1626), False, 'from builtins import str\n')]
|
#!/usr/bin/env python
from __future__ import absolute_import
import numpy as np
import os
import pytest
import tempfile
import training_data
class TestTrainingData():
def test_add(self):
td = training_data.training_data()
assert np.array_equal(td.get_x(), np.empty([0, 4, 4], dtype=np.int))
assert np.array_equal(td.get_y_digit(), np.empty([0, 1], dtype=np.int))
assert np.allclose(td.get_reward(), np.empty([0, 1], dtype=np.float))
assert np.array_equal(td.get_next_x(), np.empty([0, 4, 4], dtype=np.int))
assert np.array_equal(td.get_done(), np.empty([0, 1], dtype=np.bool))
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]), True)
assert np.array_equal(td.get_x(), np.ones([1, 4, 4], dtype=np.int))
assert np.array_equal(td.get_y_digit(), np.array([[1]], dtype=np.int))
assert np.allclose(td.get_reward(), np.array([[4]], dtype=np.float))
assert np.array_equal(td.get_next_x(), np.zeros([1, 4, 4], dtype=np.int))
assert np.array_equal(td.get_done(), np.array([[1]], dtype=np.bool))
def test_get_x_stacked(self):
td = training_data.training_data()
td.add(np.full([4, 4], 2), 0, 4, np.zeros([4, 4]))
td.add(np.full([4, 4], 8), 1, 8, np.ones([4, 4]))
td.add(np.full([4, 4], 2048), 1, 8, np.ones([4, 4]))
expected_x_stacked = np.array([
[
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]
]
], dtype=np.int)
assert np.array_equal(td.get_x_stacked(), expected_x_stacked)
def test_get_y_one_hot(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 3, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4]))
expected_y_one_hot = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]
], dtype=np.int)
assert np.array_equal(td.get_y_one_hot(), expected_y_one_hot)
def test_get_total_reward(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 3, 16, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 2, 32, np.ones([4, 4]))
assert td.get_total_reward() == 60
def test_get_highest_tile(self):
td = training_data.training_data()
td.add(np.full((4, 4), 1), 0, 4, np.full((4, 4), 2))
td.add(np.full((4, 4), 2), 0, 4, np.full((4, 4), 4))
assert td.get_highest_tile() == 4
def test_get_n(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 1, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4]))
(state, action, reward, next_state, done) = td.get_n(1)
assert np.array_equal(state, np.zeros([4, 4], dtype=np.int))
assert action == 2
assert reward == pytest.approx(8.)
assert np.array_equal(next_state, np.ones([4, 4], dtype=np.int))
def test_hflip(self):
td = training_data.training_data()
board1 = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
board2 = np.array([[0, 0, 0, 0],
[2, 4, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(board1, 1, 2, board2)
td.add(board2, 2, 0, board1)
td.hflip()
expected_x = np.array([
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[3],
[2]
], dtype=np.int)
expected_reward = np.array([
[2],
[0],
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.allclose(td.get_next_x(), expected_next_x)
def test_rotate(self):
td = training_data.training_data()
board1 = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
board2 = np.array([[0, 0, 0, 0],
[2, 4, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(board1, 1, 2, board2)
td.add(board2, 2, 0, board1)
td.rotate(3)
expected_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[0],
[1]
], dtype=np.int)
expected_reward = np.array([
[2],
[0],
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_augment(self):
td = training_data.training_data()
initial_board = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
next_board = np.array([[0, 0, 0, 2],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(initial_board, 1, 4, next_board)
td.augment()
assert td.size() == 8
expected_x = np.array([
[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]],
[[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[1],
[3],
[2],
[0],
[3],
[1],
[0],
[2]
], dtype=np.int)
expected_reward = np.array([
[4],
[4],
[4],
[4],
[4],
[4],
[4],
[4]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Original
[[2, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Hflip'd
[[0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 2]], # Original, rotated 90 degrees
[[0, 0, 0, 2], [0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]], # Hflip, rotated 90 degrees
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0, 0, 0]], # Original, rotated 180 degrees
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], # Hflip, rotated 180 degrees
            [[2, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], # Original, rotated 270 degrees
[[0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0]] # Hflip, rotated 270 degrees
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_merge(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4]))
td2 = training_data.training_data()
td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4]))
td.merge(td2)
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[1],
[2]
], dtype=np.int)
expected_reward = np.array([
[16],
[0]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_split(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4]))
td2 = training_data.training_data()
td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4]))
td.merge(td2)
a, b = td.split()
assert np.array_equal(a.get_x(), np.ones([1, 4, 4]))
assert np.array_equal(a.get_y_digit(), [[1]])
assert np.array_equal(a.get_reward(), [[16]])
assert np.array_equal(a.get_next_x(), np.zeros([1, 4, 4]))
assert np.array_equal(b.get_x(), np.zeros([1, 4, 4]))
assert np.array_equal(b.get_y_digit(), [[2]])
assert np.array_equal(b.get_reward(), [[0]])
assert np.array_equal(b.get_next_x(), np.ones([1, 4, 4]))
def test_sample(self):
td = training_data.training_data()
td.add(np.zeros([1, 4, 4]), 0, 0, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 1, np.ones([1, 4, 4]))
sample = td.sample([1])
assert sample.size() == 1
assert sample.get_y_digit() in [[[0]], [[1]]]
if sample.get_y_digit() == 0:
assert np.array_equal(sample.get_x(), np.zeros([1, 4, 4]))
if sample.get_y_digit() == 1:
assert np.array_equal(sample.get_x(), np.ones([1, 4, 4]))
def test_size(self):
td = training_data.training_data()
assert td.size() == 0
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
assert td.size() == 1
def test_log2_rewards(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 0, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 16, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 75, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2048, np.zeros([1, 4, 4]))
td.log2_rewards()
expected_reward = np.array([
[0], [1], [2], [4], [6.2288], [11]
], dtype=np.float)
assert np.allclose(td.get_reward(), expected_reward)
expected_action = np.array([
[0], [1], [2], [3], [0], [1]
], dtype=np.int)
assert np.allclose(td.get_y_digit(), expected_action)
def test_get_discounted_return(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4]))
# Test using default gamma value of 0.9
td2 = td.copy()
discounted_return = td2.get_discounted_return()
expected_return = np.array([
[20.218], [18.02], [17.8], [2.0]
], dtype=np.float)
assert np.allclose(discounted_return, expected_return)
# Test using gamma value of 0, should have no effect on rewards
td2 = td.copy()
discounted_return = td2.get_discounted_return(gamma=0.0)
expected_return = np.array([
[4], [2], [16], [2]
], dtype=np.float)
assert np.allclose(discounted_return, expected_return)
# Test end of episode
td3 = training_data.training_data()
td3.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]), False)
td3.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]), True)
td3.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]), False)
td3.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4]), True)
discounted_return = td3.get_discounted_return()
expected_return = np.array([
[5.8], [2.0], [17.8], [2.0]
], dtype=np.float)
assert np.allclose(discounted_return, expected_return)
def test_normalize_rewards(self):
# Test calculating mean and standard deviation
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4]))
td.normalize_rewards()
expected_reward = np.array([
[-0.8165], [-0.8165], [0.], [1.633],
], dtype=np.float)
assert np.allclose(td.get_reward(), expected_reward)
# Test specifying mean and standard deviation
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4]))
td.normalize_rewards(mean=8, sd=1)
expected_reward = np.array([
[-4.], [-4.], [0.], [8.],
], dtype=np.float)
assert np.allclose(td.get_reward(), expected_reward)
def test_normalize_boards(self):
# Test calculating mean and standard deviation
td = training_data.training_data()
td.add(np.full((1, 4, 4), 4), 1, 4, np.full((1, 4, 4), 8))
td.add(np.full((1, 4, 4), 8), 2, 4, np.full((1, 4, 4), 16))
td.add(np.full((1, 4, 4), 16), 3, 4, np.full((1, 4, 4), 32))
td.add(np.full((1, 4, 4), 32), 4, 4, np.full((1, 4, 4), 64))
td.normalize_boards()
mean = 15.
sd = 10.7238052947636
a = (4. - mean) / sd
b = (8. - mean) / sd
c = (16. - mean) / sd
d = (32. - mean) / sd
e = (64. - mean) / sd
expected_x = np.array([
[[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]],
[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]],
[[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]],
[[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]]
], dtype=np.float)
assert np.allclose(td.get_x(), expected_x)
expected_next_x = np.array([
[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]],
[[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]],
[[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]],
[[e, e, e, e], [e, e, e, e], [e, e, e, e], [e, e, e, e]]
], dtype=np.float)
assert np.allclose(td.get_next_x(), expected_next_x)
def test_save_restore(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4]))
temp_dir = tempfile.mkdtemp()
temp_filename = os.path.join(temp_dir, 'data.csv')
td.export_csv(temp_filename)
td2 = training_data.training_data()
td2.import_csv(temp_filename)
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[0],
[1],
[2],
[3]
], dtype=np.int)
expected_reward = np.array([
[4],
[2],
[16],
[2]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
], dtype=np.int)
assert np.array_equal(td2.get_x(), expected_x)
assert np.array_equal(td2.get_y_digit(), expected_y_digit)
assert np.allclose(td2.get_reward(), expected_reward)
assert np.array_equal(td2.get_next_x(), expected_next_x)
os.remove(temp_filename)
os.rmdir(temp_dir)
def test_shuffle(self):
td = training_data.training_data()
n = 5
for i in range(n):
# Use "is odd" for done
td.add(np.full((1, 4, 4), i), i, i, np.full((1, 4, 4), i), (i % 2) == 1)
td.shuffle()
for i in range(n):
# Find where this has been shuffled too
index_of_val = np.where(td.get_y_digit() == i)[0].item(0)
# Check that all parts of this equal i
arrays = td.get_n(index_of_val)
for a in arrays:
if a.dtype is np.dtype(np.bool):
assert((a == ((i % 2) == 1)).all())
else:
assert((a == i).all())
def test_make_boards_unique(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4]))
td.make_boards_unique()
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
], dtype=np.int)
expected_y_digit = np.array([
[0],
[1]
], dtype=np.int)
expected_reward = np.array([
[4],
[2]
], dtype=np.float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
], dtype=np.int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
if __name__ == '__main__':
import pytest
pytest.main()
|
[
"pytest.approx",
"numpy.allclose",
"numpy.ones",
"os.path.join",
"pytest.main",
"training_data.training_data",
"numpy.array",
"os.rmdir",
"tempfile.mkdtemp",
"numpy.empty",
"numpy.zeros",
"numpy.full",
"numpy.dtype",
"os.remove"
] |
[((22956, 22969), 'pytest.main', 'pytest.main', ([], {}), '()\n', (22967, 22969), False, 'import pytest\n'), ((208, 237), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (235, 237), False, 'import training_data\n'), ((1140, 1169), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (1167, 1169), False, 'import training_data\n'), ((1377, 3983), 'numpy.array', 'np.array', (['[[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0]]], [[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, \n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0]], [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, \n 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, \n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]]]'], {'dtype': 'np.int'}), '([[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 
[1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0]]], [[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], [[[0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, \n 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]]], dtype\n =np.int)\n', (1385, 3983), True, 'import numpy as np\n'), ((4654, 4683), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (4681, 4683), False, 'import training_data\n'), ((4937, 5022), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {'dtype': 'np.int'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]], dtype=np.int\n )\n', (4945, 5022), True, 'import numpy as np\n'), ((5201, 5230), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (5228, 5230), False, 'import training_data\n'), ((5551, 5580), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (5578, 5580), False, 'import training_data\n'), ((5785, 5814), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (5812, 5814), False, 'import training_data\n'), ((6243, 6272), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (6270, 6272), False, 'import training_data\n'), ((6290, 6356), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (6298, 6356), True, 'import numpy as np\n'), ((6455, 6521), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (6463, 6521), True, 'import numpy as np\n'), ((6717, 6861), 'numpy.array', 'np.array', (['[[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [\n 0, 0, 4, 2], [0, 0, 0, 
0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0,\n 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (6725, 6861), True, 'import numpy as np\n'), ((6923, 6957), 'numpy.array', 'np.array', (['[[3], [2]]'], {'dtype': 'np.int'}), '([[3], [2]], dtype=np.int)\n', (6931, 6957), True, 'import numpy as np\n'), ((7022, 7058), 'numpy.array', 'np.array', (['[[2], [0]]'], {'dtype': 'np.float'}), '([[2], [0]], dtype=np.float)\n', (7030, 7058), True, 'import numpy as np\n'), ((7124, 7268), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 1, 1], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0,\n 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (7132, 7268), True, 'import numpy as np\n'), ((7586, 7615), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (7613, 7615), False, 'import training_data\n'), ((7633, 7699), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (7641, 7699), True, 'import numpy as np\n'), ((7798, 7864), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (7806, 7864), True, 'import numpy as np\n'), ((8062, 8206), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]]], dtype=np.int)\n', (8070, 8206), True, 'import numpy as np\n'), ((8268, 8302), 'numpy.array', 'np.array', (['[[0], [1]]'], {'dtype': 'np.int'}), '([[0], [1]], dtype=np.int)\n', (8276, 8302), True, 'import numpy as np\n'), ((8367, 8403), 'numpy.array', 'np.array', (['[[2], [0]]'], {'dtype': 'np.float'}), '([[2], [0]], dtype=np.float)\n', (8375, 8403), True, 'import numpy as np\n'), ((8469, 8613), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]]], dtype=np.int)\n', (8477, 8613), True, 'import numpy as np\n'), ((8935, 8964), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (8962, 8964), False, 'import training_data\n'), ((8989, 9055), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (8997, 9055), True, 'import numpy as np\n'), ((9179, 9245), 'numpy.array', 'np.array', (['[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (9187, 9245), True, 'import numpy as np\n'), ((9459, 9972), 'numpy.array', 'np.array', (['[[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 1, 1], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 1], [0, 0, 0, 1],\n [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1],\n [0, 0, 0, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 
0]], [[0, 0, 0, 0],\n [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [1, 0, 0, 0],\n [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0,\n 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 1], [0, 0,\n 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0,\n 0, 1], [0, 0, 0, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0,\n 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0]], [[0, \n 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [1,\n 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (9467, 9972), True, 'import numpy as np\n'), ((10085, 10149), 'numpy.array', 'np.array', (['[[1], [3], [2], [0], [3], [1], [0], [2]]'], {'dtype': 'np.int'}), '([[1], [3], [2], [0], [3], [1], [0], [2]], dtype=np.int)\n', (10093, 10149), True, 'import numpy as np\n'), ((10286, 10352), 'numpy.array', 'np.array', (['[[4], [4], [4], [4], [4], [4], [4], [4]]'], {'dtype': 'np.float'}), '([[4], [4], [4], [4], [4], [4], [4], [4]], dtype=np.float)\n', (10294, 10352), True, 'import numpy as np\n'), ((10489, 11002), 'numpy.array', 'np.array', (['[[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[2, 0, 0, 0], [\n 0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 2, 0],\n [0, 0, 0, 0], [0, 0, 0, 2]], [[0, 0, 0, 2], [0, 0, 0, 0], [0, 0, 2, 0],\n [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0, 0, 0]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], [[2, 0, 0, 0],\n [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 2, 0, 0],\n [0, 0, 0, 0], [2, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[2, 0,\n 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0,\n 2, 0], [0, 0, 0, 0], [0, 0, 0, 2]], [[0, 0, 0, 2], [0, 0, 0, 0], [0, 0,\n 2, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0,\n 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], [[2, \n 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0,\n 2, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0]]], dtype=np.int)\n', (10497, 11002), True, 'import numpy as np\n'), ((11575, 11604), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (11602, 11604), False, 'import training_data\n'), ((11682, 11711), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (11709, 11711), False, 'import training_data\n'), ((11818, 11962), 'numpy.array', 'np.array', (['[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (11826, 11962), True, 'import numpy as np\n'), ((12024, 12058), 'numpy.array', 'np.array', (['[[1], [2]]'], {'dtype': 'np.int'}), '([[1], [2]], dtype=np.int)\n', (12032, 12058), True, 'import numpy as np\n'), ((12123, 12160), 'numpy.array', 'np.array', (['[[16], [0]]'], {'dtype': 'np.float'}), '([[16], [0]], dtype=np.float)\n', (12131, 12160), True, 'import numpy as np\n'), ((12225, 12369), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [\n 1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], 
dtype=np.int)\n', (12233, 12369), True, 'import numpy as np\n'), ((12689, 12718), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (12716, 12718), False, 'import training_data\n'), ((12796, 12825), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (12823, 12825), False, 'import training_data\n'), ((13449, 13478), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (13476, 13478), False, 'import training_data\n'), ((13979, 14008), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (14006, 14008), False, 'import training_data\n'), ((14209, 14238), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (14236, 14238), False, 'import training_data\n'), ((14668, 14730), 'numpy.array', 'np.array', (['[[0], [1], [2], [4], [6.2288], [11]]'], {'dtype': 'np.float'}), '([[0], [1], [2], [4], [6.2288], [11]], dtype=np.float)\n', (14676, 14730), True, 'import numpy as np\n'), ((14844, 14898), 'numpy.array', 'np.array', (['[[0], [1], [2], [3], [0], [1]]'], {'dtype': 'np.int'}), '([[0], [1], [2], [3], [0], [1]], dtype=np.int)\n', (14852, 14898), True, 'import numpy as np\n'), ((15074, 15103), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (15101, 15103), False, 'import training_data\n'), ((15508, 15568), 'numpy.array', 'np.array', (['[[20.218], [18.02], [17.8], [2.0]]'], {'dtype': 'np.float'}), '([[20.218], [18.02], [17.8], [2.0]], dtype=np.float)\n', (15516, 15568), True, 'import numpy as np\n'), ((15610, 15657), 'numpy.allclose', 'np.allclose', (['discounted_return', 'expected_return'], {}), '(discounted_return, expected_return)\n', (15621, 15657), True, 'import numpy as np\n'), ((15846, 15893), 'numpy.array', 'np.array', (['[[4], [2], [16], [2]]'], {'dtype': 'np.float'}), '([[4], [2], [16], [2]], dtype=np.float)\n', (15854, 15893), True, 'import numpy as np\n'), ((15935, 15982), 'numpy.allclose', 'np.allclose', (['discounted_return', 'expected_return'], {}), '(discounted_return, expected_return)\n', (15946, 15982), True, 'import numpy as np\n'), ((16028, 16057), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (16055, 16057), False, 'import training_data\n'), ((16419, 16474), 'numpy.array', 'np.array', (['[[5.8], [2.0], [17.8], [2.0]]'], {'dtype': 'np.float'}), '([[5.8], [2.0], [17.8], [2.0]], dtype=np.float)\n', (16427, 16474), True, 'import numpy as np\n'), ((16516, 16563), 'numpy.allclose', 'np.allclose', (['discounted_return', 'expected_return'], {}), '(discounted_return, expected_return)\n', (16527, 16563), True, 'import numpy as np\n'), ((16671, 16700), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (16698, 16700), False, 'import training_data\n'), ((17007, 17071), 'numpy.array', 'np.array', (['[[-0.8165], [-0.8165], [0.0], [1.633]]'], {'dtype': 'np.float'}), '([[-0.8165], [-0.8165], [0.0], [1.633]], dtype=np.float)\n', (17015, 17071), True, 'import numpy as np\n'), ((17226, 17255), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (17253, 17255), False, 'import training_data\n'), ((17574, 17630), 'numpy.array', 'np.array', (['[[-4.0], [-4.0], [0.0], [8.0]]'], {'dtype': 'np.float'}), '([[-4.0], [-4.0], [0.0], [8.0]], dtype=np.float)\n', (17582, 17630), True, 'import numpy as np\n'), ((17821, 17850), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (17848, 17850), False, 'import training_data\n'), 
((18372, 18642), 'numpy.array', 'np.array', (['[[[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]], [[b, b, b, b], [\n b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [c, c, c, c],\n [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d], [d, d, d, d],\n [d, d, d, d]]]'], {'dtype': 'np.float'}), '([[[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]], [[b, b,\n b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [c, c,\n c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d], [d, d,\n d, d], [d, d, d, d]]], dtype=np.float)\n', (18380, 18642), True, 'import numpy as np\n'), ((18770, 19040), 'numpy.array', 'np.array', (['[[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [\n c, c, c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d],\n [d, d, d, d], [d, d, d, d]], [[e, e, e, e], [e, e, e, e], [e, e, e, e],\n [e, e, e, e]]]'], {'dtype': 'np.float'}), '([[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c,\n c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d,\n d, d], [d, d, d, d], [d, d, d, d]], [[e, e, e, e], [e, e, e, e], [e, e,\n e, e], [e, e, e, e]]], dtype=np.float)\n', (18778, 19040), True, 'import numpy as np\n'), ((19230, 19259), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (19257, 19259), False, 'import training_data\n'), ((19529, 19547), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (19545, 19547), False, 'import tempfile\n'), ((19572, 19606), 'os.path.join', 'os.path.join', (['temp_dir', '"""data.csv"""'], {}), "(temp_dir, 'data.csv')\n", (19584, 19606), False, 'import os\n'), ((19659, 19688), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (19686, 19688), False, 'import training_data\n'), ((19749, 20017), 'numpy.array', 'np.array', (['[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1],\n [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],\n [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0,\n 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (19757, 20017), True, 'import numpy as np\n'), ((20095, 20139), 'numpy.array', 'np.array', (['[[0], [1], [2], [3]]'], {'dtype': 'np.int'}), '([[0], [1], [2], [3]], dtype=np.int)\n', (20103, 20139), True, 'import numpy as np\n'), ((20228, 20275), 'numpy.array', 'np.array', (['[[4], [2], [16], [2]]'], {'dtype': 'np.float'}), '([[4], [2], [16], [2]], dtype=np.float)\n', (20236, 20275), True, 'import numpy as np\n'), ((20364, 20632), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [\n 1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0],\n [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1],\n [1, 1, 1, 1]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1,\n 1, 1], [1, 1, 1, 1]]], dtype=np.int)\n', (20372, 20632), True, 'import numpy as np\n'), ((20940, 20964), 'os.remove', 'os.remove', (['temp_filename'], {}), '(temp_filename)\n', (20949, 20964), False, 'import os\n'), 
((20973, 20991), 'os.rmdir', 'os.rmdir', (['temp_dir'], {}), '(temp_dir)\n', (20981, 20991), False, 'import os\n'), ((21034, 21063), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (21061, 21063), False, 'import training_data\n'), ((21744, 21773), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (21771, 21773), False, 'import training_data\n'), ((22076, 22220), 'numpy.array', 'np.array', (['[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (22084, 22220), True, 'import numpy as np\n'), ((22282, 22316), 'numpy.array', 'np.array', (['[[0], [1]]'], {'dtype': 'np.int'}), '([[0], [1]], dtype=np.int)\n', (22290, 22316), True, 'import numpy as np\n'), ((22381, 22417), 'numpy.array', 'np.array', (['[[4], [2]]'], {'dtype': 'np.float'}), '([[4], [2]], dtype=np.float)\n', (22389, 22417), True, 'import numpy as np\n'), ((22482, 22626), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [\n 1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], dtype=np.int)\n', (22490, 22626), True, 'import numpy as np\n'), ((280, 313), 'numpy.empty', 'np.empty', (['[0, 4, 4]'], {'dtype': 'np.int'}), '([0, 4, 4], dtype=np.int)\n', (288, 313), True, 'import numpy as np\n'), ((363, 393), 'numpy.empty', 'np.empty', (['[0, 1]'], {'dtype': 'np.int'}), '([0, 1], dtype=np.int)\n', (371, 393), True, 'import numpy as np\n'), ((439, 471), 'numpy.empty', 'np.empty', (['[0, 1]'], {'dtype': 'np.float'}), '([0, 1], dtype=np.float)\n', (447, 471), True, 'import numpy as np\n'), ((520, 553), 'numpy.empty', 'np.empty', (['[0, 4, 4]'], {'dtype': 'np.int'}), '([0, 4, 4], dtype=np.int)\n', (528, 553), True, 'import numpy as np\n'), ((600, 631), 'numpy.empty', 'np.empty', (['[0, 1]'], {'dtype': 'np.bool'}), '([0, 1], dtype=np.bool)\n', (608, 631), True, 'import numpy as np\n'), ((648, 666), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (655, 666), True, 'import numpy as np\n'), ((674, 693), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (682, 693), True, 'import numpy as np\n'), ((743, 775), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {'dtype': 'np.int'}), '([1, 4, 4], dtype=np.int)\n', (750, 775), True, 'import numpy as np\n'), ((825, 854), 'numpy.array', 'np.array', (['[[1]]'], {'dtype': 'np.int'}), '([[1]], dtype=np.int)\n', (833, 854), True, 'import numpy as np\n'), ((900, 931), 'numpy.array', 'np.array', (['[[4]]'], {'dtype': 'np.float'}), '([[4]], dtype=np.float)\n', (908, 931), True, 'import numpy as np\n'), ((980, 1013), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {'dtype': 'np.int'}), '([1, 4, 4], dtype=np.int)\n', (988, 1013), True, 'import numpy as np\n'), ((1060, 1090), 'numpy.array', 'np.array', (['[[1]]'], {'dtype': 'np.bool'}), '([[1]], dtype=np.bool)\n', (1068, 1090), True, 'import numpy as np\n'), ((1185, 1203), 'numpy.full', 'np.full', (['[4, 4]', '(2)'], {}), '([4, 4], 2)\n', (1192, 1203), True, 'import numpy as np\n'), ((1211, 1227), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (1219, 1227), True, 'import numpy as np\n'), ((1244, 1262), 'numpy.full', 'np.full', (['[4, 4]', '(8)'], {}), '([4, 4], 
8)\n', (1251, 1262), True, 'import numpy as np\n'), ((1270, 1285), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (1277, 1285), True, 'import numpy as np\n'), ((1302, 1323), 'numpy.full', 'np.full', (['[4, 4]', '(2048)'], {}), '([4, 4], 2048)\n', (1309, 1323), True, 'import numpy as np\n'), ((1331, 1346), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (1338, 1346), True, 'import numpy as np\n'), ((4699, 4714), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4706, 4714), True, 'import numpy as np\n'), ((4722, 4738), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4730, 4738), True, 'import numpy as np\n'), ((4755, 4771), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4763, 4771), True, 'import numpy as np\n'), ((4779, 4794), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4786, 4794), True, 'import numpy as np\n'), ((4811, 4827), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4819, 4827), True, 'import numpy as np\n'), ((4835, 4850), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4842, 4850), True, 'import numpy as np\n'), ((4867, 4883), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4875, 4883), True, 'import numpy as np\n'), ((4891, 4906), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4898, 4906), True, 'import numpy as np\n'), ((5246, 5261), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5253, 5261), True, 'import numpy as np\n'), ((5269, 5285), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5277, 5285), True, 'import numpy as np\n'), ((5302, 5318), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5310, 5318), True, 'import numpy as np\n'), ((5326, 5341), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5333, 5341), True, 'import numpy as np\n'), ((5358, 5374), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5366, 5374), True, 'import numpy as np\n'), ((5383, 5398), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5390, 5398), True, 'import numpy as np\n'), ((5415, 5431), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5423, 5431), True, 'import numpy as np\n'), ((5440, 5455), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5447, 5455), True, 'import numpy as np\n'), ((5596, 5614), 'numpy.full', 'np.full', (['(4, 4)', '(1)'], {}), '((4, 4), 1)\n', (5603, 5614), True, 'import numpy as np\n'), ((5622, 5640), 'numpy.full', 'np.full', (['(4, 4)', '(2)'], {}), '((4, 4), 2)\n', (5629, 5640), True, 'import numpy as np\n'), ((5657, 5675), 'numpy.full', 'np.full', (['(4, 4)', '(2)'], {}), '((4, 4), 2)\n', (5664, 5675), True, 'import numpy as np\n'), ((5683, 5701), 'numpy.full', 'np.full', (['(4, 4)', '(4)'], {}), '((4, 4), 4)\n', (5690, 5701), True, 'import numpy as np\n'), ((5830, 5845), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5837, 5845), True, 'import numpy as np\n'), ((5853, 5869), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5861, 5869), True, 'import numpy as np\n'), ((5886, 5902), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5894, 5902), True, 'import numpy as np\n'), ((5910, 5925), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5917, 5925), True, 'import numpy as np\n'), ((6028, 6058), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {'dtype': 'np.int'}), '([4, 4], dtype=np.int)\n', (6036, 6058), True, 'import numpy as np\n'), ((6112, 6130), 'pytest.approx', 'pytest.approx', (['(8.0)'], {}), '(8.0)\n', (6125, 6130), False, 'import pytest\n'), 
((6172, 6201), 'numpy.ones', 'np.ones', (['[4, 4]'], {'dtype': 'np.int'}), '([4, 4], dtype=np.int)\n', (6179, 6201), True, 'import numpy as np\n'), ((11620, 11638), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11627, 11638), True, 'import numpy as np\n'), ((11647, 11666), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11655, 11666), True, 'import numpy as np\n'), ((11728, 11747), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11736, 11747), True, 'import numpy as np\n'), ((11755, 11773), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11762, 11773), True, 'import numpy as np\n'), ((12734, 12752), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12741, 12752), True, 'import numpy as np\n'), ((12761, 12780), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12769, 12780), True, 'import numpy as np\n'), ((12842, 12861), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12850, 12861), True, 'import numpy as np\n'), ((12869, 12887), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12876, 12887), True, 'import numpy as np\n'), ((12978, 12996), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12985, 12996), True, 'import numpy as np\n'), ((13152, 13171), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13160, 13171), True, 'import numpy as np\n'), ((13214, 13233), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13222, 13233), True, 'import numpy as np\n'), ((13388, 13406), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13395, 13406), True, 'import numpy as np\n'), ((13494, 13513), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13502, 13513), True, 'import numpy as np\n'), ((13521, 13540), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13529, 13540), True, 'import numpy as np\n'), ((13557, 13575), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13564, 13575), True, 'import numpy as np\n'), ((13583, 13601), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13590, 13601), True, 'import numpy as np\n'), ((14054, 14072), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14061, 14072), True, 'import numpy as np\n'), ((14080, 14099), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14088, 14099), True, 'import numpy as np\n'), ((14254, 14272), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14261, 14272), True, 'import numpy as np\n'), ((14280, 14299), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14288, 14299), True, 'import numpy as np\n'), ((14316, 14334), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14323, 14334), True, 'import numpy as np\n'), ((14342, 14361), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14350, 14361), True, 'import numpy as np\n'), ((14378, 14396), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14385, 14396), True, 'import numpy as np\n'), ((14404, 14423), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14412, 14423), True, 'import numpy as np\n'), ((14440, 14458), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14447, 14458), True, 'import numpy as np\n'), ((14467, 14486), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14475, 14486), True, 'import numpy as np\n'), ((14503, 14521), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 
4])\n', (14510, 14521), True, 'import numpy as np\n'), ((14530, 14549), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14538, 14549), True, 'import numpy as np\n'), ((14566, 14584), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14573, 14584), True, 'import numpy as np\n'), ((14595, 14614), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14603, 14614), True, 'import numpy as np\n'), ((15119, 15137), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15126, 15137), True, 'import numpy as np\n'), ((15145, 15164), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15153, 15164), True, 'import numpy as np\n'), ((15181, 15199), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15188, 15199), True, 'import numpy as np\n'), ((15207, 15226), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15215, 15226), True, 'import numpy as np\n'), ((15243, 15261), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15250, 15261), True, 'import numpy as np\n'), ((15270, 15289), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15278, 15289), True, 'import numpy as np\n'), ((15306, 15324), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15313, 15324), True, 'import numpy as np\n'), ((15332, 15351), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15340, 15351), True, 'import numpy as np\n'), ((16074, 16092), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16081, 16092), True, 'import numpy as np\n'), ((16100, 16119), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16108, 16119), True, 'import numpy as np\n'), ((16144, 16162), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16151, 16162), True, 'import numpy as np\n'), ((16170, 16189), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16178, 16189), True, 'import numpy as np\n'), ((16213, 16231), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16220, 16231), True, 'import numpy as np\n'), ((16240, 16259), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16248, 16259), True, 'import numpy as np\n'), ((16284, 16302), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16291, 16302), True, 'import numpy as np\n'), ((16310, 16329), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16318, 16329), True, 'import numpy as np\n'), ((16716, 16734), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16723, 16734), True, 'import numpy as np\n'), ((16742, 16761), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16750, 16761), True, 'import numpy as np\n'), ((16778, 16796), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16785, 16796), True, 'import numpy as np\n'), ((16804, 16823), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16812, 16823), True, 'import numpy as np\n'), ((16840, 16858), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16847, 16858), True, 'import numpy as np\n'), ((16866, 16885), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16874, 16885), True, 'import numpy as np\n'), ((16902, 16920), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16909, 16920), True, 'import numpy as np\n'), ((16929, 16948), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16937, 16948), True, 'import numpy as np\n'), ((17271, 17289), 'numpy.ones', 'np.ones', 
(['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17278, 17289), True, 'import numpy as np\n'), ((17297, 17316), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17305, 17316), True, 'import numpy as np\n'), ((17333, 17351), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17340, 17351), True, 'import numpy as np\n'), ((17359, 17378), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17367, 17378), True, 'import numpy as np\n'), ((17395, 17413), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17402, 17413), True, 'import numpy as np\n'), ((17421, 17440), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17429, 17440), True, 'import numpy as np\n'), ((17457, 17475), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17464, 17475), True, 'import numpy as np\n'), ((17484, 17503), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17492, 17503), True, 'import numpy as np\n'), ((17866, 17887), 'numpy.full', 'np.full', (['(1, 4, 4)', '(4)'], {}), '((1, 4, 4), 4)\n', (17873, 17887), True, 'import numpy as np\n'), ((17895, 17916), 'numpy.full', 'np.full', (['(1, 4, 4)', '(8)'], {}), '((1, 4, 4), 8)\n', (17902, 17916), True, 'import numpy as np\n'), ((17933, 17954), 'numpy.full', 'np.full', (['(1, 4, 4)', '(8)'], {}), '((1, 4, 4), 8)\n', (17940, 17954), True, 'import numpy as np\n'), ((17962, 17984), 'numpy.full', 'np.full', (['(1, 4, 4)', '(16)'], {}), '((1, 4, 4), 16)\n', (17969, 17984), True, 'import numpy as np\n'), ((18001, 18023), 'numpy.full', 'np.full', (['(1, 4, 4)', '(16)'], {}), '((1, 4, 4), 16)\n', (18008, 18023), True, 'import numpy as np\n'), ((18031, 18053), 'numpy.full', 'np.full', (['(1, 4, 4)', '(32)'], {}), '((1, 4, 4), 32)\n', (18038, 18053), True, 'import numpy as np\n'), ((18070, 18092), 'numpy.full', 'np.full', (['(1, 4, 4)', '(32)'], {}), '((1, 4, 4), 32)\n', (18077, 18092), True, 'import numpy as np\n'), ((18100, 18122), 'numpy.full', 'np.full', (['(1, 4, 4)', '(64)'], {}), '((1, 4, 4), 64)\n', (18107, 18122), True, 'import numpy as np\n'), ((19275, 19293), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19282, 19293), True, 'import numpy as np\n'), ((19301, 19320), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19309, 19320), True, 'import numpy as np\n'), ((19337, 19356), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19345, 19356), True, 'import numpy as np\n'), ((19364, 19382), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19371, 19382), True, 'import numpy as np\n'), ((19399, 19417), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19406, 19417), True, 'import numpy as np\n'), ((19426, 19445), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19434, 19445), True, 'import numpy as np\n'), ((19462, 19481), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19470, 19481), True, 'import numpy as np\n'), ((19489, 19507), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19496, 19507), True, 'import numpy as np\n'), ((21789, 21807), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21796, 21807), True, 'import numpy as np\n'), ((21815, 21834), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21823, 21834), True, 'import numpy as np\n'), ((21851, 21870), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21859, 21870), True, 'import numpy as np\n'), ((21878, 21896), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], 
{}), '([1, 4, 4])\n', (21885, 21896), True, 'import numpy as np\n'), ((21913, 21931), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21920, 21931), True, 'import numpy as np\n'), ((21940, 21959), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21948, 21959), True, 'import numpy as np\n'), ((21976, 21995), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21984, 21995), True, 'import numpy as np\n'), ((22003, 22021), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (22010, 22021), True, 'import numpy as np\n'), ((13811, 13830), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13819, 13830), True, 'import numpy as np\n'), ((13920, 13938), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13927, 13938), True, 'import numpy as np\n'), ((21160, 21181), 'numpy.full', 'np.full', (['(1, 4, 4)', 'i'], {}), '((1, 4, 4), i)\n', (21167, 21181), True, 'import numpy as np\n'), ((21189, 21210), 'numpy.full', 'np.full', (['(1, 4, 4)', 'i'], {}), '((1, 4, 4), i)\n', (21196, 21210), True, 'import numpy as np\n'), ((21551, 21568), 'numpy.dtype', 'np.dtype', (['np.bool'], {}), '(np.bool)\n', (21559, 21568), True, 'import numpy as np\n')]
|