body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
---|---|---|---|---|---|---|---|---|---|
f0c4de5b1bbf8b36bdcbd18b95ab18f5bb667284594f92a4f2903931aecc081d | def load_keypoints(file_path):
'\n Load keypoints from a specific file as tuples\n\n Parameters\n ----------\n file_path : str\n path to the file with keypoints\n\n Returns\n -------\n keypoints : list of tuples\n list of keypoint tuples in format (x, y, obj_class)\n\n Note\n ----\n This function serves as helper for the pointdet.utils.dataset.PointsDataset class\n and probably should be moved there\n '
keypoints = []
with open(file_path, 'r') as labels_file:
for line in labels_file:
line_contents = line.strip().split(' ')
line_floated = tuple((int(float(x)) for x in line_contents))
(x_center, y_center, obj_class) = tuple(line_floated)
if (obj_class == 2):
obj_class = 0
keypoint = (x_center, y_center, obj_class)
keypoints.append(keypoint)
return keypoints | Load keypoints from a specific file as tuples
Parameters
----------
file_path : str
path to the file with keypoints
Returns
-------
keypoints : list of tuples
list of keypoint tuples in format (x, y, obj_class)
Note
----
This function serves as helper for the pointdet.utils.dataset.PointsDataset class
and probably should be moved there | contextnet/utils/utils.py | load_keypoints | ushakovegor/ContextNetwork | 0 | python | def load_keypoints(file_path):
'\n Load keypoints from a specific file as tuples\n\n Parameters\n ----------\n file_path : str\n path to the file with keypoints\n\n Returns\n -------\n keypoints : list of tuples\n list of keypoint tuples in format (x, y, obj_class)\n\n Note\n ----\n This function serves as helper for the pointdet.utils.dataset.PointsDataset class\n and probably should be moved there\n '
keypoints = []
with open(file_path, 'r') as labels_file:
for line in labels_file:
line_contents = line.strip().split(' ')
line_floated = tuple((int(float(x)) for x in line_contents))
(x_center, y_center, obj_class) = tuple(line_floated)
if (obj_class == 2):
obj_class = 0
keypoint = (x_center, y_center, obj_class)
keypoints.append(keypoint)
return keypoints | def load_keypoints(file_path):
'\n Load keypoints from a specific file as tuples\n\n Parameters\n ----------\n file_path : str\n path to the file with keypoints\n\n Returns\n -------\n keypoints : list of tuples\n list of keypoint tuples in format (x, y, obj_class)\n\n Note\n ----\n This function serves as helper for the pointdet.utils.dataset.PointsDataset class\n and probably should be moved there\n '
keypoints = []
with open(file_path, 'r') as labels_file:
for line in labels_file:
line_contents = line.strip().split(' ')
line_floated = tuple((int(float(x)) for x in line_contents))
(x_center, y_center, obj_class) = tuple(line_floated)
if (obj_class == 2):
obj_class = 0
keypoint = (x_center, y_center, obj_class)
keypoints.append(keypoint)
return keypoints<|docstring|>Load keypoints from a specific file as tuples
Parameters
----------
file_path : str
path to the file with keypoints
Returns
-------
keypoints : list of tuples
list of keypoint tuples in format (x, y, obj_class)
Note
----
This function serves as helper for the pointdet.utils.dataset.PointsDataset class
and probably should be moved there<|endoftext|> |
7ffb869d7eb856815ccd5f71c0d09015a01347e87f26c1aa9ca2e100de8b092a | def parse_master_yaml(yaml_path):
'\n Imports master yaml and converts paths to make the usable from inside the script\n\n Parameters\n ----------\n yaml_path : str\n path to master yaml from the script\n\n Returns\n -------\n lists : dict of list of str\n dict with lists pf converted paths\n '
with open(yaml_path, 'r') as file:
lists = yaml.safe_load(file)
for (list_type, paths_list) in lists.items():
new_paths_list = []
for path in paths_list:
new_path = os.path.join(os.path.dirname(yaml_path), path)
new_path = os.path.normpath(new_path)
new_paths_list.append(new_path)
lists[list_type] = new_paths_list
return lists | Imports master yaml and converts paths to make the usable from inside the script
Parameters
----------
yaml_path : str
path to master yaml from the script
Returns
-------
lists : dict of list of str
dict with lists pf converted paths | contextnet/utils/utils.py | parse_master_yaml | ushakovegor/ContextNetwork | 0 | python | def parse_master_yaml(yaml_path):
'\n Imports master yaml and converts paths to make the usable from inside the script\n\n Parameters\n ----------\n yaml_path : str\n path to master yaml from the script\n\n Returns\n -------\n lists : dict of list of str\n dict with lists pf converted paths\n '
with open(yaml_path, 'r') as file:
lists = yaml.safe_load(file)
for (list_type, paths_list) in lists.items():
new_paths_list = []
for path in paths_list:
new_path = os.path.join(os.path.dirname(yaml_path), path)
new_path = os.path.normpath(new_path)
new_paths_list.append(new_path)
lists[list_type] = new_paths_list
return lists | def parse_master_yaml(yaml_path):
'\n Imports master yaml and converts paths to make the usable from inside the script\n\n Parameters\n ----------\n yaml_path : str\n path to master yaml from the script\n\n Returns\n -------\n lists : dict of list of str\n dict with lists pf converted paths\n '
with open(yaml_path, 'r') as file:
lists = yaml.safe_load(file)
for (list_type, paths_list) in lists.items():
new_paths_list = []
for path in paths_list:
new_path = os.path.join(os.path.dirname(yaml_path), path)
new_path = os.path.normpath(new_path)
new_paths_list.append(new_path)
lists[list_type] = new_paths_list
return lists<|docstring|>Imports master yaml and converts paths to make the usable from inside the script
Parameters
----------
yaml_path : str
path to master yaml from the script
Returns
-------
lists : dict of list of str
dict with lists pf converted paths<|endoftext|> |
406a98c0a8755fd68c5e292c0f71174f4df3e64d11a37d9fb374e703c0c9a590 | def compute_distances_no_loops(Y, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using no explicit loops.\n\n Input / Output: Same as compute_distances_two_loops\n '
dists = np.zeros((Y.shape[0], X.shape[0]))
dists -= ((2 * X) @ Y.T)
dists += np.sum((Y ** 2), axis=1)
dists = (dists.T + np.sum((X ** 2), axis=1))
dists = dists.T
return np.sqrt(dists) | Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops | contextnet/utils/utils.py | compute_distances_no_loops | ushakovegor/ContextNetwork | 0 | python | def compute_distances_no_loops(Y, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using no explicit loops.\n\n Input / Output: Same as compute_distances_two_loops\n '
dists = np.zeros((Y.shape[0], X.shape[0]))
dists -= ((2 * X) @ Y.T)
dists += np.sum((Y ** 2), axis=1)
dists = (dists.T + np.sum((X ** 2), axis=1))
dists = dists.T
return np.sqrt(dists) | def compute_distances_no_loops(Y, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using no explicit loops.\n\n Input / Output: Same as compute_distances_two_loops\n '
dists = np.zeros((Y.shape[0], X.shape[0]))
dists -= ((2 * X) @ Y.T)
dists += np.sum((Y ** 2), axis=1)
dists = (dists.T + np.sum((X ** 2), axis=1))
dists = dists.T
return np.sqrt(dists)<|docstring|>Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops<|endoftext|> |
2f5b5288e19cc0f3123c05c9a72b089e93ebd3bccf99651fc5ac2cb448764b75 | def measure(self, object1, object2):
'\n Returns the measure value between two objects\n '
return 0 | Returns the measure value between two objects | contextnet/utils/utils.py | measure | ushakovegor/ContextNetwork | 0 | python | def measure(self, object1, object2):
'\n \n '
return 0 | def measure(self, object1, object2):
'\n \n '
return 0<|docstring|>Returns the measure value between two objects<|endoftext|> |
be53767f60000534d3f848e3acafbedf8c66b146fd92a406d9ba5af92e4e8786 | def matrix(self, container1, container2):
'\n Returns the matrix of measure values between two sets of objects\n Sometimes can be implemented in a faster way than making couplewise measurements\n '
matrix = np.zeros((len(container1), len(container2)))
for (i, object1) in enumerate(container1):
for (j, object2) in enumerate(container2):
matrix[(i, j)] = self.measure(object1, object2)
return matrix | Returns the matrix of measure values between two sets of objects
Sometimes can be implemented in a faster way than making couplewise measurements | contextnet/utils/utils.py | matrix | ushakovegor/ContextNetwork | 0 | python | def matrix(self, container1, container2):
'\n Returns the matrix of measure values between two sets of objects\n Sometimes can be implemented in a faster way than making couplewise measurements\n '
matrix = np.zeros((len(container1), len(container2)))
for (i, object1) in enumerate(container1):
for (j, object2) in enumerate(container2):
matrix[(i, j)] = self.measure(object1, object2)
return matrix | def matrix(self, container1, container2):
'\n Returns the matrix of measure values between two sets of objects\n Sometimes can be implemented in a faster way than making couplewise measurements\n '
matrix = np.zeros((len(container1), len(container2)))
for (i, object1) in enumerate(container1):
for (j, object2) in enumerate(container2):
matrix[(i, j)] = self.measure(object1, object2)
return matrix<|docstring|>Returns the matrix of measure values between two sets of objects
Sometimes can be implemented in a faster way than making couplewise measurements<|endoftext|> |
41b9ac6e745e94aea266cf5ad488492adb61e200d7a7b89efb7399222547a608 | def pixel_histogram(img, nbits=None, ax=None, log_scale=True):
'\n Plot pixel value histogram.\n\n Parameters\n ----------\n img : py:class:`~numpy.ndarray`\n 2D or 3D image.\n nbits : int, optional\n Bit-depth of camera data.\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n log_scale : bool, optional\n Whether to use log scale in counting number of pixels.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n '
if (ax is None):
(_, ax) = plt.subplots()
if nbits:
max_val = ((2 ** nbits) - 1)
else:
max_val = int(img.max())
if (len(img.shape) == 3):
color_order = ('r', 'g', 'b')
for (i, col) in enumerate(color_order):
(hist, bins) = np.histogram(img[(:, :, i)].ravel(), bins=max_val, range=[0, (max_val + 1)])
ax.plot(hist, color=col)
else:
vals = img.flatten()
(hist, bins) = np.histogram(vals, bins=max_val, range=[0, (max_val + 1)])
ax.plot(hist, color='gray')
ax.set_xlim([(max_val - (1.1 * max_val)), (max_val * 1.1)])
if log_scale:
ax.set_yscale('log')
ax.set_xlabel('Pixel value')
ax.grid()
return ax | Plot pixel value histogram.
Parameters
----------
img : py:class:`~numpy.ndarray`
2D or 3D image.
nbits : int, optional
Bit-depth of camera data.
ax : :py:class:`~matplotlib.axes.Axes`, optional
`Axes` object to fill, default is to create one.
log_scale : bool, optional
Whether to use log scale in counting number of pixels.
Return
------
ax : :py:class:`~matplotlib.axes.Axes` | DiffuserCam/diffcam/plot.py | pixel_histogram | WilliamCappelletti/diffusercam-project | 0 | python | def pixel_histogram(img, nbits=None, ax=None, log_scale=True):
'\n Plot pixel value histogram.\n\n Parameters\n ----------\n img : py:class:`~numpy.ndarray`\n 2D or 3D image.\n nbits : int, optional\n Bit-depth of camera data.\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n log_scale : bool, optional\n Whether to use log scale in counting number of pixels.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n '
if (ax is None):
(_, ax) = plt.subplots()
if nbits:
max_val = ((2 ** nbits) - 1)
else:
max_val = int(img.max())
if (len(img.shape) == 3):
color_order = ('r', 'g', 'b')
for (i, col) in enumerate(color_order):
(hist, bins) = np.histogram(img[(:, :, i)].ravel(), bins=max_val, range=[0, (max_val + 1)])
ax.plot(hist, color=col)
else:
vals = img.flatten()
(hist, bins) = np.histogram(vals, bins=max_val, range=[0, (max_val + 1)])
ax.plot(hist, color='gray')
ax.set_xlim([(max_val - (1.1 * max_val)), (max_val * 1.1)])
if log_scale:
ax.set_yscale('log')
ax.set_xlabel('Pixel value')
ax.grid()
return ax | def pixel_histogram(img, nbits=None, ax=None, log_scale=True):
'\n Plot pixel value histogram.\n\n Parameters\n ----------\n img : py:class:`~numpy.ndarray`\n 2D or 3D image.\n nbits : int, optional\n Bit-depth of camera data.\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n log_scale : bool, optional\n Whether to use log scale in counting number of pixels.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n '
if (ax is None):
(_, ax) = plt.subplots()
if nbits:
max_val = ((2 ** nbits) - 1)
else:
max_val = int(img.max())
if (len(img.shape) == 3):
color_order = ('r', 'g', 'b')
for (i, col) in enumerate(color_order):
(hist, bins) = np.histogram(img[(:, :, i)].ravel(), bins=max_val, range=[0, (max_val + 1)])
ax.plot(hist, color=col)
else:
vals = img.flatten()
(hist, bins) = np.histogram(vals, bins=max_val, range=[0, (max_val + 1)])
ax.plot(hist, color='gray')
ax.set_xlim([(max_val - (1.1 * max_val)), (max_val * 1.1)])
if log_scale:
ax.set_yscale('log')
ax.set_xlabel('Pixel value')
ax.grid()
return ax<|docstring|>Plot pixel value histogram.
Parameters
----------
img : py:class:`~numpy.ndarray`
2D or 3D image.
nbits : int, optional
Bit-depth of camera data.
ax : :py:class:`~matplotlib.axes.Axes`, optional
`Axes` object to fill, default is to create one.
log_scale : bool, optional
Whether to use log scale in counting number of pixels.
Return
------
ax : :py:class:`~matplotlib.axes.Axes`<|endoftext|> |
4ebe2b22f624a50525973beaca2e62fff69c7aaae783aae85f074c8be89ad500 | def plot_cross_section(vals, idx=None, ax=None, dB=True, plot_db_drop=3, min_val=0.0001, max_val=None, plot_width=None, **kwargs):
'\n Plot cross-section of a 2-D image.\n\n Parameters\n ----------\n vals : py:class:`~numpy.ndarray`\n 2-D image data.\n idx : int, optional\n Row for which to plot cross-section. Default is to take middle.\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n dB : bool, optional\n Whether to plot in dB scale.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n '
if (ax is None):
(_, ax) = plt.subplots()
if (idx is None):
max_idx = np.unravel_index(np.argmax(vals, axis=None), vals.shape)
idx = max_idx[0]
cross_section = vals[(idx, :)].astype(np.float32)
if (max_val is None):
max_val = cross_section.max()
cross_section /= max_val
min_val = max(min_val, cross_section.min())
if dB:
cross_section[(cross_section < min_val)] = min_val
cross_section = (10 * np.log10(cross_section))
min_val = (10 * np.log10(min_val))
ax.set_ylabel('dB')
x_vals = np.arange(len(cross_section))
x_vals -= np.argmax(cross_section)
ax.plot(x_vals, cross_section, **kwargs)
ax.set_ylim([min_val, 0])
if (plot_width is not None):
half_width = ((plot_width // 2) + 1)
ax.set_xlim([(- half_width), half_width])
ax.grid()
if (dB and plot_db_drop):
cross_section -= np.max(cross_section)
zero_crossings = np.where(np.diff(np.signbit((cross_section + plot_db_drop))))[0]
if (len(zero_crossings) >= 2):
zero_crossings -= np.argmax(cross_section)
width = (zero_crossings[(- 1)] - zero_crossings[0])
ax.set_title(f'-{plot_db_drop}dB width = {width}')
ax.axvline(x=zero_crossings[0], c='k', linestyle='--')
ax.axvline(x=zero_crossings[(- 1)], c='k', linestyle='--')
else:
warnings.warn('Width could not be determined. Did not detect two -{} points : {}'.format(plot_db_drop, zero_crossings))
return (ax, cross_section) | Plot cross-section of a 2-D image.
Parameters
----------
vals : py:class:`~numpy.ndarray`
2-D image data.
idx : int, optional
Row for which to plot cross-section. Default is to take middle.
ax : :py:class:`~matplotlib.axes.Axes`, optional
`Axes` object to fill, default is to create one.
dB : bool, optional
Whether to plot in dB scale.
Return
------
ax : :py:class:`~matplotlib.axes.Axes` | DiffuserCam/diffcam/plot.py | plot_cross_section | WilliamCappelletti/diffusercam-project | 0 | python | def plot_cross_section(vals, idx=None, ax=None, dB=True, plot_db_drop=3, min_val=0.0001, max_val=None, plot_width=None, **kwargs):
'\n Plot cross-section of a 2-D image.\n\n Parameters\n ----------\n vals : py:class:`~numpy.ndarray`\n 2-D image data.\n idx : int, optional\n Row for which to plot cross-section. Default is to take middle.\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n dB : bool, optional\n Whether to plot in dB scale.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n '
if (ax is None):
(_, ax) = plt.subplots()
if (idx is None):
max_idx = np.unravel_index(np.argmax(vals, axis=None), vals.shape)
idx = max_idx[0]
cross_section = vals[(idx, :)].astype(np.float32)
if (max_val is None):
max_val = cross_section.max()
cross_section /= max_val
min_val = max(min_val, cross_section.min())
if dB:
cross_section[(cross_section < min_val)] = min_val
cross_section = (10 * np.log10(cross_section))
min_val = (10 * np.log10(min_val))
ax.set_ylabel('dB')
x_vals = np.arange(len(cross_section))
x_vals -= np.argmax(cross_section)
ax.plot(x_vals, cross_section, **kwargs)
ax.set_ylim([min_val, 0])
if (plot_width is not None):
half_width = ((plot_width // 2) + 1)
ax.set_xlim([(- half_width), half_width])
ax.grid()
if (dB and plot_db_drop):
cross_section -= np.max(cross_section)
zero_crossings = np.where(np.diff(np.signbit((cross_section + plot_db_drop))))[0]
if (len(zero_crossings) >= 2):
zero_crossings -= np.argmax(cross_section)
width = (zero_crossings[(- 1)] - zero_crossings[0])
ax.set_title(f'-{plot_db_drop}dB width = {width}')
ax.axvline(x=zero_crossings[0], c='k', linestyle='--')
ax.axvline(x=zero_crossings[(- 1)], c='k', linestyle='--')
else:
warnings.warn('Width could not be determined. Did not detect two -{} points : {}'.format(plot_db_drop, zero_crossings))
return (ax, cross_section) | def plot_cross_section(vals, idx=None, ax=None, dB=True, plot_db_drop=3, min_val=0.0001, max_val=None, plot_width=None, **kwargs):
'\n Plot cross-section of a 2-D image.\n\n Parameters\n ----------\n vals : py:class:`~numpy.ndarray`\n 2-D image data.\n idx : int, optional\n Row for which to plot cross-section. Default is to take middle.\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n dB : bool, optional\n Whether to plot in dB scale.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n '
if (ax is None):
(_, ax) = plt.subplots()
if (idx is None):
max_idx = np.unravel_index(np.argmax(vals, axis=None), vals.shape)
idx = max_idx[0]
cross_section = vals[(idx, :)].astype(np.float32)
if (max_val is None):
max_val = cross_section.max()
cross_section /= max_val
min_val = max(min_val, cross_section.min())
if dB:
cross_section[(cross_section < min_val)] = min_val
cross_section = (10 * np.log10(cross_section))
min_val = (10 * np.log10(min_val))
ax.set_ylabel('dB')
x_vals = np.arange(len(cross_section))
x_vals -= np.argmax(cross_section)
ax.plot(x_vals, cross_section, **kwargs)
ax.set_ylim([min_val, 0])
if (plot_width is not None):
half_width = ((plot_width // 2) + 1)
ax.set_xlim([(- half_width), half_width])
ax.grid()
if (dB and plot_db_drop):
cross_section -= np.max(cross_section)
zero_crossings = np.where(np.diff(np.signbit((cross_section + plot_db_drop))))[0]
if (len(zero_crossings) >= 2):
zero_crossings -= np.argmax(cross_section)
width = (zero_crossings[(- 1)] - zero_crossings[0])
ax.set_title(f'-{plot_db_drop}dB width = {width}')
ax.axvline(x=zero_crossings[0], c='k', linestyle='--')
ax.axvline(x=zero_crossings[(- 1)], c='k', linestyle='--')
else:
warnings.warn('Width could not be determined. Did not detect two -{} points : {}'.format(plot_db_drop, zero_crossings))
return (ax, cross_section)<|docstring|>Plot cross-section of a 2-D image.
Parameters
----------
vals : py:class:`~numpy.ndarray`
2-D image data.
idx : int, optional
Row for which to plot cross-section. Default is to take middle.
ax : :py:class:`~matplotlib.axes.Axes`, optional
`Axes` object to fill, default is to create one.
dB : bool, optional
Whether to plot in dB scale.
Return
------
ax : :py:class:`~matplotlib.axes.Axes`<|endoftext|> |
51562f2d4f069fb566b15da1e4ed6e9102b550587c3e02c330707424c78df0dc | def plot_autocorr2d(vals, pad_mode='reflect', ax=None):
'\n Plot 2-D autocorrelation of image.\n\n Parameters\n ----------\n vals : py:class:`~numpy.ndarray`\n 2-D image.\n pad_mode : str\n Desired padding. See NumPy documentation: https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n autocorr : py:class:`~numpy.ndarray`\n '
nbit_plot = 8
max_val_plot = ((2 ** nbit_plot) - 1)
autocorr = autocorr2d(vals, pad_mode=pad_mode)
data = (autocorr - np.min(autocorr))
data = (data / np.max(np.abs(data)))
data = (max_val_plot * data)
autocorr_img = data.astype(np.uint8)
if (ax is None):
(_, ax) = plt.subplots()
ax.imshow(autocorr_img, cmap='gray', vmin=0, vmax=max_val_plot)
ax.axis('off')
return (ax, autocorr) | Plot 2-D autocorrelation of image.
Parameters
----------
vals : py:class:`~numpy.ndarray`
2-D image.
pad_mode : str
Desired padding. See NumPy documentation: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
ax : :py:class:`~matplotlib.axes.Axes`, optional
`Axes` object to fill, default is to create one.
Return
------
ax : :py:class:`~matplotlib.axes.Axes`
autocorr : py:class:`~numpy.ndarray` | DiffuserCam/diffcam/plot.py | plot_autocorr2d | WilliamCappelletti/diffusercam-project | 0 | python | def plot_autocorr2d(vals, pad_mode='reflect', ax=None):
'\n Plot 2-D autocorrelation of image.\n\n Parameters\n ----------\n vals : py:class:`~numpy.ndarray`\n 2-D image.\n pad_mode : str\n Desired padding. See NumPy documentation: https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n autocorr : py:class:`~numpy.ndarray`\n '
nbit_plot = 8
max_val_plot = ((2 ** nbit_plot) - 1)
autocorr = autocorr2d(vals, pad_mode=pad_mode)
data = (autocorr - np.min(autocorr))
data = (data / np.max(np.abs(data)))
data = (max_val_plot * data)
autocorr_img = data.astype(np.uint8)
if (ax is None):
(_, ax) = plt.subplots()
ax.imshow(autocorr_img, cmap='gray', vmin=0, vmax=max_val_plot)
ax.axis('off')
return (ax, autocorr) | def plot_autocorr2d(vals, pad_mode='reflect', ax=None):
'\n Plot 2-D autocorrelation of image.\n\n Parameters\n ----------\n vals : py:class:`~numpy.ndarray`\n 2-D image.\n pad_mode : str\n Desired padding. See NumPy documentation: https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n ax : :py:class:`~matplotlib.axes.Axes`, optional\n `Axes` object to fill, default is to create one.\n\n Return\n ------\n ax : :py:class:`~matplotlib.axes.Axes`\n autocorr : py:class:`~numpy.ndarray`\n '
nbit_plot = 8
max_val_plot = ((2 ** nbit_plot) - 1)
autocorr = autocorr2d(vals, pad_mode=pad_mode)
data = (autocorr - np.min(autocorr))
data = (data / np.max(np.abs(data)))
data = (max_val_plot * data)
autocorr_img = data.astype(np.uint8)
if (ax is None):
(_, ax) = plt.subplots()
ax.imshow(autocorr_img, cmap='gray', vmin=0, vmax=max_val_plot)
ax.axis('off')
return (ax, autocorr)<|docstring|>Plot 2-D autocorrelation of image.
Parameters
----------
vals : py:class:`~numpy.ndarray`
2-D image.
pad_mode : str
Desired padding. See NumPy documentation: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
ax : :py:class:`~matplotlib.axes.Axes`, optional
`Axes` object to fill, default is to create one.
Return
------
ax : :py:class:`~matplotlib.axes.Axes`
autocorr : py:class:`~numpy.ndarray`<|endoftext|> |
0dd9962b22210a6ff737c6802f3f9a89c9a31f58b1c3c56301f073dc174b09ac | def check_pipeline_parameters(self):
'Check pipeline parameters.'
if ('full_width_at_half_maximum' not in self.parameters.keys()):
self.parameters['full_width_at_half_maximum'] = [8, 8, 8]
if ('t1_native_space' not in self.parameters.keys()):
self.parameters['t1_native_space'] = False
if ('freesurfer_brain_mask' not in self.parameters.keys()):
self.parameters['freesurfer_brain_mask'] = False
if ('unwarping' not in self.parameters.keys()):
self.parameters['unwarping'] = False | Check pipeline parameters. | pipelines/fmri_preprocessing/fmri_preprocessing_pipeline.py | check_pipeline_parameters | aramis-lab/clinica_pipeline_fmri_preprocessing | 1 | python | def check_pipeline_parameters(self):
if ('full_width_at_half_maximum' not in self.parameters.keys()):
self.parameters['full_width_at_half_maximum'] = [8, 8, 8]
if ('t1_native_space' not in self.parameters.keys()):
self.parameters['t1_native_space'] = False
if ('freesurfer_brain_mask' not in self.parameters.keys()):
self.parameters['freesurfer_brain_mask'] = False
if ('unwarping' not in self.parameters.keys()):
self.parameters['unwarping'] = False | def check_pipeline_parameters(self):
if ('full_width_at_half_maximum' not in self.parameters.keys()):
self.parameters['full_width_at_half_maximum'] = [8, 8, 8]
if ('t1_native_space' not in self.parameters.keys()):
self.parameters['t1_native_space'] = False
if ('freesurfer_brain_mask' not in self.parameters.keys()):
self.parameters['freesurfer_brain_mask'] = False
if ('unwarping' not in self.parameters.keys()):
self.parameters['unwarping'] = False<|docstring|>Check pipeline parameters.<|endoftext|> |
c8fcfaa17224eb15d04c1abe0461f9ea3cdb1ae5c642ab770c92f2088911dbbf | def get_input_fields(self):
'Specify the list of possible inputs of this pipelines.\n\n Returns:\n A list of (string) input fields name.\n '
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
return ['et', 'blipdir', 'tert', 'time_repetition', 'num_slices', 'magnitude1', 'slice_order', 'ref_slice', 'time_acquisition', 'phasediff', 'bold', 'T1w']
else:
return ['time_repetition', 'num_slices', 'slice_order', 'ref_slice', 'time_acquisition', 'bold', 'T1w'] | Specify the list of possible inputs of this pipelines.
Returns:
A list of (string) input fields name. | pipelines/fmri_preprocessing/fmri_preprocessing_pipeline.py | get_input_fields | aramis-lab/clinica_pipeline_fmri_preprocessing | 1 | python | def get_input_fields(self):
'Specify the list of possible inputs of this pipelines.\n\n Returns:\n A list of (string) input fields name.\n '
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
return ['et', 'blipdir', 'tert', 'time_repetition', 'num_slices', 'magnitude1', 'slice_order', 'ref_slice', 'time_acquisition', 'phasediff', 'bold', 'T1w']
else:
return ['time_repetition', 'num_slices', 'slice_order', 'ref_slice', 'time_acquisition', 'bold', 'T1w'] | def get_input_fields(self):
'Specify the list of possible inputs of this pipelines.\n\n Returns:\n A list of (string) input fields name.\n '
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
return ['et', 'blipdir', 'tert', 'time_repetition', 'num_slices', 'magnitude1', 'slice_order', 'ref_slice', 'time_acquisition', 'phasediff', 'bold', 'T1w']
else:
return ['time_repetition', 'num_slices', 'slice_order', 'ref_slice', 'time_acquisition', 'bold', 'T1w']<|docstring|>Specify the list of possible inputs of this pipelines.
Returns:
A list of (string) input fields name.<|endoftext|> |
e279c6f2441d20b5ff74ee605ccaf1a051c22278d65e8c3d6c148d17fa875b29 | def get_output_fields(self):
'Specify the list of possible outputs of this pipelines.\n\n Returns:\n A list of (string) output fields name.\n '
if (('t1_native_space' in self.parameters) and self.parameters['t1_native_space']):
return ['t1_brain_mask', 'mc_params', 'native_fmri', 't1_fmri', 'mni_fmri', 'mni_smoothed_fmri']
else:
return ['t1_brain_mask', 'mc_params', 'native_fmri', 'mni_fmri', 'mni_smoothed_fmri'] | Specify the list of possible outputs of this pipelines.
Returns:
A list of (string) output fields name. | pipelines/fmri_preprocessing/fmri_preprocessing_pipeline.py | get_output_fields | aramis-lab/clinica_pipeline_fmri_preprocessing | 1 | python | def get_output_fields(self):
'Specify the list of possible outputs of this pipelines.\n\n Returns:\n A list of (string) output fields name.\n '
if (('t1_native_space' in self.parameters) and self.parameters['t1_native_space']):
return ['t1_brain_mask', 'mc_params', 'native_fmri', 't1_fmri', 'mni_fmri', 'mni_smoothed_fmri']
else:
return ['t1_brain_mask', 'mc_params', 'native_fmri', 'mni_fmri', 'mni_smoothed_fmri'] | def get_output_fields(self):
'Specify the list of possible outputs of this pipelines.\n\n Returns:\n A list of (string) output fields name.\n '
if (('t1_native_space' in self.parameters) and self.parameters['t1_native_space']):
return ['t1_brain_mask', 'mc_params', 'native_fmri', 't1_fmri', 'mni_fmri', 'mni_smoothed_fmri']
else:
return ['t1_brain_mask', 'mc_params', 'native_fmri', 'mni_fmri', 'mni_smoothed_fmri']<|docstring|>Specify the list of possible outputs of this pipelines.
Returns:
A list of (string) output fields name.<|endoftext|> |
74ecf0c7fb3d9a1557516837143edee4814d7da3f53710fceb7054e8a37498d8 | def build_input_node(self):
'Build and connect an input node to the pipelines.\n\n References:\n https://lcni.uoregon.edu/kb-articles/kb-0003\n\n '
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
import json
import numpy as np
from clinica.utils.stream import cprint
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
import clinica.utils.input_files as input_files
read_node = npe.Node(name='ReadingBIDS', interface=nutil.IdentityInterface(fields=self.get_input_fields(), mandatory_inputs=True))
all_errors = []
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
try:
read_node.inputs.magnitude1 = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_MAGNITUDE1_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.phasediff = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_PHASEDIFF_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.bold = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMRI_BOLD_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.T1w = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.T1W_NII)
except ClinicaException as e:
all_errors.append(e)
read_node.inputs.et = []
read_node.inputs.blipdir = []
read_node.inputs.tert = []
read_node.inputs.time_repetition = []
read_node.inputs.num_slices = []
read_node.inputs.slice_order = []
read_node.inputs.ref_slice = []
read_node.inputs.time_acquisition = []
if self.parameters['unwarping']:
try:
phasediff_json = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_PHASEDIFF_JSON)
for json_f in phasediff_json:
with open(json_f) as json_file:
data = json.load(json_file)
read_node.inputs.et.append([data['EchoTime1'], data['EchoTime2']])
blipdir_raw = data['PhaseEncodingDirection']
if ((len(blipdir_raw) > 1) and (blipdir_raw[1] == '-')):
read_node.inputs.blipdir.append((- 1))
else:
read_node.inputs.blipdir.append(1)
except ClinicaException as e:
all_errors.append(e)
try:
func_json = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMRI_BOLD_JSON)
for json_f in func_json:
with open(json_f) as json_file:
data = json.load(json_file)
read_node.inputs.tert.append((1 / data['BandwidthPerPixelPhaseEncode']))
read_node.inputs.time_repetition.append(data['RepetitionTime'])
slice_timing = data['SliceTiming']
read_node.inputs.num_slices.append(len(slice_timing))
slice_order = (np.argsort(slice_timing) + 1)
read_node.inputs.slice_order.append(slice_order.tolist())
read_node.inputs.ref_slice.append((np.argmin(slice_timing) + 1))
read_node.inputs.time_acquisition.append((data['RepetitionTime'] - (data['RepetitionTime'] / float(len(slice_timing)))))
except ClinicaException as e:
all_errors.append(e)
if (len(all_errors) > 0):
error_message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
for msg in all_errors:
error_message += str(msg)
raise ClinicaBIDSError(error_message)
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
self.connect([(read_node, self.input_node, [('et', 'et')]), (read_node, self.input_node, [('blipdir', 'blipdir')]), (read_node, self.input_node, [('tert', 'tert')]), (read_node, self.input_node, [('phasediff', 'phasediff')]), (read_node, self.input_node, [('magnitude1', 'magnitude1')])])
self.connect([(read_node, self.input_node, [('time_repetition', 'time_repetition')]), (read_node, self.input_node, [('num_slices', 'num_slices')]), (read_node, self.input_node, [('slice_order', 'slice_order')]), (read_node, self.input_node, [('ref_slice', 'ref_slice')]), (read_node, self.input_node, [('time_acquisition', 'time_acquisition')]), (read_node, self.input_node, [('bold', 'bold')]), (read_node, self.input_node, [('T1w', 'T1w')])]) | Build and connect an input node to the pipelines.
References:
https://lcni.uoregon.edu/kb-articles/kb-0003 | pipelines/fmri_preprocessing/fmri_preprocessing_pipeline.py | build_input_node | aramis-lab/clinica_pipeline_fmri_preprocessing | 1 | python | def build_input_node(self):
'Build and connect an input node to the pipelines.\n\n References:\n https://lcni.uoregon.edu/kb-articles/kb-0003\n\n '
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
import json
import numpy as np
from clinica.utils.stream import cprint
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
import clinica.utils.input_files as input_files
read_node = npe.Node(name='ReadingBIDS', interface=nutil.IdentityInterface(fields=self.get_input_fields(), mandatory_inputs=True))
all_errors = []
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
try:
read_node.inputs.magnitude1 = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_MAGNITUDE1_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.phasediff = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_PHASEDIFF_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.bold = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMRI_BOLD_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.T1w = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.T1W_NII)
except ClinicaException as e:
all_errors.append(e)
read_node.inputs.et = []
read_node.inputs.blipdir = []
read_node.inputs.tert = []
read_node.inputs.time_repetition = []
read_node.inputs.num_slices = []
read_node.inputs.slice_order = []
read_node.inputs.ref_slice = []
read_node.inputs.time_acquisition = []
if self.parameters['unwarping']:
try:
phasediff_json = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_PHASEDIFF_JSON)
for json_f in phasediff_json:
with open(json_f) as json_file:
data = json.load(json_file)
read_node.inputs.et.append([data['EchoTime1'], data['EchoTime2']])
blipdir_raw = data['PhaseEncodingDirection']
if ((len(blipdir_raw) > 1) and (blipdir_raw[1] == '-')):
read_node.inputs.blipdir.append((- 1))
else:
read_node.inputs.blipdir.append(1)
except ClinicaException as e:
all_errors.append(e)
try:
func_json = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMRI_BOLD_JSON)
for json_f in func_json:
with open(json_f) as json_file:
data = json.load(json_file)
read_node.inputs.tert.append((1 / data['BandwidthPerPixelPhaseEncode']))
read_node.inputs.time_repetition.append(data['RepetitionTime'])
slice_timing = data['SliceTiming']
read_node.inputs.num_slices.append(len(slice_timing))
slice_order = (np.argsort(slice_timing) + 1)
read_node.inputs.slice_order.append(slice_order.tolist())
read_node.inputs.ref_slice.append((np.argmin(slice_timing) + 1))
read_node.inputs.time_acquisition.append((data['RepetitionTime'] - (data['RepetitionTime'] / float(len(slice_timing)))))
except ClinicaException as e:
all_errors.append(e)
if (len(all_errors) > 0):
error_message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
for msg in all_errors:
error_message += str(msg)
raise ClinicaBIDSError(error_message)
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
self.connect([(read_node, self.input_node, [('et', 'et')]), (read_node, self.input_node, [('blipdir', 'blipdir')]), (read_node, self.input_node, [('tert', 'tert')]), (read_node, self.input_node, [('phasediff', 'phasediff')]), (read_node, self.input_node, [('magnitude1', 'magnitude1')])])
self.connect([(read_node, self.input_node, [('time_repetition', 'time_repetition')]), (read_node, self.input_node, [('num_slices', 'num_slices')]), (read_node, self.input_node, [('slice_order', 'slice_order')]), (read_node, self.input_node, [('ref_slice', 'ref_slice')]), (read_node, self.input_node, [('time_acquisition', 'time_acquisition')]), (read_node, self.input_node, [('bold', 'bold')]), (read_node, self.input_node, [('T1w', 'T1w')])]) | def build_input_node(self):
'Build and connect an input node to the pipelines.\n\n References:\n https://lcni.uoregon.edu/kb-articles/kb-0003\n\n '
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
import json
import numpy as np
from clinica.utils.stream import cprint
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
import clinica.utils.input_files as input_files
read_node = npe.Node(name='ReadingBIDS', interface=nutil.IdentityInterface(fields=self.get_input_fields(), mandatory_inputs=True))
all_errors = []
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
try:
read_node.inputs.magnitude1 = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_MAGNITUDE1_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.phasediff = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_PHASEDIFF_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.bold = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMRI_BOLD_NII)
except ClinicaException as e:
all_errors.append(e)
try:
read_node.inputs.T1w = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.T1W_NII)
except ClinicaException as e:
all_errors.append(e)
read_node.inputs.et = []
read_node.inputs.blipdir = []
read_node.inputs.tert = []
read_node.inputs.time_repetition = []
read_node.inputs.num_slices = []
read_node.inputs.slice_order = []
read_node.inputs.ref_slice = []
read_node.inputs.time_acquisition = []
if self.parameters['unwarping']:
try:
phasediff_json = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMAP_PHASEDIFF_JSON)
for json_f in phasediff_json:
with open(json_f) as json_file:
data = json.load(json_file)
read_node.inputs.et.append([data['EchoTime1'], data['EchoTime2']])
blipdir_raw = data['PhaseEncodingDirection']
if ((len(blipdir_raw) > 1) and (blipdir_raw[1] == '-')):
read_node.inputs.blipdir.append((- 1))
else:
read_node.inputs.blipdir.append(1)
except ClinicaException as e:
all_errors.append(e)
try:
func_json = clinica_file_reader(self.subjects, self.sessions, self.bids_directory, input_files.FMRI_BOLD_JSON)
for json_f in func_json:
with open(json_f) as json_file:
data = json.load(json_file)
read_node.inputs.tert.append((1 / data['BandwidthPerPixelPhaseEncode']))
read_node.inputs.time_repetition.append(data['RepetitionTime'])
slice_timing = data['SliceTiming']
read_node.inputs.num_slices.append(len(slice_timing))
slice_order = (np.argsort(slice_timing) + 1)
read_node.inputs.slice_order.append(slice_order.tolist())
read_node.inputs.ref_slice.append((np.argmin(slice_timing) + 1))
read_node.inputs.time_acquisition.append((data['RepetitionTime'] - (data['RepetitionTime'] / float(len(slice_timing)))))
except ClinicaException as e:
all_errors.append(e)
if (len(all_errors) > 0):
error_message = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n'
for msg in all_errors:
error_message += str(msg)
raise ClinicaBIDSError(error_message)
if (('unwarping' in self.parameters) and self.parameters['unwarping']):
self.connect([(read_node, self.input_node, [('et', 'et')]), (read_node, self.input_node, [('blipdir', 'blipdir')]), (read_node, self.input_node, [('tert', 'tert')]), (read_node, self.input_node, [('phasediff', 'phasediff')]), (read_node, self.input_node, [('magnitude1', 'magnitude1')])])
self.connect([(read_node, self.input_node, [('time_repetition', 'time_repetition')]), (read_node, self.input_node, [('num_slices', 'num_slices')]), (read_node, self.input_node, [('slice_order', 'slice_order')]), (read_node, self.input_node, [('ref_slice', 'ref_slice')]), (read_node, self.input_node, [('time_acquisition', 'time_acquisition')]), (read_node, self.input_node, [('bold', 'bold')]), (read_node, self.input_node, [('T1w', 'T1w')])])<|docstring|>Build and connect an input node to the pipelines.
References:
https://lcni.uoregon.edu/kb-articles/kb-0003<|endoftext|> |
3bfdad0572db2d65299193732e182f939d6aaac40c197d5e903890e67bdfc3ca | def build_output_node(self):
'Build and connect an output node to the pipelines.\n '
import nipype.pipeline.engine as npe
import nipype.interfaces.io as nio
write_node = npe.MapNode(name='WritingCAPS', iterfield=(['container'] + self.get_output_fields()), interface=nio.DataSink(infields=self.get_output_fields()))
write_node.inputs.base_directory = self.caps_directory
write_node.inputs.parameterization = False
write_node.inputs.container = [(((('subjects/' + self.subjects[i]) + '/') + self.sessions[i]) + '/fmri/preprocessing') for i in range(len(self.subjects))]
write_node.inputs.remove_dest_dir = True
if (('freesurfer_brain_mask' in self.parameters) and (not self.parameters['freesurfer_brain_mask'])):
write_node.inputs.regexp_substitutions = [('t1_brain_mask/c3(.+)_maths_dil_ero_thresh_fillh\\.nii\\.gz$', '\\1_brainmask.nii.gz'), ('mc_params/rp_a(.+)\\.txt$', '\\1_motion.tsv'), ('native_fmri/[u|r]a(.+)\\.nii.gz$', '\\1_space-meanBOLD_preproc.nii.gz'), ('t1_fmri/r[u|r]a(.+)\\.nii.gz$', '\\1_space-T1w_preproc.nii.gz'), ('mni_fmri/wr[u|r]a(.+)\\.nii.gz$', '\\1_space-Ixi549Space_preproc.nii.gz'), ('mni_smoothed_fmri/swr[u|r]a(.+)\\.nii.gz$', (('\\1_space-Ixi549Space_fwhm-' + 'x'.join(map(str, self.parameters['full_width_at_half_maximum']))) + '_preproc.nii.gz')), ('trait_added', '')]
else:
write_node.inputs.regexp_substitutions = [('t1_brain_mask/(.+)\\.nii\\.gz$', '\\1_brainmask.nii.gz'), ('mc_params/rp_a(.+)\\.txt$', '\\1_motion.tsv'), ('native_fmri/[u|r]a(.+)\\.nii.gz$', '\\1_space-meanBOLD_preproc.nii.gz'), ('t1_fmri/r[u|r]a(.+)\\.nii.gz$', '\\1_space-T1w_preproc.nii.gz'), ('mni_fmri/wr[u|r]a(.+)\\.nii.gz$', '\\1_space-Ixi549Space_preproc.nii.gz'), ('mni_smoothed_fmri/swr[u|r]a(.+)\\.nii.gz$', (('\\1_space-Ixi549Space_fwhm-' + 'x'.join(map(str, self.parameters['full_width_at_half_maximum']))) + '_preproc.nii.gz')), ('trait_added', '')]
if (('t1_native_space' in self.parameters) and self.parameters['t1_native_space']):
self.connect([(self.output_node, write_node, [('t1_fmri', 't1_fmri')])])
self.connect([(self.output_node, write_node, [('t1_brain_mask', 't1_brain_mask')]), (self.output_node, write_node, [('mc_params', 'mc_params')]), (self.output_node, write_node, [('native_fmri', 'native_fmri')]), (self.output_node, write_node, [('mni_fmri', 'mni_fmri')]), (self.output_node, write_node, [('mni_smoothed_fmri', 'mni_smoothed_fmri')])]) | Build and connect an output node to the pipelines. | pipelines/fmri_preprocessing/fmri_preprocessing_pipeline.py | build_output_node | aramis-lab/clinica_pipeline_fmri_preprocessing | 1 | python | def build_output_node(self):
'\n '
import nipype.pipeline.engine as npe
import nipype.interfaces.io as nio
write_node = npe.MapNode(name='WritingCAPS', iterfield=(['container'] + self.get_output_fields()), interface=nio.DataSink(infields=self.get_output_fields()))
write_node.inputs.base_directory = self.caps_directory
write_node.inputs.parameterization = False
write_node.inputs.container = [(((('subjects/' + self.subjects[i]) + '/') + self.sessions[i]) + '/fmri/preprocessing') for i in range(len(self.subjects))]
write_node.inputs.remove_dest_dir = True
if (('freesurfer_brain_mask' in self.parameters) and (not self.parameters['freesurfer_brain_mask'])):
write_node.inputs.regexp_substitutions = [('t1_brain_mask/c3(.+)_maths_dil_ero_thresh_fillh\\.nii\\.gz$', '\\1_brainmask.nii.gz'), ('mc_params/rp_a(.+)\\.txt$', '\\1_motion.tsv'), ('native_fmri/[u|r]a(.+)\\.nii.gz$', '\\1_space-meanBOLD_preproc.nii.gz'), ('t1_fmri/r[u|r]a(.+)\\.nii.gz$', '\\1_space-T1w_preproc.nii.gz'), ('mni_fmri/wr[u|r]a(.+)\\.nii.gz$', '\\1_space-Ixi549Space_preproc.nii.gz'), ('mni_smoothed_fmri/swr[u|r]a(.+)\\.nii.gz$', (('\\1_space-Ixi549Space_fwhm-' + 'x'.join(map(str, self.parameters['full_width_at_half_maximum']))) + '_preproc.nii.gz')), ('trait_added', )]
else:
write_node.inputs.regexp_substitutions = [('t1_brain_mask/(.+)\\.nii\\.gz$', '\\1_brainmask.nii.gz'), ('mc_params/rp_a(.+)\\.txt$', '\\1_motion.tsv'), ('native_fmri/[u|r]a(.+)\\.nii.gz$', '\\1_space-meanBOLD_preproc.nii.gz'), ('t1_fmri/r[u|r]a(.+)\\.nii.gz$', '\\1_space-T1w_preproc.nii.gz'), ('mni_fmri/wr[u|r]a(.+)\\.nii.gz$', '\\1_space-Ixi549Space_preproc.nii.gz'), ('mni_smoothed_fmri/swr[u|r]a(.+)\\.nii.gz$', (('\\1_space-Ixi549Space_fwhm-' + 'x'.join(map(str, self.parameters['full_width_at_half_maximum']))) + '_preproc.nii.gz')), ('trait_added', )]
if (('t1_native_space' in self.parameters) and self.parameters['t1_native_space']):
self.connect([(self.output_node, write_node, [('t1_fmri', 't1_fmri')])])
self.connect([(self.output_node, write_node, [('t1_brain_mask', 't1_brain_mask')]), (self.output_node, write_node, [('mc_params', 'mc_params')]), (self.output_node, write_node, [('native_fmri', 'native_fmri')]), (self.output_node, write_node, [('mni_fmri', 'mni_fmri')]), (self.output_node, write_node, [('mni_smoothed_fmri', 'mni_smoothed_fmri')])]) | def build_output_node(self):
'\n '
import nipype.pipeline.engine as npe
import nipype.interfaces.io as nio
write_node = npe.MapNode(name='WritingCAPS', iterfield=(['container'] + self.get_output_fields()), interface=nio.DataSink(infields=self.get_output_fields()))
write_node.inputs.base_directory = self.caps_directory
write_node.inputs.parameterization = False
write_node.inputs.container = [(((('subjects/' + self.subjects[i]) + '/') + self.sessions[i]) + '/fmri/preprocessing') for i in range(len(self.subjects))]
write_node.inputs.remove_dest_dir = True
if (('freesurfer_brain_mask' in self.parameters) and (not self.parameters['freesurfer_brain_mask'])):
write_node.inputs.regexp_substitutions = [('t1_brain_mask/c3(.+)_maths_dil_ero_thresh_fillh\\.nii\\.gz$', '\\1_brainmask.nii.gz'), ('mc_params/rp_a(.+)\\.txt$', '\\1_motion.tsv'), ('native_fmri/[u|r]a(.+)\\.nii.gz$', '\\1_space-meanBOLD_preproc.nii.gz'), ('t1_fmri/r[u|r]a(.+)\\.nii.gz$', '\\1_space-T1w_preproc.nii.gz'), ('mni_fmri/wr[u|r]a(.+)\\.nii.gz$', '\\1_space-Ixi549Space_preproc.nii.gz'), ('mni_smoothed_fmri/swr[u|r]a(.+)\\.nii.gz$', (('\\1_space-Ixi549Space_fwhm-' + 'x'.join(map(str, self.parameters['full_width_at_half_maximum']))) + '_preproc.nii.gz')), ('trait_added', )]
else:
write_node.inputs.regexp_substitutions = [('t1_brain_mask/(.+)\\.nii\\.gz$', '\\1_brainmask.nii.gz'), ('mc_params/rp_a(.+)\\.txt$', '\\1_motion.tsv'), ('native_fmri/[u|r]a(.+)\\.nii.gz$', '\\1_space-meanBOLD_preproc.nii.gz'), ('t1_fmri/r[u|r]a(.+)\\.nii.gz$', '\\1_space-T1w_preproc.nii.gz'), ('mni_fmri/wr[u|r]a(.+)\\.nii.gz$', '\\1_space-Ixi549Space_preproc.nii.gz'), ('mni_smoothed_fmri/swr[u|r]a(.+)\\.nii.gz$', (('\\1_space-Ixi549Space_fwhm-' + 'x'.join(map(str, self.parameters['full_width_at_half_maximum']))) + '_preproc.nii.gz')), ('trait_added', )]
if (('t1_native_space' in self.parameters) and self.parameters['t1_native_space']):
self.connect([(self.output_node, write_node, [('t1_fmri', 't1_fmri')])])
self.connect([(self.output_node, write_node, [('t1_brain_mask', 't1_brain_mask')]), (self.output_node, write_node, [('mc_params', 'mc_params')]), (self.output_node, write_node, [('native_fmri', 'native_fmri')]), (self.output_node, write_node, [('mni_fmri', 'mni_fmri')]), (self.output_node, write_node, [('mni_smoothed_fmri', 'mni_smoothed_fmri')])])<|docstring|>Build and connect an output node to the pipelines.<|endoftext|> |
799f3f87b778833914512edd515e8ba302a97529bc71ce8950f23abaeb367880 | def build_core_nodes(self):
'Build and connect the core nodes of the pipelines.\n '
import fmri_preprocessing_workflows as utils
import nipype.interfaces.utility as nutil
import nipype.interfaces.spm as spm
import nipype.pipeline.engine as npe
from clinica.utils.filemanip import zip_nii, unzip_nii
unzip_node = npe.MapNode(name='Unzipping', iterfield=['in_file'], interface=nutil.Function(input_names=['in_file'], output_names=['out_file'], function=unzip_nii))
unzip_T1w = unzip_node.clone('UnzippingT1w')
unzip_phasediff = unzip_node.clone('UnzippingPhasediff')
unzip_bold = unzip_node.clone('UnzippingBold')
unzip_magnitude1 = unzip_node.clone('UnzippingMagnitude1')
if self.parameters['unwarping']:
fm_node = npe.MapNode(name='FieldMapCalculation', iterfield=['phase', 'magnitude', 'epi', 'et', 'blipdir', 'tert'], interface=spm.FieldMap())
st_node = npe.MapNode(name='SliceTimingCorrection', iterfield=['in_files', 'time_repetition', 'slice_order', 'num_slices', 'ref_slice', 'time_acquisition'], interface=spm.SliceTiming())
if self.parameters['unwarping']:
mc_node = npe.MapNode(name='MotionCorrectionUnwarping', iterfield=['scans', 'pmscan'], interface=spm.RealignUnwarp())
mc_node.inputs.register_to_mean = True
mc_node.inputs.reslice_mask = False
else:
mc_node = npe.MapNode(name='MotionCorrection', iterfield=['in_files'], interface=spm.Realign())
mc_node.inputs.register_to_mean = True
import os.path as path
from nipype.interfaces.freesurfer import MRIConvert
if self.parameters['freesurfer_brain_mask']:
brain_masks = [path.join(self.caps_directory, 'subjects', self.subjects[i], self.sessions[i], 't1/freesurfer_cross_sectional', ((self.subjects[i] + '_') + self.sessions[i]), 'mri/brain.mgz') for i in range(len(self.subjects))]
conv_brain_masks = [str((((self.subjects[i] + '_') + self.sessions[i]) + '.nii')) for i in range(len(self.subjects))]
bet_node = npe.MapNode(interface=MRIConvert(), iterfield=['in_file', 'out_file'], name='BrainConversion')
bet_node.inputs.in_file = brain_masks
bet_node.inputs.out_file = conv_brain_masks
bet_node.inputs.out_type = 'nii'
else:
bet_node = utils.BrainExtractionWorkflow(name='BrainExtraction')
reg_node = npe.MapNode(interface=spm.Coregister(), iterfield=['apply_to_files', 'source', 'target'], name='Registration')
norm_node = npe.MapNode(interface=spm.Normalize12(), iterfield=['image_to_align', 'apply_to_files'], name='Normalization')
smooth_node = npe.MapNode(interface=spm.Smooth(), iterfield=['in_files'], name='Smoothing')
smooth_node.inputs.fwhm = self.parameters['full_width_at_half_maximum']
zip_node = npe.MapNode(name='Zipping', iterfield=['in_file'], interface=nutil.Function(input_names=['in_file'], output_names=['out_file'], function=zip_nii))
zip_bet_node = zip_node.clone('ZippingBET')
zip_mc_node = zip_node.clone('ZippingMC')
zip_reg_node = zip_node.clone('ZippingRegistration')
zip_norm_node = zip_node.clone('ZippingNormalization')
zip_smooth_node = zip_node.clone('ZippingSmoothing')
if self.parameters['freesurfer_brain_mask']:
self.connect([(bet_node, reg_node, [('out_file', 'target')]), (bet_node, zip_bet_node, [('out_file', 'in_file')])])
else:
self.connect([(unzip_T1w, bet_node, [('out_file', 'Segmentation.data')]), (unzip_T1w, bet_node, [('out_file', 'ApplyMask.in_file')]), (bet_node, reg_node, [('ApplyMask.out_file', 'target')]), (bet_node, zip_bet_node, [('Fill.out_file', 'in_file')])])
if self.parameters['unwarping']:
self.connect([(self.input_node, fm_node, [('et', 'et')]), (self.input_node, fm_node, [('blipdir', 'blipdir')]), (self.input_node, fm_node, [('tert', 'tert')]), (self.input_node, unzip_phasediff, [('phasediff', 'in_file')]), (self.input_node, unzip_magnitude1, [('magnitude1', 'in_file')]), (unzip_magnitude1, fm_node, [('out_file', 'magnitude')]), (unzip_phasediff, fm_node, [('out_file', 'phase')]), (unzip_bold, fm_node, [('out_file', 'epi')]), (st_node, mc_node, [('timecorrected_files', 'scans')]), (fm_node, mc_node, [('vdm', 'pmscan')]), (mc_node, reg_node, [('realigned_unwarped_files', 'apply_to_files')]), (mc_node, zip_mc_node, [('realigned_unwarped_files', 'in_file')])])
else:
self.connect([(st_node, mc_node, [('timecorrected_files', 'in_files')]), (mc_node, reg_node, [('realigned_files', 'apply_to_files')]), (mc_node, zip_mc_node, [('realigned_files', 'in_file')])])
self.connect([(self.input_node, unzip_T1w, [('T1w', 'in_file')]), (self.input_node, unzip_bold, [('bold', 'in_file')]), (unzip_bold, st_node, [('out_file', 'in_files')]), (self.input_node, st_node, [('time_repetition', 'time_repetition')]), (self.input_node, st_node, [('num_slices', 'num_slices')]), (self.input_node, st_node, [('slice_order', 'slice_order')]), (self.input_node, st_node, [('ref_slice', 'ref_slice')]), (self.input_node, st_node, [('time_acquisition', 'time_acquisition')]), (mc_node, reg_node, [('mean_image', 'source')]), (unzip_T1w, norm_node, [('out_file', 'image_to_align')]), (reg_node, norm_node, [('coregistered_files', 'apply_to_files')]), (norm_node, smooth_node, [('normalized_files', 'in_files')]), (reg_node, zip_reg_node, [('coregistered_files', 'in_file')]), (norm_node, zip_norm_node, [('normalized_files', 'in_file')]), (smooth_node, zip_smooth_node, [('smoothed_files', 'in_file')]), (zip_bet_node, self.output_node, [('out_file', 't1_brain_mask')]), (mc_node, self.output_node, [('realignment_parameters', 'mc_params')]), (zip_mc_node, self.output_node, [('out_file', 'native_fmri')]), (zip_reg_node, self.output_node, [('out_file', 't1_fmri')]), (zip_norm_node, self.output_node, [('out_file', 'mni_fmri')]), (zip_smooth_node, self.output_node, [('out_file', 'mni_smoothed_fmri')])]) | Build and connect the core nodes of the pipelines. | pipelines/fmri_preprocessing/fmri_preprocessing_pipeline.py | build_core_nodes | aramis-lab/clinica_pipeline_fmri_preprocessing | 1 | python | def build_core_nodes(self):
'\n '
import fmri_preprocessing_workflows as utils
import nipype.interfaces.utility as nutil
import nipype.interfaces.spm as spm
import nipype.pipeline.engine as npe
from clinica.utils.filemanip import zip_nii, unzip_nii
unzip_node = npe.MapNode(name='Unzipping', iterfield=['in_file'], interface=nutil.Function(input_names=['in_file'], output_names=['out_file'], function=unzip_nii))
unzip_T1w = unzip_node.clone('UnzippingT1w')
unzip_phasediff = unzip_node.clone('UnzippingPhasediff')
unzip_bold = unzip_node.clone('UnzippingBold')
unzip_magnitude1 = unzip_node.clone('UnzippingMagnitude1')
if self.parameters['unwarping']:
fm_node = npe.MapNode(name='FieldMapCalculation', iterfield=['phase', 'magnitude', 'epi', 'et', 'blipdir', 'tert'], interface=spm.FieldMap())
st_node = npe.MapNode(name='SliceTimingCorrection', iterfield=['in_files', 'time_repetition', 'slice_order', 'num_slices', 'ref_slice', 'time_acquisition'], interface=spm.SliceTiming())
if self.parameters['unwarping']:
mc_node = npe.MapNode(name='MotionCorrectionUnwarping', iterfield=['scans', 'pmscan'], interface=spm.RealignUnwarp())
mc_node.inputs.register_to_mean = True
mc_node.inputs.reslice_mask = False
else:
mc_node = npe.MapNode(name='MotionCorrection', iterfield=['in_files'], interface=spm.Realign())
mc_node.inputs.register_to_mean = True
import os.path as path
from nipype.interfaces.freesurfer import MRIConvert
if self.parameters['freesurfer_brain_mask']:
brain_masks = [path.join(self.caps_directory, 'subjects', self.subjects[i], self.sessions[i], 't1/freesurfer_cross_sectional', ((self.subjects[i] + '_') + self.sessions[i]), 'mri/brain.mgz') for i in range(len(self.subjects))]
conv_brain_masks = [str((((self.subjects[i] + '_') + self.sessions[i]) + '.nii')) for i in range(len(self.subjects))]
bet_node = npe.MapNode(interface=MRIConvert(), iterfield=['in_file', 'out_file'], name='BrainConversion')
bet_node.inputs.in_file = brain_masks
bet_node.inputs.out_file = conv_brain_masks
bet_node.inputs.out_type = 'nii'
else:
bet_node = utils.BrainExtractionWorkflow(name='BrainExtraction')
reg_node = npe.MapNode(interface=spm.Coregister(), iterfield=['apply_to_files', 'source', 'target'], name='Registration')
norm_node = npe.MapNode(interface=spm.Normalize12(), iterfield=['image_to_align', 'apply_to_files'], name='Normalization')
smooth_node = npe.MapNode(interface=spm.Smooth(), iterfield=['in_files'], name='Smoothing')
smooth_node.inputs.fwhm = self.parameters['full_width_at_half_maximum']
zip_node = npe.MapNode(name='Zipping', iterfield=['in_file'], interface=nutil.Function(input_names=['in_file'], output_names=['out_file'], function=zip_nii))
zip_bet_node = zip_node.clone('ZippingBET')
zip_mc_node = zip_node.clone('ZippingMC')
zip_reg_node = zip_node.clone('ZippingRegistration')
zip_norm_node = zip_node.clone('ZippingNormalization')
zip_smooth_node = zip_node.clone('ZippingSmoothing')
if self.parameters['freesurfer_brain_mask']:
self.connect([(bet_node, reg_node, [('out_file', 'target')]), (bet_node, zip_bet_node, [('out_file', 'in_file')])])
else:
self.connect([(unzip_T1w, bet_node, [('out_file', 'Segmentation.data')]), (unzip_T1w, bet_node, [('out_file', 'ApplyMask.in_file')]), (bet_node, reg_node, [('ApplyMask.out_file', 'target')]), (bet_node, zip_bet_node, [('Fill.out_file', 'in_file')])])
if self.parameters['unwarping']:
self.connect([(self.input_node, fm_node, [('et', 'et')]), (self.input_node, fm_node, [('blipdir', 'blipdir')]), (self.input_node, fm_node, [('tert', 'tert')]), (self.input_node, unzip_phasediff, [('phasediff', 'in_file')]), (self.input_node, unzip_magnitude1, [('magnitude1', 'in_file')]), (unzip_magnitude1, fm_node, [('out_file', 'magnitude')]), (unzip_phasediff, fm_node, [('out_file', 'phase')]), (unzip_bold, fm_node, [('out_file', 'epi')]), (st_node, mc_node, [('timecorrected_files', 'scans')]), (fm_node, mc_node, [('vdm', 'pmscan')]), (mc_node, reg_node, [('realigned_unwarped_files', 'apply_to_files')]), (mc_node, zip_mc_node, [('realigned_unwarped_files', 'in_file')])])
else:
self.connect([(st_node, mc_node, [('timecorrected_files', 'in_files')]), (mc_node, reg_node, [('realigned_files', 'apply_to_files')]), (mc_node, zip_mc_node, [('realigned_files', 'in_file')])])
self.connect([(self.input_node, unzip_T1w, [('T1w', 'in_file')]), (self.input_node, unzip_bold, [('bold', 'in_file')]), (unzip_bold, st_node, [('out_file', 'in_files')]), (self.input_node, st_node, [('time_repetition', 'time_repetition')]), (self.input_node, st_node, [('num_slices', 'num_slices')]), (self.input_node, st_node, [('slice_order', 'slice_order')]), (self.input_node, st_node, [('ref_slice', 'ref_slice')]), (self.input_node, st_node, [('time_acquisition', 'time_acquisition')]), (mc_node, reg_node, [('mean_image', 'source')]), (unzip_T1w, norm_node, [('out_file', 'image_to_align')]), (reg_node, norm_node, [('coregistered_files', 'apply_to_files')]), (norm_node, smooth_node, [('normalized_files', 'in_files')]), (reg_node, zip_reg_node, [('coregistered_files', 'in_file')]), (norm_node, zip_norm_node, [('normalized_files', 'in_file')]), (smooth_node, zip_smooth_node, [('smoothed_files', 'in_file')]), (zip_bet_node, self.output_node, [('out_file', 't1_brain_mask')]), (mc_node, self.output_node, [('realignment_parameters', 'mc_params')]), (zip_mc_node, self.output_node, [('out_file', 'native_fmri')]), (zip_reg_node, self.output_node, [('out_file', 't1_fmri')]), (zip_norm_node, self.output_node, [('out_file', 'mni_fmri')]), (zip_smooth_node, self.output_node, [('out_file', 'mni_smoothed_fmri')])]) | def build_core_nodes(self):
'\n '
import fmri_preprocessing_workflows as utils
import nipype.interfaces.utility as nutil
import nipype.interfaces.spm as spm
import nipype.pipeline.engine as npe
from clinica.utils.filemanip import zip_nii, unzip_nii
unzip_node = npe.MapNode(name='Unzipping', iterfield=['in_file'], interface=nutil.Function(input_names=['in_file'], output_names=['out_file'], function=unzip_nii))
unzip_T1w = unzip_node.clone('UnzippingT1w')
unzip_phasediff = unzip_node.clone('UnzippingPhasediff')
unzip_bold = unzip_node.clone('UnzippingBold')
unzip_magnitude1 = unzip_node.clone('UnzippingMagnitude1')
if self.parameters['unwarping']:
fm_node = npe.MapNode(name='FieldMapCalculation', iterfield=['phase', 'magnitude', 'epi', 'et', 'blipdir', 'tert'], interface=spm.FieldMap())
st_node = npe.MapNode(name='SliceTimingCorrection', iterfield=['in_files', 'time_repetition', 'slice_order', 'num_slices', 'ref_slice', 'time_acquisition'], interface=spm.SliceTiming())
if self.parameters['unwarping']:
mc_node = npe.MapNode(name='MotionCorrectionUnwarping', iterfield=['scans', 'pmscan'], interface=spm.RealignUnwarp())
mc_node.inputs.register_to_mean = True
mc_node.inputs.reslice_mask = False
else:
mc_node = npe.MapNode(name='MotionCorrection', iterfield=['in_files'], interface=spm.Realign())
mc_node.inputs.register_to_mean = True
import os.path as path
from nipype.interfaces.freesurfer import MRIConvert
if self.parameters['freesurfer_brain_mask']:
brain_masks = [path.join(self.caps_directory, 'subjects', self.subjects[i], self.sessions[i], 't1/freesurfer_cross_sectional', ((self.subjects[i] + '_') + self.sessions[i]), 'mri/brain.mgz') for i in range(len(self.subjects))]
conv_brain_masks = [str((((self.subjects[i] + '_') + self.sessions[i]) + '.nii')) for i in range(len(self.subjects))]
bet_node = npe.MapNode(interface=MRIConvert(), iterfield=['in_file', 'out_file'], name='BrainConversion')
bet_node.inputs.in_file = brain_masks
bet_node.inputs.out_file = conv_brain_masks
bet_node.inputs.out_type = 'nii'
else:
bet_node = utils.BrainExtractionWorkflow(name='BrainExtraction')
reg_node = npe.MapNode(interface=spm.Coregister(), iterfield=['apply_to_files', 'source', 'target'], name='Registration')
norm_node = npe.MapNode(interface=spm.Normalize12(), iterfield=['image_to_align', 'apply_to_files'], name='Normalization')
smooth_node = npe.MapNode(interface=spm.Smooth(), iterfield=['in_files'], name='Smoothing')
smooth_node.inputs.fwhm = self.parameters['full_width_at_half_maximum']
zip_node = npe.MapNode(name='Zipping', iterfield=['in_file'], interface=nutil.Function(input_names=['in_file'], output_names=['out_file'], function=zip_nii))
zip_bet_node = zip_node.clone('ZippingBET')
zip_mc_node = zip_node.clone('ZippingMC')
zip_reg_node = zip_node.clone('ZippingRegistration')
zip_norm_node = zip_node.clone('ZippingNormalization')
zip_smooth_node = zip_node.clone('ZippingSmoothing')
if self.parameters['freesurfer_brain_mask']:
self.connect([(bet_node, reg_node, [('out_file', 'target')]), (bet_node, zip_bet_node, [('out_file', 'in_file')])])
else:
self.connect([(unzip_T1w, bet_node, [('out_file', 'Segmentation.data')]), (unzip_T1w, bet_node, [('out_file', 'ApplyMask.in_file')]), (bet_node, reg_node, [('ApplyMask.out_file', 'target')]), (bet_node, zip_bet_node, [('Fill.out_file', 'in_file')])])
if self.parameters['unwarping']:
self.connect([(self.input_node, fm_node, [('et', 'et')]), (self.input_node, fm_node, [('blipdir', 'blipdir')]), (self.input_node, fm_node, [('tert', 'tert')]), (self.input_node, unzip_phasediff, [('phasediff', 'in_file')]), (self.input_node, unzip_magnitude1, [('magnitude1', 'in_file')]), (unzip_magnitude1, fm_node, [('out_file', 'magnitude')]), (unzip_phasediff, fm_node, [('out_file', 'phase')]), (unzip_bold, fm_node, [('out_file', 'epi')]), (st_node, mc_node, [('timecorrected_files', 'scans')]), (fm_node, mc_node, [('vdm', 'pmscan')]), (mc_node, reg_node, [('realigned_unwarped_files', 'apply_to_files')]), (mc_node, zip_mc_node, [('realigned_unwarped_files', 'in_file')])])
else:
self.connect([(st_node, mc_node, [('timecorrected_files', 'in_files')]), (mc_node, reg_node, [('realigned_files', 'apply_to_files')]), (mc_node, zip_mc_node, [('realigned_files', 'in_file')])])
self.connect([(self.input_node, unzip_T1w, [('T1w', 'in_file')]), (self.input_node, unzip_bold, [('bold', 'in_file')]), (unzip_bold, st_node, [('out_file', 'in_files')]), (self.input_node, st_node, [('time_repetition', 'time_repetition')]), (self.input_node, st_node, [('num_slices', 'num_slices')]), (self.input_node, st_node, [('slice_order', 'slice_order')]), (self.input_node, st_node, [('ref_slice', 'ref_slice')]), (self.input_node, st_node, [('time_acquisition', 'time_acquisition')]), (mc_node, reg_node, [('mean_image', 'source')]), (unzip_T1w, norm_node, [('out_file', 'image_to_align')]), (reg_node, norm_node, [('coregistered_files', 'apply_to_files')]), (norm_node, smooth_node, [('normalized_files', 'in_files')]), (reg_node, zip_reg_node, [('coregistered_files', 'in_file')]), (norm_node, zip_norm_node, [('normalized_files', 'in_file')]), (smooth_node, zip_smooth_node, [('smoothed_files', 'in_file')]), (zip_bet_node, self.output_node, [('out_file', 't1_brain_mask')]), (mc_node, self.output_node, [('realignment_parameters', 'mc_params')]), (zip_mc_node, self.output_node, [('out_file', 'native_fmri')]), (zip_reg_node, self.output_node, [('out_file', 't1_fmri')]), (zip_norm_node, self.output_node, [('out_file', 'mni_fmri')]), (zip_smooth_node, self.output_node, [('out_file', 'mni_smoothed_fmri')])])<|docstring|>Build and connect the core nodes of the pipelines.<|endoftext|> |
8a99eb5608796a58569716c3a1f4e27a0d05ecb38bae5dd4d86515ad0cced9d8 | def check_metadata(layer_name, neuron_indices, ideal_activation):
'Checks metadata for errors.\n\n :param layer_name: See doc for `get_saliency_one_neuron`.\n :param neuron_indices: Same.\n :param ideal_activation: Same.\n '
error_checking.assert_is_string(layer_name)
error_checking.assert_is_integer_numpy_array(neuron_indices)
error_checking.assert_is_geq_numpy_array(neuron_indices, 0)
error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)
error_checking.assert_is_not_nan(ideal_activation) | Checks metadata for errors.
:param layer_name: See doc for `get_saliency_one_neuron`.
:param neuron_indices: Same.
:param ideal_activation: Same. | ml4tc/machine_learning/saliency.py | check_metadata | thunderhoser/ml4tc | 2 | python | def check_metadata(layer_name, neuron_indices, ideal_activation):
'Checks metadata for errors.\n\n :param layer_name: See doc for `get_saliency_one_neuron`.\n :param neuron_indices: Same.\n :param ideal_activation: Same.\n '
error_checking.assert_is_string(layer_name)
error_checking.assert_is_integer_numpy_array(neuron_indices)
error_checking.assert_is_geq_numpy_array(neuron_indices, 0)
error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)
error_checking.assert_is_not_nan(ideal_activation) | def check_metadata(layer_name, neuron_indices, ideal_activation):
'Checks metadata for errors.\n\n :param layer_name: See doc for `get_saliency_one_neuron`.\n :param neuron_indices: Same.\n :param ideal_activation: Same.\n '
error_checking.assert_is_string(layer_name)
error_checking.assert_is_integer_numpy_array(neuron_indices)
error_checking.assert_is_geq_numpy_array(neuron_indices, 0)
error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)
error_checking.assert_is_not_nan(ideal_activation)<|docstring|>Checks metadata for errors.
:param layer_name: See doc for `get_saliency_one_neuron`.
:param neuron_indices: Same.
:param ideal_activation: Same.<|endoftext|> |
bfef33a2a7623f0e87ba9ed2516c0689b93ab42e962746ed2b435eb8e464cf41 | def get_saliency_one_neuron(model_object, three_predictor_matrices, layer_name, neuron_indices, ideal_activation):
'Computes saliency maps with respect to activation of one neuron.\n\n The "relevant neuron" is that whose activation will be used in the numerator\n of the saliency equation. In other words, if the relevant neuron is n,\n the saliency of each predictor x will be d(a_n) / dx, where a_n is the\n activation of n.\n\n :param model_object: Trained neural net (instance of `keras.models.Model` or\n `keras.models.Sequential`).\n :param three_predictor_matrices: length-3 list, where each element is either\n None or a numpy array of predictors. Predictors must be formatted in\n the same way as for training.\n :param layer_name: Name of layer with relevant neuron.\n :param neuron_indices: 1-D numpy array with indices of relevant neuron.\n Must have length D - 1, where D = number of dimensions in layer output.\n The first dimension is the batch dimension, which always has length\n `None` in Keras.\n :param ideal_activation: Ideal neuron activation, used to define loss\n function. The loss function will be\n (neuron_activation - ideal_activation)**2.\n :return: three_saliency_matrices: length-3 list, where each element is\n either None or a numpy array of saliency values.\n three_saliency_matrices[i] will have the same shape as\n three_predictor_matrices[i].\n '
check_metadata(layer_name=layer_name, neuron_indices=neuron_indices, ideal_activation=ideal_activation)
error_checking.assert_is_list(three_predictor_matrices)
assert (len(three_predictor_matrices) == 3)
for this_predictor_matrix in three_predictor_matrices:
if (this_predictor_matrix is None):
continue
error_checking.assert_is_numpy_array_without_nan(this_predictor_matrix)
these_flags = numpy.array([(m is not None) for m in three_predictor_matrices], dtype=bool)
have_predictors_indices = numpy.where(these_flags)[0]
activation_tensor = None
for k in neuron_indices[::(- 1)]:
if (activation_tensor is None):
activation_tensor = model_object.get_layer(name=layer_name).output[(..., k)]
else:
activation_tensor = activation_tensor[(..., k)]
loss_tensor = ((activation_tensor - ideal_activation) ** 2)
num_examples = three_predictor_matrices[have_predictors_indices[0]].shape[0]
saliency_matrices = ([None] * len(have_predictors_indices))
for i in range(0, num_examples, NUM_EXAMPLES_PER_BATCH):
first_index = i
last_index = min([(i + NUM_EXAMPLES_PER_BATCH), num_examples])
these_matrices = saliency_utils.do_saliency_calculations(model_object=model_object, loss_tensor=loss_tensor, list_of_input_matrices=[three_predictor_matrices[k][(first_index:last_index, ...)] for k in have_predictors_indices])
for j in range(len(have_predictors_indices)):
if (saliency_matrices[j] is None):
these_dim = numpy.array(((num_examples,) + these_matrices[j].shape[1:]), dtype=int)
saliency_matrices[j] = numpy.full(these_dim, numpy.nan)
saliency_matrices[j][(first_index:last_index, ...)] = these_matrices[j]
three_saliency_matrices = ([None] * 3)
for (i, j) in enumerate(have_predictors_indices):
three_saliency_matrices[j] = saliency_matrices[i]
return three_saliency_matrices | Computes saliency maps with respect to activation of one neuron.
The "relevant neuron" is that whose activation will be used in the numerator
of the saliency equation. In other words, if the relevant neuron is n,
the saliency of each predictor x will be d(a_n) / dx, where a_n is the
activation of n.
:param model_object: Trained neural net (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param three_predictor_matrices: length-3 list, where each element is either
None or a numpy array of predictors. Predictors must be formatted in
the same way as for training.
:param layer_name: Name of layer with relevant neuron.
:param neuron_indices: 1-D numpy array with indices of relevant neuron.
Must have length D - 1, where D = number of dimensions in layer output.
The first dimension is the batch dimension, which always has length
`None` in Keras.
:param ideal_activation: Ideal neuron activation, used to define loss
function. The loss function will be
(neuron_activation - ideal_activation)**2.
:return: three_saliency_matrices: length-3 list, where each element is
either None or a numpy array of saliency values.
three_saliency_matrices[i] will have the same shape as
three_predictor_matrices[i]. | ml4tc/machine_learning/saliency.py | get_saliency_one_neuron | thunderhoser/ml4tc | 2 | python | def get_saliency_one_neuron(model_object, three_predictor_matrices, layer_name, neuron_indices, ideal_activation):
'Computes saliency maps with respect to activation of one neuron.\n\n The "relevant neuron" is that whose activation will be used in the numerator\n of the saliency equation. In other words, if the relevant neuron is n,\n the saliency of each predictor x will be d(a_n) / dx, where a_n is the\n activation of n.\n\n :param model_object: Trained neural net (instance of `keras.models.Model` or\n `keras.models.Sequential`).\n :param three_predictor_matrices: length-3 list, where each element is either\n None or a numpy array of predictors. Predictors must be formatted in\n the same way as for training.\n :param layer_name: Name of layer with relevant neuron.\n :param neuron_indices: 1-D numpy array with indices of relevant neuron.\n Must have length D - 1, where D = number of dimensions in layer output.\n The first dimension is the batch dimension, which always has length\n `None` in Keras.\n :param ideal_activation: Ideal neuron activation, used to define loss\n function. The loss function will be\n (neuron_activation - ideal_activation)**2.\n :return: three_saliency_matrices: length-3 list, where each element is\n either None or a numpy array of saliency values.\n three_saliency_matrices[i] will have the same shape as\n three_predictor_matrices[i].\n '
check_metadata(layer_name=layer_name, neuron_indices=neuron_indices, ideal_activation=ideal_activation)
error_checking.assert_is_list(three_predictor_matrices)
assert (len(three_predictor_matrices) == 3)
for this_predictor_matrix in three_predictor_matrices:
if (this_predictor_matrix is None):
continue
error_checking.assert_is_numpy_array_without_nan(this_predictor_matrix)
these_flags = numpy.array([(m is not None) for m in three_predictor_matrices], dtype=bool)
have_predictors_indices = numpy.where(these_flags)[0]
activation_tensor = None
for k in neuron_indices[::(- 1)]:
if (activation_tensor is None):
activation_tensor = model_object.get_layer(name=layer_name).output[(..., k)]
else:
activation_tensor = activation_tensor[(..., k)]
loss_tensor = ((activation_tensor - ideal_activation) ** 2)
num_examples = three_predictor_matrices[have_predictors_indices[0]].shape[0]
saliency_matrices = ([None] * len(have_predictors_indices))
for i in range(0, num_examples, NUM_EXAMPLES_PER_BATCH):
first_index = i
last_index = min([(i + NUM_EXAMPLES_PER_BATCH), num_examples])
these_matrices = saliency_utils.do_saliency_calculations(model_object=model_object, loss_tensor=loss_tensor, list_of_input_matrices=[three_predictor_matrices[k][(first_index:last_index, ...)] for k in have_predictors_indices])
for j in range(len(have_predictors_indices)):
if (saliency_matrices[j] is None):
these_dim = numpy.array(((num_examples,) + these_matrices[j].shape[1:]), dtype=int)
saliency_matrices[j] = numpy.full(these_dim, numpy.nan)
saliency_matrices[j][(first_index:last_index, ...)] = these_matrices[j]
three_saliency_matrices = ([None] * 3)
for (i, j) in enumerate(have_predictors_indices):
three_saliency_matrices[j] = saliency_matrices[i]
return three_saliency_matrices | def get_saliency_one_neuron(model_object, three_predictor_matrices, layer_name, neuron_indices, ideal_activation):
'Computes saliency maps with respect to activation of one neuron.\n\n The "relevant neuron" is that whose activation will be used in the numerator\n of the saliency equation. In other words, if the relevant neuron is n,\n the saliency of each predictor x will be d(a_n) / dx, where a_n is the\n activation of n.\n\n :param model_object: Trained neural net (instance of `keras.models.Model` or\n `keras.models.Sequential`).\n :param three_predictor_matrices: length-3 list, where each element is either\n None or a numpy array of predictors. Predictors must be formatted in\n the same way as for training.\n :param layer_name: Name of layer with relevant neuron.\n :param neuron_indices: 1-D numpy array with indices of relevant neuron.\n Must have length D - 1, where D = number of dimensions in layer output.\n The first dimension is the batch dimension, which always has length\n `None` in Keras.\n :param ideal_activation: Ideal neuron activation, used to define loss\n function. The loss function will be\n (neuron_activation - ideal_activation)**2.\n :return: three_saliency_matrices: length-3 list, where each element is\n either None or a numpy array of saliency values.\n three_saliency_matrices[i] will have the same shape as\n three_predictor_matrices[i].\n '
check_metadata(layer_name=layer_name, neuron_indices=neuron_indices, ideal_activation=ideal_activation)
error_checking.assert_is_list(three_predictor_matrices)
assert (len(three_predictor_matrices) == 3)
for this_predictor_matrix in three_predictor_matrices:
if (this_predictor_matrix is None):
continue
error_checking.assert_is_numpy_array_without_nan(this_predictor_matrix)
these_flags = numpy.array([(m is not None) for m in three_predictor_matrices], dtype=bool)
have_predictors_indices = numpy.where(these_flags)[0]
activation_tensor = None
for k in neuron_indices[::(- 1)]:
if (activation_tensor is None):
activation_tensor = model_object.get_layer(name=layer_name).output[(..., k)]
else:
activation_tensor = activation_tensor[(..., k)]
loss_tensor = ((activation_tensor - ideal_activation) ** 2)
num_examples = three_predictor_matrices[have_predictors_indices[0]].shape[0]
saliency_matrices = ([None] * len(have_predictors_indices))
for i in range(0, num_examples, NUM_EXAMPLES_PER_BATCH):
first_index = i
last_index = min([(i + NUM_EXAMPLES_PER_BATCH), num_examples])
these_matrices = saliency_utils.do_saliency_calculations(model_object=model_object, loss_tensor=loss_tensor, list_of_input_matrices=[three_predictor_matrices[k][(first_index:last_index, ...)] for k in have_predictors_indices])
for j in range(len(have_predictors_indices)):
if (saliency_matrices[j] is None):
these_dim = numpy.array(((num_examples,) + these_matrices[j].shape[1:]), dtype=int)
saliency_matrices[j] = numpy.full(these_dim, numpy.nan)
saliency_matrices[j][(first_index:last_index, ...)] = these_matrices[j]
three_saliency_matrices = ([None] * 3)
for (i, j) in enumerate(have_predictors_indices):
three_saliency_matrices[j] = saliency_matrices[i]
return three_saliency_matrices<|docstring|>Computes saliency maps with respect to activation of one neuron.
The "relevant neuron" is that whose activation will be used in the numerator
of the saliency equation. In other words, if the relevant neuron is n,
the saliency of each predictor x will be d(a_n) / dx, where a_n is the
activation of n.
:param model_object: Trained neural net (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param three_predictor_matrices: length-3 list, where each element is either
None or a numpy array of predictors. Predictors must be formatted in
the same way as for training.
:param layer_name: Name of layer with relevant neuron.
:param neuron_indices: 1-D numpy array with indices of relevant neuron.
Must have length D - 1, where D = number of dimensions in layer output.
The first dimension is the batch dimension, which always has length
`None` in Keras.
:param ideal_activation: Ideal neuron activation, used to define loss
function. The loss function will be
(neuron_activation - ideal_activation)**2.
:return: three_saliency_matrices: length-3 list, where each element is
either None or a numpy array of saliency values.
three_saliency_matrices[i] will have the same shape as
three_predictor_matrices[i].<|endoftext|> |
6a4eab573131d9f4234103ca3cf0e87f2feaa7d22eab281eef9f182fe4161314 | def write_composite_file(netcdf_file_name, three_saliency_matrices, three_input_grad_matrices, three_predictor_matrices, model_file_name, use_pmm, pmm_max_percentile_level=None):
'Writes composite saliency map to NetCDF file.\n\n :param netcdf_file_name: Path to output file.\n :param three_saliency_matrices: length-3 list, where each element is either\n None or a numpy array of saliency values. three_saliency_matrices[i]\n should have the same shape as the [i]th input tensor to the model, but\n without the first axis, which is the example axis.\n :param three_input_grad_matrices: Same as `three_saliency_matrices` but with\n input-times-gradient values instead.\n :param three_predictor_matrices: Same as `three_saliency_matrices` but with\n predictor values instead. Predictor values must be formatted the same\n way as for training, e.g., normalized here if they are normalized for\n training.\n :param model_file_name: Path to file with neural net used to create saliency\n maps (readable by `neural_net.read_model`).\n :param use_pmm: Boolean flag. If True (False), maps were composited via\n probability-matched means (a simple average).\n :param pmm_max_percentile_level: Max percentile level for\n probability-matched means (PMM). If PMM was not used, leave this alone.\n '
error_checking.assert_is_list(three_saliency_matrices)
error_checking.assert_is_list(three_input_grad_matrices)
error_checking.assert_is_list(three_predictor_matrices)
assert (len(three_saliency_matrices) == 3)
assert (len(three_input_grad_matrices) == 3)
assert (len(three_predictor_matrices) == 3)
for i in range(len(three_saliency_matrices)):
if (three_saliency_matrices[i] is None):
assert (three_input_grad_matrices[i] is None)
assert (three_predictor_matrices[i] is None)
continue
error_checking.assert_is_numpy_array_without_nan(three_saliency_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_input_grad_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_predictor_matrices[i])
expected_dim = numpy.array(three_saliency_matrices[i].shape, dtype=int)
error_checking.assert_is_numpy_array(three_saliency_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_input_grad_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_predictor_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_string(model_file_name)
error_checking.assert_is_boolean(use_pmm)
if use_pmm:
error_checking.assert_is_geq(pmm_max_percentile_level, 90.0)
error_checking.assert_is_leq(pmm_max_percentile_level, 100.0)
else:
pmm_max_percentile_level = (- 1.0)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
dataset_object = netCDF4.Dataset(netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
dataset_object.setncattr(USE_PMM_KEY, int(use_pmm))
dataset_object.setncattr(PMM_MAX_PERCENTILE_KEY, pmm_max_percentile_level)
num_satellite_lag_times = None
if (three_saliency_matrices[0] is not None):
num_grid_rows = three_saliency_matrices[0].shape[0]
num_grid_columns = three_saliency_matrices[0].shape[1]
num_satellite_lag_times = three_saliency_matrices[0].shape[2]
num_gridded_satellite_channels = three_saliency_matrices[0].shape[3]
dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
dataset_object.createDimension(GRID_COLUMN_DIMENSION_KEY, num_grid_columns)
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
dataset_object.createDimension(GRIDDED_SATELLITE_CHANNEL_KEY, num_gridded_satellite_channels)
these_dim = (GRID_ROW_DIMENSION_KEY, GRID_COLUMN_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, GRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(GRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_PREDICTORS_KEY][:] = three_predictor_matrices[0]
if (three_saliency_matrices[1] is not None):
if (num_satellite_lag_times is None):
num_satellite_lag_times = three_saliency_matrices[1].shape[0]
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
else:
assert (num_satellite_lag_times == three_saliency_matrices[1].shape[0])
num_ungridded_satellite_channels = three_saliency_matrices[1].shape[1]
dataset_object.createDimension(UNGRIDDED_SATELLITE_CHANNEL_KEY, num_ungridded_satellite_channels)
these_dim = (SATELLITE_LAG_TIME_KEY, UNGRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(UNGRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_PREDICTORS_KEY][:] = three_predictor_matrices[1]
if (three_saliency_matrices[2] is not None):
num_ships_lag_times = three_saliency_matrices[2].shape[0]
num_ships_channels = three_saliency_matrices[2].shape[1]
dataset_object.createDimension(SHIPS_LAG_TIME_KEY, num_ships_lag_times)
dataset_object.createDimension(SHIPS_CHANNEL_KEY, num_ships_channels)
these_dim = (SHIPS_LAG_TIME_KEY, SHIPS_CHANNEL_KEY)
dataset_object.createVariable(SHIPS_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_SALIENCY_KEY][:] = three_saliency_matrices[2]
dataset_object.createVariable(SHIPS_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:] = three_input_grad_matrices[2]
dataset_object.createVariable(SHIPS_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_PREDICTORS_KEY][:] = three_predictor_matrices[2]
dataset_object.close() | Writes composite saliency map to NetCDF file.
:param netcdf_file_name: Path to output file.
:param three_saliency_matrices: length-3 list, where each element is either
None or a numpy array of saliency values. three_saliency_matrices[i]
should have the same shape as the [i]th input tensor to the model, but
without the first axis, which is the example axis.
:param three_input_grad_matrices: Same as `three_saliency_matrices` but with
input-times-gradient values instead.
:param three_predictor_matrices: Same as `three_saliency_matrices` but with
predictor values instead. Predictor values must be formatted the same
way as for training, e.g., normalized here if they are normalized for
training.
:param model_file_name: Path to file with neural net used to create saliency
maps (readable by `neural_net.read_model`).
:param use_pmm: Boolean flag. If True (False), maps were composited via
probability-matched means (a simple average).
:param pmm_max_percentile_level: Max percentile level for
probability-matched means (PMM). If PMM was not used, leave this alone. | ml4tc/machine_learning/saliency.py | write_composite_file | thunderhoser/ml4tc | 2 | python | def write_composite_file(netcdf_file_name, three_saliency_matrices, three_input_grad_matrices, three_predictor_matrices, model_file_name, use_pmm, pmm_max_percentile_level=None):
'Writes composite saliency map to NetCDF file.\n\n :param netcdf_file_name: Path to output file.\n :param three_saliency_matrices: length-3 list, where each element is either\n None or a numpy array of saliency values. three_saliency_matrices[i]\n should have the same shape as the [i]th input tensor to the model, but\n without the first axis, which is the example axis.\n :param three_input_grad_matrices: Same as `three_saliency_matrices` but with\n input-times-gradient values instead.\n :param three_predictor_matrices: Same as `three_saliency_matrices` but with\n predictor values instead. Predictor values must be formatted the same\n way as for training, e.g., normalized here if they are normalized for\n training.\n :param model_file_name: Path to file with neural net used to create saliency\n maps (readable by `neural_net.read_model`).\n :param use_pmm: Boolean flag. If True (False), maps were composited via\n probability-matched means (a simple average).\n :param pmm_max_percentile_level: Max percentile level for\n probability-matched means (PMM). If PMM was not used, leave this alone.\n '
error_checking.assert_is_list(three_saliency_matrices)
error_checking.assert_is_list(three_input_grad_matrices)
error_checking.assert_is_list(three_predictor_matrices)
assert (len(three_saliency_matrices) == 3)
assert (len(three_input_grad_matrices) == 3)
assert (len(three_predictor_matrices) == 3)
for i in range(len(three_saliency_matrices)):
if (three_saliency_matrices[i] is None):
assert (three_input_grad_matrices[i] is None)
assert (three_predictor_matrices[i] is None)
continue
error_checking.assert_is_numpy_array_without_nan(three_saliency_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_input_grad_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_predictor_matrices[i])
expected_dim = numpy.array(three_saliency_matrices[i].shape, dtype=int)
error_checking.assert_is_numpy_array(three_saliency_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_input_grad_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_predictor_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_string(model_file_name)
error_checking.assert_is_boolean(use_pmm)
if use_pmm:
error_checking.assert_is_geq(pmm_max_percentile_level, 90.0)
error_checking.assert_is_leq(pmm_max_percentile_level, 100.0)
else:
pmm_max_percentile_level = (- 1.0)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
dataset_object = netCDF4.Dataset(netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
dataset_object.setncattr(USE_PMM_KEY, int(use_pmm))
dataset_object.setncattr(PMM_MAX_PERCENTILE_KEY, pmm_max_percentile_level)
num_satellite_lag_times = None
if (three_saliency_matrices[0] is not None):
num_grid_rows = three_saliency_matrices[0].shape[0]
num_grid_columns = three_saliency_matrices[0].shape[1]
num_satellite_lag_times = three_saliency_matrices[0].shape[2]
num_gridded_satellite_channels = three_saliency_matrices[0].shape[3]
dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
dataset_object.createDimension(GRID_COLUMN_DIMENSION_KEY, num_grid_columns)
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
dataset_object.createDimension(GRIDDED_SATELLITE_CHANNEL_KEY, num_gridded_satellite_channels)
these_dim = (GRID_ROW_DIMENSION_KEY, GRID_COLUMN_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, GRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(GRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_PREDICTORS_KEY][:] = three_predictor_matrices[0]
if (three_saliency_matrices[1] is not None):
if (num_satellite_lag_times is None):
num_satellite_lag_times = three_saliency_matrices[1].shape[0]
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
else:
assert (num_satellite_lag_times == three_saliency_matrices[1].shape[0])
num_ungridded_satellite_channels = three_saliency_matrices[1].shape[1]
dataset_object.createDimension(UNGRIDDED_SATELLITE_CHANNEL_KEY, num_ungridded_satellite_channels)
these_dim = (SATELLITE_LAG_TIME_KEY, UNGRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(UNGRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_PREDICTORS_KEY][:] = three_predictor_matrices[1]
if (three_saliency_matrices[2] is not None):
num_ships_lag_times = three_saliency_matrices[2].shape[0]
num_ships_channels = three_saliency_matrices[2].shape[1]
dataset_object.createDimension(SHIPS_LAG_TIME_KEY, num_ships_lag_times)
dataset_object.createDimension(SHIPS_CHANNEL_KEY, num_ships_channels)
these_dim = (SHIPS_LAG_TIME_KEY, SHIPS_CHANNEL_KEY)
dataset_object.createVariable(SHIPS_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_SALIENCY_KEY][:] = three_saliency_matrices[2]
dataset_object.createVariable(SHIPS_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:] = three_input_grad_matrices[2]
dataset_object.createVariable(SHIPS_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_PREDICTORS_KEY][:] = three_predictor_matrices[2]
dataset_object.close() | def write_composite_file(netcdf_file_name, three_saliency_matrices, three_input_grad_matrices, three_predictor_matrices, model_file_name, use_pmm, pmm_max_percentile_level=None):
'Writes composite saliency map to NetCDF file.\n\n :param netcdf_file_name: Path to output file.\n :param three_saliency_matrices: length-3 list, where each element is either\n None or a numpy array of saliency values. three_saliency_matrices[i]\n should have the same shape as the [i]th input tensor to the model, but\n without the first axis, which is the example axis.\n :param three_input_grad_matrices: Same as `three_saliency_matrices` but with\n input-times-gradient values instead.\n :param three_predictor_matrices: Same as `three_saliency_matrices` but with\n predictor values instead. Predictor values must be formatted the same\n way as for training, e.g., normalized here if they are normalized for\n training.\n :param model_file_name: Path to file with neural net used to create saliency\n maps (readable by `neural_net.read_model`).\n :param use_pmm: Boolean flag. If True (False), maps were composited via\n probability-matched means (a simple average).\n :param pmm_max_percentile_level: Max percentile level for\n probability-matched means (PMM). If PMM was not used, leave this alone.\n '
error_checking.assert_is_list(three_saliency_matrices)
error_checking.assert_is_list(three_input_grad_matrices)
error_checking.assert_is_list(three_predictor_matrices)
assert (len(three_saliency_matrices) == 3)
assert (len(three_input_grad_matrices) == 3)
assert (len(three_predictor_matrices) == 3)
for i in range(len(three_saliency_matrices)):
if (three_saliency_matrices[i] is None):
assert (three_input_grad_matrices[i] is None)
assert (three_predictor_matrices[i] is None)
continue
error_checking.assert_is_numpy_array_without_nan(three_saliency_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_input_grad_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_predictor_matrices[i])
expected_dim = numpy.array(three_saliency_matrices[i].shape, dtype=int)
error_checking.assert_is_numpy_array(three_saliency_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_input_grad_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_predictor_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_string(model_file_name)
error_checking.assert_is_boolean(use_pmm)
if use_pmm:
error_checking.assert_is_geq(pmm_max_percentile_level, 90.0)
error_checking.assert_is_leq(pmm_max_percentile_level, 100.0)
else:
pmm_max_percentile_level = (- 1.0)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
dataset_object = netCDF4.Dataset(netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
dataset_object.setncattr(USE_PMM_KEY, int(use_pmm))
dataset_object.setncattr(PMM_MAX_PERCENTILE_KEY, pmm_max_percentile_level)
num_satellite_lag_times = None
if (three_saliency_matrices[0] is not None):
num_grid_rows = three_saliency_matrices[0].shape[0]
num_grid_columns = three_saliency_matrices[0].shape[1]
num_satellite_lag_times = three_saliency_matrices[0].shape[2]
num_gridded_satellite_channels = three_saliency_matrices[0].shape[3]
dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
dataset_object.createDimension(GRID_COLUMN_DIMENSION_KEY, num_grid_columns)
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
dataset_object.createDimension(GRIDDED_SATELLITE_CHANNEL_KEY, num_gridded_satellite_channels)
these_dim = (GRID_ROW_DIMENSION_KEY, GRID_COLUMN_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, GRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(GRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_PREDICTORS_KEY][:] = three_predictor_matrices[0]
if (three_saliency_matrices[1] is not None):
if (num_satellite_lag_times is None):
num_satellite_lag_times = three_saliency_matrices[1].shape[0]
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
else:
assert (num_satellite_lag_times == three_saliency_matrices[1].shape[0])
num_ungridded_satellite_channels = three_saliency_matrices[1].shape[1]
dataset_object.createDimension(UNGRIDDED_SATELLITE_CHANNEL_KEY, num_ungridded_satellite_channels)
these_dim = (SATELLITE_LAG_TIME_KEY, UNGRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(UNGRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_PREDICTORS_KEY][:] = three_predictor_matrices[1]
if (three_saliency_matrices[2] is not None):
num_ships_lag_times = three_saliency_matrices[2].shape[0]
num_ships_channels = three_saliency_matrices[2].shape[1]
dataset_object.createDimension(SHIPS_LAG_TIME_KEY, num_ships_lag_times)
dataset_object.createDimension(SHIPS_CHANNEL_KEY, num_ships_channels)
these_dim = (SHIPS_LAG_TIME_KEY, SHIPS_CHANNEL_KEY)
dataset_object.createVariable(SHIPS_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_SALIENCY_KEY][:] = three_saliency_matrices[2]
dataset_object.createVariable(SHIPS_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:] = three_input_grad_matrices[2]
dataset_object.createVariable(SHIPS_PREDICTORS_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_PREDICTORS_KEY][:] = three_predictor_matrices[2]
dataset_object.close()<|docstring|>Writes composite saliency map to NetCDF file.
:param netcdf_file_name: Path to output file.
:param three_saliency_matrices: length-3 list, where each element is either
None or a numpy array of saliency values. three_saliency_matrices[i]
should have the same shape as the [i]th input tensor to the model, but
without the first axis, which is the example axis.
:param three_input_grad_matrices: Same as `three_saliency_matrices` but with
input-times-gradient values instead.
:param three_predictor_matrices: Same as `three_saliency_matrices` but with
predictor values instead. Predictor values must be formatted the same
way as for training, e.g., normalized here if they are normalized for
training.
:param model_file_name: Path to file with neural net used to create saliency
maps (readable by `neural_net.read_model`).
:param use_pmm: Boolean flag. If True (False), maps were composited via
probability-matched means (a simple average).
:param pmm_max_percentile_level: Max percentile level for
probability-matched means (PMM). If PMM was not used, leave this alone.<|endoftext|> |
97ddf8c469e193e8224d38058694692752cf33609e2d7e615a4d350fe6168e5c | def read_composite_file(netcdf_file_name):
"Reads composite saliency map from NetCDF file.\n\n :param netcdf_file_name: Path to input file.\n :return: saliency_dict: Dictionary with the following keys.\n saliency_dict['three_saliency_matrices']: See doc for\n `write_composite_file`.\n saliency_dict['three_input_grad_matrices']: Same.\n saliency_dict['three_predictor_matrices']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['use_pmm']: Same.\n saliency_dict['pmm_max_percentile_level']: Same.\n "
dataset_object = netCDF4.Dataset(netcdf_file_name)
three_saliency_matrices = []
three_input_grad_matrices = []
three_predictor_matrices = []
if (GRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
if (UNGRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
if (SHIPS_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[SHIPS_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[SHIPS_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
saliency_dict = {THREE_SALIENCY_KEY: three_saliency_matrices, THREE_INPUT_GRAD_KEY: three_input_grad_matrices, THREE_PREDICTORS_KEY: three_predictor_matrices, MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)), USE_PMM_KEY: bool(getattr(dataset_object, USE_PMM_KEY)), PMM_MAX_PERCENTILE_KEY: float(getattr(dataset_object, PMM_MAX_PERCENTILE_KEY))}
dataset_object.close()
return saliency_dict | Reads composite saliency map from NetCDF file.
:param netcdf_file_name: Path to input file.
:return: saliency_dict: Dictionary with the following keys.
saliency_dict['three_saliency_matrices']: See doc for
`write_composite_file`.
saliency_dict['three_input_grad_matrices']: Same.
saliency_dict['three_predictor_matrices']: Same.
saliency_dict['model_file_name']: Same.
saliency_dict['use_pmm']: Same.
saliency_dict['pmm_max_percentile_level']: Same. | ml4tc/machine_learning/saliency.py | read_composite_file | thunderhoser/ml4tc | 2 | python | def read_composite_file(netcdf_file_name):
"Reads composite saliency map from NetCDF file.\n\n :param netcdf_file_name: Path to input file.\n :return: saliency_dict: Dictionary with the following keys.\n saliency_dict['three_saliency_matrices']: See doc for\n `write_composite_file`.\n saliency_dict['three_input_grad_matrices']: Same.\n saliency_dict['three_predictor_matrices']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['use_pmm']: Same.\n saliency_dict['pmm_max_percentile_level']: Same.\n "
dataset_object = netCDF4.Dataset(netcdf_file_name)
three_saliency_matrices = []
three_input_grad_matrices = []
three_predictor_matrices = []
if (GRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
if (UNGRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
if (SHIPS_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[SHIPS_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[SHIPS_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
saliency_dict = {THREE_SALIENCY_KEY: three_saliency_matrices, THREE_INPUT_GRAD_KEY: three_input_grad_matrices, THREE_PREDICTORS_KEY: three_predictor_matrices, MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)), USE_PMM_KEY: bool(getattr(dataset_object, USE_PMM_KEY)), PMM_MAX_PERCENTILE_KEY: float(getattr(dataset_object, PMM_MAX_PERCENTILE_KEY))}
dataset_object.close()
return saliency_dict | def read_composite_file(netcdf_file_name):
"Reads composite saliency map from NetCDF file.\n\n :param netcdf_file_name: Path to input file.\n :return: saliency_dict: Dictionary with the following keys.\n saliency_dict['three_saliency_matrices']: See doc for\n `write_composite_file`.\n saliency_dict['three_input_grad_matrices']: Same.\n saliency_dict['three_predictor_matrices']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['use_pmm']: Same.\n saliency_dict['pmm_max_percentile_level']: Same.\n "
dataset_object = netCDF4.Dataset(netcdf_file_name)
three_saliency_matrices = []
three_input_grad_matrices = []
three_predictor_matrices = []
if (GRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
if (UNGRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
if (SHIPS_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[SHIPS_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:])
three_predictor_matrices.append(dataset_object.variables[SHIPS_PREDICTORS_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
three_predictor_matrices.append(None)
saliency_dict = {THREE_SALIENCY_KEY: three_saliency_matrices, THREE_INPUT_GRAD_KEY: three_input_grad_matrices, THREE_PREDICTORS_KEY: three_predictor_matrices, MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)), USE_PMM_KEY: bool(getattr(dataset_object, USE_PMM_KEY)), PMM_MAX_PERCENTILE_KEY: float(getattr(dataset_object, PMM_MAX_PERCENTILE_KEY))}
dataset_object.close()
return saliency_dict<|docstring|>Reads composite saliency map from NetCDF file.
:param netcdf_file_name: Path to input file.
:return: saliency_dict: Dictionary with the following keys.
saliency_dict['three_saliency_matrices']: See doc for
`write_composite_file`.
saliency_dict['three_input_grad_matrices']: Same.
saliency_dict['three_predictor_matrices']: Same.
saliency_dict['model_file_name']: Same.
saliency_dict['use_pmm']: Same.
saliency_dict['pmm_max_percentile_level']: Same.<|endoftext|> |
4b5a4335c6a5182f84976689071b194b803a5e4f7fb0540363d0836439e234ac | def write_file(netcdf_file_name, three_saliency_matrices, three_input_grad_matrices, cyclone_id_strings, init_times_unix_sec, model_file_name, layer_name, neuron_indices, ideal_activation):
'Writes saliency maps to NetCDF file.\n\n E = number of examples\n\n :param netcdf_file_name: Path to output file.\n :param three_saliency_matrices: length-3 list, where each element is either\n None or a numpy array of saliency values. three_saliency_matrices[i]\n should have the same shape as the [i]th input tensor to the model.\n Also, the first axis of each numpy array must have length E.\n :param three_input_grad_matrices: Same as `three_saliency_matrices` but with\n input-times-gradient values instead.\n :param cyclone_id_strings: length-E list of cyclone IDs.\n :param init_times_unix_sec: length-E numpy array of forecast-init times.\n :param model_file_name: Path to file with neural net used to create saliency\n maps (readable by `neural_net.read_model`).\n :param layer_name: See doc for `get_saliency_one_neuron`.\n :param neuron_indices: Same.\n :param ideal_activation: Same.\n '
check_metadata(layer_name=layer_name, neuron_indices=neuron_indices, ideal_activation=ideal_activation)
error_checking.assert_is_list(three_saliency_matrices)
error_checking.assert_is_list(three_input_grad_matrices)
assert (len(three_saliency_matrices) == 3)
assert (len(three_input_grad_matrices) == 3)
num_examples = (- 1)
for i in range(len(three_saliency_matrices)):
if (three_saliency_matrices[i] is None):
assert (three_input_grad_matrices[i] is None)
continue
error_checking.assert_is_numpy_array_without_nan(three_saliency_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_input_grad_matrices[i])
if (i == 0):
num_examples = three_saliency_matrices[i].shape[0]
expected_dim = numpy.array(((num_examples,) + three_saliency_matrices[i].shape[1:]), dtype=int)
error_checking.assert_is_numpy_array(three_saliency_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_input_grad_matrices[i], exact_dimensions=expected_dim)
expected_dim = numpy.array([num_examples], dtype=int)
error_checking.assert_is_string_list(cyclone_id_strings)
error_checking.assert_is_numpy_array(numpy.array(cyclone_id_strings), exact_dimensions=expected_dim)
error_checking.assert_is_integer_numpy_array(init_times_unix_sec)
error_checking.assert_is_numpy_array(init_times_unix_sec, exact_dimensions=expected_dim)
error_checking.assert_is_string(model_file_name)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
dataset_object = netCDF4.Dataset(netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
dataset_object.setncattr(LAYER_NAME_KEY, layer_name)
dataset_object.setncattr(NEURON_INDICES_KEY, neuron_indices)
dataset_object.setncattr(IDEAL_ACTIVATION_KEY, ideal_activation)
dataset_object.createDimension(EXAMPLE_DIMENSION_KEY, num_examples)
num_satellite_lag_times = None
if (three_saliency_matrices[0] is not None):
num_grid_rows = three_saliency_matrices[0].shape[1]
num_grid_columns = three_saliency_matrices[0].shape[2]
num_satellite_lag_times = three_saliency_matrices[0].shape[3]
num_gridded_satellite_channels = three_saliency_matrices[0].shape[4]
dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
dataset_object.createDimension(GRID_COLUMN_DIMENSION_KEY, num_grid_columns)
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
dataset_object.createDimension(GRIDDED_SATELLITE_CHANNEL_KEY, num_gridded_satellite_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, GRID_ROW_DIMENSION_KEY, GRID_COLUMN_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, GRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(GRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[0]
if (three_saliency_matrices[1] is not None):
if (num_satellite_lag_times is None):
num_satellite_lag_times = three_saliency_matrices[1].shape[1]
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
else:
assert (num_satellite_lag_times == three_saliency_matrices[1].shape[1])
num_ungridded_satellite_channels = three_saliency_matrices[1].shape[2]
dataset_object.createDimension(UNGRIDDED_SATELLITE_CHANNEL_KEY, num_ungridded_satellite_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, UNGRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(UNGRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[1]
if (three_saliency_matrices[2] is not None):
num_ships_lag_times = three_saliency_matrices[2].shape[1]
num_ships_channels = three_saliency_matrices[2].shape[2]
dataset_object.createDimension(SHIPS_LAG_TIME_KEY, num_ships_lag_times)
dataset_object.createDimension(SHIPS_CHANNEL_KEY, num_ships_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, SHIPS_LAG_TIME_KEY, SHIPS_CHANNEL_KEY)
dataset_object.createVariable(SHIPS_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_SALIENCY_KEY][:] = three_saliency_matrices[2]
dataset_object.createVariable(SHIPS_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:] = three_input_grad_matrices[2]
if (num_examples == 0):
num_id_characters = 1
else:
num_id_characters = numpy.max(numpy.array([len(id) for id in cyclone_id_strings]))
dataset_object.createDimension(CYCLONE_ID_CHAR_DIM_KEY, num_id_characters)
this_string_format = 'S{0:d}'.format(num_id_characters)
cyclone_ids_char_array = netCDF4.stringtochar(numpy.array(cyclone_id_strings, dtype=this_string_format))
dataset_object.createVariable(CYCLONE_IDS_KEY, datatype='S1', dimensions=(EXAMPLE_DIMENSION_KEY, CYCLONE_ID_CHAR_DIM_KEY))
dataset_object.variables[CYCLONE_IDS_KEY][:] = numpy.array(cyclone_ids_char_array)
dataset_object.createVariable(INIT_TIMES_KEY, datatype=numpy.int32, dimensions=EXAMPLE_DIMENSION_KEY)
dataset_object.variables[INIT_TIMES_KEY][:] = init_times_unix_sec
dataset_object.close() | Writes saliency maps to NetCDF file.
E = number of examples
:param netcdf_file_name: Path to output file.
:param three_saliency_matrices: length-3 list, where each element is either
None or a numpy array of saliency values. three_saliency_matrices[i]
should have the same shape as the [i]th input tensor to the model.
Also, the first axis of each numpy array must have length E.
:param three_input_grad_matrices: Same as `three_saliency_matrices` but with
input-times-gradient values instead.
:param cyclone_id_strings: length-E list of cyclone IDs.
:param init_times_unix_sec: length-E numpy array of forecast-init times.
:param model_file_name: Path to file with neural net used to create saliency
maps (readable by `neural_net.read_model`).
:param layer_name: See doc for `get_saliency_one_neuron`.
:param neuron_indices: Same.
:param ideal_activation: Same. | ml4tc/machine_learning/saliency.py | write_file | thunderhoser/ml4tc | 2 | python | def write_file(netcdf_file_name, three_saliency_matrices, three_input_grad_matrices, cyclone_id_strings, init_times_unix_sec, model_file_name, layer_name, neuron_indices, ideal_activation):
'Writes saliency maps to NetCDF file.\n\n E = number of examples\n\n :param netcdf_file_name: Path to output file.\n :param three_saliency_matrices: length-3 list, where each element is either\n None or a numpy array of saliency values. three_saliency_matrices[i]\n should have the same shape as the [i]th input tensor to the model.\n Also, the first axis of each numpy array must have length E.\n :param three_input_grad_matrices: Same as `three_saliency_matrices` but with\n input-times-gradient values instead.\n :param cyclone_id_strings: length-E list of cyclone IDs.\n :param init_times_unix_sec: length-E numpy array of forecast-init times.\n :param model_file_name: Path to file with neural net used to create saliency\n maps (readable by `neural_net.read_model`).\n :param layer_name: See doc for `get_saliency_one_neuron`.\n :param neuron_indices: Same.\n :param ideal_activation: Same.\n '
check_metadata(layer_name=layer_name, neuron_indices=neuron_indices, ideal_activation=ideal_activation)
error_checking.assert_is_list(three_saliency_matrices)
error_checking.assert_is_list(three_input_grad_matrices)
assert (len(three_saliency_matrices) == 3)
assert (len(three_input_grad_matrices) == 3)
num_examples = (- 1)
for i in range(len(three_saliency_matrices)):
if (three_saliency_matrices[i] is None):
assert (three_input_grad_matrices[i] is None)
continue
error_checking.assert_is_numpy_array_without_nan(three_saliency_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_input_grad_matrices[i])
if (i == 0):
num_examples = three_saliency_matrices[i].shape[0]
expected_dim = numpy.array(((num_examples,) + three_saliency_matrices[i].shape[1:]), dtype=int)
error_checking.assert_is_numpy_array(three_saliency_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_input_grad_matrices[i], exact_dimensions=expected_dim)
expected_dim = numpy.array([num_examples], dtype=int)
error_checking.assert_is_string_list(cyclone_id_strings)
error_checking.assert_is_numpy_array(numpy.array(cyclone_id_strings), exact_dimensions=expected_dim)
error_checking.assert_is_integer_numpy_array(init_times_unix_sec)
error_checking.assert_is_numpy_array(init_times_unix_sec, exact_dimensions=expected_dim)
error_checking.assert_is_string(model_file_name)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
dataset_object = netCDF4.Dataset(netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
dataset_object.setncattr(LAYER_NAME_KEY, layer_name)
dataset_object.setncattr(NEURON_INDICES_KEY, neuron_indices)
dataset_object.setncattr(IDEAL_ACTIVATION_KEY, ideal_activation)
dataset_object.createDimension(EXAMPLE_DIMENSION_KEY, num_examples)
num_satellite_lag_times = None
if (three_saliency_matrices[0] is not None):
num_grid_rows = three_saliency_matrices[0].shape[1]
num_grid_columns = three_saliency_matrices[0].shape[2]
num_satellite_lag_times = three_saliency_matrices[0].shape[3]
num_gridded_satellite_channels = three_saliency_matrices[0].shape[4]
dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
dataset_object.createDimension(GRID_COLUMN_DIMENSION_KEY, num_grid_columns)
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
dataset_object.createDimension(GRIDDED_SATELLITE_CHANNEL_KEY, num_gridded_satellite_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, GRID_ROW_DIMENSION_KEY, GRID_COLUMN_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, GRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(GRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[0]
if (three_saliency_matrices[1] is not None):
if (num_satellite_lag_times is None):
num_satellite_lag_times = three_saliency_matrices[1].shape[1]
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
else:
assert (num_satellite_lag_times == three_saliency_matrices[1].shape[1])
num_ungridded_satellite_channels = three_saliency_matrices[1].shape[2]
dataset_object.createDimension(UNGRIDDED_SATELLITE_CHANNEL_KEY, num_ungridded_satellite_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, UNGRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(UNGRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[1]
if (three_saliency_matrices[2] is not None):
num_ships_lag_times = three_saliency_matrices[2].shape[1]
num_ships_channels = three_saliency_matrices[2].shape[2]
dataset_object.createDimension(SHIPS_LAG_TIME_KEY, num_ships_lag_times)
dataset_object.createDimension(SHIPS_CHANNEL_KEY, num_ships_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, SHIPS_LAG_TIME_KEY, SHIPS_CHANNEL_KEY)
dataset_object.createVariable(SHIPS_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_SALIENCY_KEY][:] = three_saliency_matrices[2]
dataset_object.createVariable(SHIPS_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:] = three_input_grad_matrices[2]
if (num_examples == 0):
num_id_characters = 1
else:
num_id_characters = numpy.max(numpy.array([len(id) for id in cyclone_id_strings]))
dataset_object.createDimension(CYCLONE_ID_CHAR_DIM_KEY, num_id_characters)
this_string_format = 'S{0:d}'.format(num_id_characters)
cyclone_ids_char_array = netCDF4.stringtochar(numpy.array(cyclone_id_strings, dtype=this_string_format))
dataset_object.createVariable(CYCLONE_IDS_KEY, datatype='S1', dimensions=(EXAMPLE_DIMENSION_KEY, CYCLONE_ID_CHAR_DIM_KEY))
dataset_object.variables[CYCLONE_IDS_KEY][:] = numpy.array(cyclone_ids_char_array)
dataset_object.createVariable(INIT_TIMES_KEY, datatype=numpy.int32, dimensions=EXAMPLE_DIMENSION_KEY)
dataset_object.variables[INIT_TIMES_KEY][:] = init_times_unix_sec
dataset_object.close() | def write_file(netcdf_file_name, three_saliency_matrices, three_input_grad_matrices, cyclone_id_strings, init_times_unix_sec, model_file_name, layer_name, neuron_indices, ideal_activation):
'Writes saliency maps to NetCDF file.\n\n E = number of examples\n\n :param netcdf_file_name: Path to output file.\n :param three_saliency_matrices: length-3 list, where each element is either\n None or a numpy array of saliency values. three_saliency_matrices[i]\n should have the same shape as the [i]th input tensor to the model.\n Also, the first axis of each numpy array must have length E.\n :param three_input_grad_matrices: Same as `three_saliency_matrices` but with\n input-times-gradient values instead.\n :param cyclone_id_strings: length-E list of cyclone IDs.\n :param init_times_unix_sec: length-E numpy array of forecast-init times.\n :param model_file_name: Path to file with neural net used to create saliency\n maps (readable by `neural_net.read_model`).\n :param layer_name: See doc for `get_saliency_one_neuron`.\n :param neuron_indices: Same.\n :param ideal_activation: Same.\n '
check_metadata(layer_name=layer_name, neuron_indices=neuron_indices, ideal_activation=ideal_activation)
error_checking.assert_is_list(three_saliency_matrices)
error_checking.assert_is_list(three_input_grad_matrices)
assert (len(three_saliency_matrices) == 3)
assert (len(three_input_grad_matrices) == 3)
num_examples = (- 1)
for i in range(len(three_saliency_matrices)):
if (three_saliency_matrices[i] is None):
assert (three_input_grad_matrices[i] is None)
continue
error_checking.assert_is_numpy_array_without_nan(three_saliency_matrices[i])
error_checking.assert_is_numpy_array_without_nan(three_input_grad_matrices[i])
if (i == 0):
num_examples = three_saliency_matrices[i].shape[0]
expected_dim = numpy.array(((num_examples,) + three_saliency_matrices[i].shape[1:]), dtype=int)
error_checking.assert_is_numpy_array(three_saliency_matrices[i], exact_dimensions=expected_dim)
error_checking.assert_is_numpy_array(three_input_grad_matrices[i], exact_dimensions=expected_dim)
expected_dim = numpy.array([num_examples], dtype=int)
error_checking.assert_is_string_list(cyclone_id_strings)
error_checking.assert_is_numpy_array(numpy.array(cyclone_id_strings), exact_dimensions=expected_dim)
error_checking.assert_is_integer_numpy_array(init_times_unix_sec)
error_checking.assert_is_numpy_array(init_times_unix_sec, exact_dimensions=expected_dim)
error_checking.assert_is_string(model_file_name)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
dataset_object = netCDF4.Dataset(netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
dataset_object.setncattr(LAYER_NAME_KEY, layer_name)
dataset_object.setncattr(NEURON_INDICES_KEY, neuron_indices)
dataset_object.setncattr(IDEAL_ACTIVATION_KEY, ideal_activation)
dataset_object.createDimension(EXAMPLE_DIMENSION_KEY, num_examples)
num_satellite_lag_times = None
if (three_saliency_matrices[0] is not None):
num_grid_rows = three_saliency_matrices[0].shape[1]
num_grid_columns = three_saliency_matrices[0].shape[2]
num_satellite_lag_times = three_saliency_matrices[0].shape[3]
num_gridded_satellite_channels = three_saliency_matrices[0].shape[4]
dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
dataset_object.createDimension(GRID_COLUMN_DIMENSION_KEY, num_grid_columns)
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
dataset_object.createDimension(GRIDDED_SATELLITE_CHANNEL_KEY, num_gridded_satellite_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, GRID_ROW_DIMENSION_KEY, GRID_COLUMN_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, GRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(GRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[0]
dataset_object.createVariable(GRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[0]
if (three_saliency_matrices[1] is not None):
if (num_satellite_lag_times is None):
num_satellite_lag_times = three_saliency_matrices[1].shape[1]
dataset_object.createDimension(SATELLITE_LAG_TIME_KEY, num_satellite_lag_times)
else:
assert (num_satellite_lag_times == three_saliency_matrices[1].shape[1])
num_ungridded_satellite_channels = three_saliency_matrices[1].shape[2]
dataset_object.createDimension(UNGRIDDED_SATELLITE_CHANNEL_KEY, num_ungridded_satellite_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, SATELLITE_LAG_TIME_KEY, UNGRIDDED_SATELLITE_CHANNEL_KEY)
dataset_object.createVariable(UNGRIDDED_SATELLITE_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:] = three_saliency_matrices[1]
dataset_object.createVariable(UNGRIDDED_SATELLITE_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:] = three_input_grad_matrices[1]
if (three_saliency_matrices[2] is not None):
num_ships_lag_times = three_saliency_matrices[2].shape[1]
num_ships_channels = three_saliency_matrices[2].shape[2]
dataset_object.createDimension(SHIPS_LAG_TIME_KEY, num_ships_lag_times)
dataset_object.createDimension(SHIPS_CHANNEL_KEY, num_ships_channels)
these_dim = (EXAMPLE_DIMENSION_KEY, SHIPS_LAG_TIME_KEY, SHIPS_CHANNEL_KEY)
dataset_object.createVariable(SHIPS_SALIENCY_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_SALIENCY_KEY][:] = three_saliency_matrices[2]
dataset_object.createVariable(SHIPS_INPUT_GRAD_KEY, datatype=numpy.float32, dimensions=these_dim)
dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:] = three_input_grad_matrices[2]
if (num_examples == 0):
num_id_characters = 1
else:
num_id_characters = numpy.max(numpy.array([len(id) for id in cyclone_id_strings]))
dataset_object.createDimension(CYCLONE_ID_CHAR_DIM_KEY, num_id_characters)
this_string_format = 'S{0:d}'.format(num_id_characters)
cyclone_ids_char_array = netCDF4.stringtochar(numpy.array(cyclone_id_strings, dtype=this_string_format))
dataset_object.createVariable(CYCLONE_IDS_KEY, datatype='S1', dimensions=(EXAMPLE_DIMENSION_KEY, CYCLONE_ID_CHAR_DIM_KEY))
dataset_object.variables[CYCLONE_IDS_KEY][:] = numpy.array(cyclone_ids_char_array)
dataset_object.createVariable(INIT_TIMES_KEY, datatype=numpy.int32, dimensions=EXAMPLE_DIMENSION_KEY)
dataset_object.variables[INIT_TIMES_KEY][:] = init_times_unix_sec
dataset_object.close()<|docstring|>Writes saliency maps to NetCDF file.
E = number of examples
:param netcdf_file_name: Path to output file.
:param three_saliency_matrices: length-3 list, where each element is either
None or a numpy array of saliency values. three_saliency_matrices[i]
should have the same shape as the [i]th input tensor to the model.
Also, the first axis of each numpy array must have length E.
:param three_input_grad_matrices: Same as `three_saliency_matrices` but with
input-times-gradient values instead.
:param cyclone_id_strings: length-E list of cyclone IDs.
:param init_times_unix_sec: length-E numpy array of forecast-init times.
:param model_file_name: Path to file with neural net used to create saliency
maps (readable by `neural_net.read_model`).
:param layer_name: See doc for `get_saliency_one_neuron`.
:param neuron_indices: Same.
:param ideal_activation: Same.<|endoftext|> |
77ec04955554a5fa54ac3d44673ac318b776b7b11190419bf08ee417a8bf4d5d | def read_file(netcdf_file_name):
"Reads saliency maps from NetCDF file.\n\n :param netcdf_file_name: Path to input file.\n :return: saliency_dict: Dictionary with the following keys.\n saliency_dict['three_saliency_matrices']: See doc for `write_file`.\n saliency_dict['three_input_grad_matrices']: Same.\n saliency_dict['cyclone_id_strings']: Same.\n saliency_dict['init_times_unix_sec']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['layer_name']: Same.\n saliency_dict['neuron_indices']: Same.\n saliency_dict['ideal_activation']: Same.\n "
dataset_object = netCDF4.Dataset(netcdf_file_name)
three_saliency_matrices = []
three_input_grad_matrices = []
if (GRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if (UNGRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if (SHIPS_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[SHIPS_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
saliency_dict = {THREE_SALIENCY_KEY: three_saliency_matrices, THREE_INPUT_GRAD_KEY: three_input_grad_matrices, CYCLONE_IDS_KEY: [str(id) for id in netCDF4.chartostring(dataset_object.variables[CYCLONE_IDS_KEY][:])], INIT_TIMES_KEY: dataset_object.variables[INIT_TIMES_KEY][:], MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)), LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)), NEURON_INDICES_KEY: numpy.array(getattr(dataset_object, NEURON_INDICES_KEY), dtype=int), IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY)}
dataset_object.close()
return saliency_dict | Reads saliency maps from NetCDF file.
:param netcdf_file_name: Path to input file.
:return: saliency_dict: Dictionary with the following keys.
saliency_dict['three_saliency_matrices']: See doc for `write_file`.
saliency_dict['three_input_grad_matrices']: Same.
saliency_dict['cyclone_id_strings']: Same.
saliency_dict['init_times_unix_sec']: Same.
saliency_dict['model_file_name']: Same.
saliency_dict['layer_name']: Same.
saliency_dict['neuron_indices']: Same.
saliency_dict['ideal_activation']: Same. | ml4tc/machine_learning/saliency.py | read_file | thunderhoser/ml4tc | 2 | python | def read_file(netcdf_file_name):
"Reads saliency maps from NetCDF file.\n\n :param netcdf_file_name: Path to input file.\n :return: saliency_dict: Dictionary with the following keys.\n saliency_dict['three_saliency_matrices']: See doc for `write_file`.\n saliency_dict['three_input_grad_matrices']: Same.\n saliency_dict['cyclone_id_strings']: Same.\n saliency_dict['init_times_unix_sec']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['layer_name']: Same.\n saliency_dict['neuron_indices']: Same.\n saliency_dict['ideal_activation']: Same.\n "
dataset_object = netCDF4.Dataset(netcdf_file_name)
three_saliency_matrices = []
three_input_grad_matrices = []
if (GRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if (UNGRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if (SHIPS_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[SHIPS_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
saliency_dict = {THREE_SALIENCY_KEY: three_saliency_matrices, THREE_INPUT_GRAD_KEY: three_input_grad_matrices, CYCLONE_IDS_KEY: [str(id) for id in netCDF4.chartostring(dataset_object.variables[CYCLONE_IDS_KEY][:])], INIT_TIMES_KEY: dataset_object.variables[INIT_TIMES_KEY][:], MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)), LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)), NEURON_INDICES_KEY: numpy.array(getattr(dataset_object, NEURON_INDICES_KEY), dtype=int), IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY)}
dataset_object.close()
return saliency_dict | def read_file(netcdf_file_name):
"Reads saliency maps from NetCDF file.\n\n :param netcdf_file_name: Path to input file.\n :return: saliency_dict: Dictionary with the following keys.\n saliency_dict['three_saliency_matrices']: See doc for `write_file`.\n saliency_dict['three_input_grad_matrices']: Same.\n saliency_dict['cyclone_id_strings']: Same.\n saliency_dict['init_times_unix_sec']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['layer_name']: Same.\n saliency_dict['neuron_indices']: Same.\n saliency_dict['ideal_activation']: Same.\n "
dataset_object = netCDF4.Dataset(netcdf_file_name)
three_saliency_matrices = []
three_input_grad_matrices = []
if (GRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[GRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if (UNGRIDDED_SATELLITE_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[UNGRIDDED_SATELLITE_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
if (SHIPS_SALIENCY_KEY in dataset_object.variables):
three_saliency_matrices.append(dataset_object.variables[SHIPS_SALIENCY_KEY][:])
three_input_grad_matrices.append(dataset_object.variables[SHIPS_INPUT_GRAD_KEY][:])
else:
three_saliency_matrices.append(None)
three_input_grad_matrices.append(None)
saliency_dict = {THREE_SALIENCY_KEY: three_saliency_matrices, THREE_INPUT_GRAD_KEY: three_input_grad_matrices, CYCLONE_IDS_KEY: [str(id) for id in netCDF4.chartostring(dataset_object.variables[CYCLONE_IDS_KEY][:])], INIT_TIMES_KEY: dataset_object.variables[INIT_TIMES_KEY][:], MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)), LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)), NEURON_INDICES_KEY: numpy.array(getattr(dataset_object, NEURON_INDICES_KEY), dtype=int), IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY)}
dataset_object.close()
return saliency_dict<|docstring|>Reads saliency maps from NetCDF file.
:param netcdf_file_name: Path to input file.
:return: saliency_dict: Dictionary with the following keys.
saliency_dict['three_saliency_matrices']: See doc for `write_file`.
saliency_dict['three_input_grad_matrices']: Same.
saliency_dict['cyclone_id_strings']: Same.
saliency_dict['init_times_unix_sec']: Same.
saliency_dict['model_file_name']: Same.
saliency_dict['layer_name']: Same.
saliency_dict['neuron_indices']: Same.
saliency_dict['ideal_activation']: Same.<|endoftext|> |
95375aedb115f017568215e9ec0d3721e8fccf4e90b110ae27bb16d75508a3c9 | def fit_eval(self, x, y, validation_data=None, mc=False, verbose=0, epochs=1, metric='mse', **config):
'\n fit_eval will build a model at the first time it is built\n config will be updated for the second or later times with only non-model-arch\n params be functional\n TODO: check the updated params and decide if the model is needed to be rebuilt\n '
x = self._reshape_input(x)
y = self._reshape_input(y)
def update_config():
config.setdefault('past_seq_len', x.shape[(- 2)])
config.setdefault('future_seq_len', y.shape[(- 2)])
config.setdefault('input_feature_num', x.shape[(- 1)])
config.setdefault('output_feature_num', y.shape[(- 1)])
if (not self.model_built):
update_config()
self.build(config)
else:
tmp_config = self.config.copy()
tmp_config.update(config)
self._check_config(**tmp_config)
self.config.update(config)
epoch_losses = []
(x, y, validation_data) = PytorchBaseModel.covert_input(x, y, validation_data)
for i in range(epochs):
train_loss = self._train_epoch(x, y)
epoch_losses.append(train_loss)
train_stats = {'loss': np.mean(epoch_losses), 'last_loss': epoch_losses[(- 1)]}
assert (validation_data is not None), 'You must input validation data!'
val_stats = self._validate(validation_data[0], validation_data[1], metric=metric)
self.onnx_model_built = False
return val_stats[metric] | fit_eval will build a model at the first time it is built
config will be updated for the second or later times with only non-model-arch
params be functional
TODO: check the updated params and decide if the model is needed to be rebuilt | pyzoo/zoo/automl/model/base_pytorch_model.py | fit_eval | OpheliaLjh/analytics-zoo | 4 | python | def fit_eval(self, x, y, validation_data=None, mc=False, verbose=0, epochs=1, metric='mse', **config):
'\n fit_eval will build a model at the first time it is built\n config will be updated for the second or later times with only non-model-arch\n params be functional\n TODO: check the updated params and decide if the model is needed to be rebuilt\n '
x = self._reshape_input(x)
y = self._reshape_input(y)
def update_config():
config.setdefault('past_seq_len', x.shape[(- 2)])
config.setdefault('future_seq_len', y.shape[(- 2)])
config.setdefault('input_feature_num', x.shape[(- 1)])
config.setdefault('output_feature_num', y.shape[(- 1)])
if (not self.model_built):
update_config()
self.build(config)
else:
tmp_config = self.config.copy()
tmp_config.update(config)
self._check_config(**tmp_config)
self.config.update(config)
epoch_losses = []
(x, y, validation_data) = PytorchBaseModel.covert_input(x, y, validation_data)
for i in range(epochs):
train_loss = self._train_epoch(x, y)
epoch_losses.append(train_loss)
train_stats = {'loss': np.mean(epoch_losses), 'last_loss': epoch_losses[(- 1)]}
assert (validation_data is not None), 'You must input validation data!'
val_stats = self._validate(validation_data[0], validation_data[1], metric=metric)
self.onnx_model_built = False
return val_stats[metric] | def fit_eval(self, x, y, validation_data=None, mc=False, verbose=0, epochs=1, metric='mse', **config):
'\n fit_eval will build a model at the first time it is built\n config will be updated for the second or later times with only non-model-arch\n params be functional\n TODO: check the updated params and decide if the model is needed to be rebuilt\n '
x = self._reshape_input(x)
y = self._reshape_input(y)
def update_config():
config.setdefault('past_seq_len', x.shape[(- 2)])
config.setdefault('future_seq_len', y.shape[(- 2)])
config.setdefault('input_feature_num', x.shape[(- 1)])
config.setdefault('output_feature_num', y.shape[(- 1)])
if (not self.model_built):
update_config()
self.build(config)
else:
tmp_config = self.config.copy()
tmp_config.update(config)
self._check_config(**tmp_config)
self.config.update(config)
epoch_losses = []
(x, y, validation_data) = PytorchBaseModel.covert_input(x, y, validation_data)
for i in range(epochs):
train_loss = self._train_epoch(x, y)
epoch_losses.append(train_loss)
train_stats = {'loss': np.mean(epoch_losses), 'last_loss': epoch_losses[(- 1)]}
assert (validation_data is not None), 'You must input validation data!'
val_stats = self._validate(validation_data[0], validation_data[1], metric=metric)
self.onnx_model_built = False
return val_stats[metric]<|docstring|>fit_eval will build a model at the first time it is built
config will be updated for the second or later times with only non-model-arch
params be functional
TODO: check the updated params and decide if the model is needed to be rebuilt<|endoftext|> |
d70cd16c2e6f7ab6e2b4037763da3b2125a3dd26721d7475ea0098a78b9419aa | @classmethod
def run(cls, benchmark, _, idfile: str) -> None:
'Generate random results\n\n Args:\n benchmark: the `lib.benchmark.base.Benchmark` object that has\n ordered running a series.\n _: ignored.\n idfile: the output file to store the results.\n '
if (('rw' in benchmark.oneseries) and ('rw' in benchmark.oneseries['rw'])):
output = {'read': [cls.__random_point() for _ in range(3)], 'write': [cls.__random_point() for _ in range(3)]}
else:
output = [cls.__random_point() for _ in range(3)]
with open(idfile, 'w', encoding='utf-8') as file:
json.dump(output, file, indent=4) | Generate random results
Args:
benchmark: the `lib.benchmark.base.Benchmark` object that has
ordered running a series.
_: ignored.
idfile: the output file to store the results. | tools/perf/lib/benchmark/runner/dummy.py | run | sinkinben/rpma | 2 | python | @classmethod
def run(cls, benchmark, _, idfile: str) -> None:
'Generate random results\n\n Args:\n benchmark: the `lib.benchmark.base.Benchmark` object that has\n ordered running a series.\n _: ignored.\n idfile: the output file to store the results.\n '
if (('rw' in benchmark.oneseries) and ('rw' in benchmark.oneseries['rw'])):
output = {'read': [cls.__random_point() for _ in range(3)], 'write': [cls.__random_point() for _ in range(3)]}
else:
output = [cls.__random_point() for _ in range(3)]
with open(idfile, 'w', encoding='utf-8') as file:
json.dump(output, file, indent=4) | @classmethod
def run(cls, benchmark, _, idfile: str) -> None:
'Generate random results\n\n Args:\n benchmark: the `lib.benchmark.base.Benchmark` object that has\n ordered running a series.\n _: ignored.\n idfile: the output file to store the results.\n '
if (('rw' in benchmark.oneseries) and ('rw' in benchmark.oneseries['rw'])):
output = {'read': [cls.__random_point() for _ in range(3)], 'write': [cls.__random_point() for _ in range(3)]}
else:
output = [cls.__random_point() for _ in range(3)]
with open(idfile, 'w', encoding='utf-8') as file:
json.dump(output, file, indent=4)<|docstring|>Generate random results
Args:
benchmark: the `lib.benchmark.base.Benchmark` object that has
ordered running a series.
_: ignored.
idfile: the output file to store the results.<|endoftext|> |
1eb303deec444b045491b70fcc3e76194b26b02e62c69f8105b44bb8b1de8003 | def discreteSampling(weights, domain, nrSamples):
'Samples from a discrete probability distribution.\n \n Parameters\n ----------\n weights : 1-D array_like\n Probability mass function.\n domain : 1-D array_like\n Categories or indices.\n nrSamples : int\n Number of samples.\n \n Returns\n -------\n domain : 1-D array_like\n Sampled categories.\n \n Examples\n --------\n >>> w = np.random.rand(10)\n >>> w /= np.sum(w)\n >>> ind = discreteSampling(w, np.arange(10), 2)\n '
weights /= np.sum(weights)
bins = np.cumsum(weights)
return domain[np.digitize(np.random.random_sample(nrSamples), bins)] | Samples from a discrete probability distribution.
Parameters
----------
weights : 1-D array_like
Probability mass function.
domain : 1-D array_like
Categories or indices.
nrSamples : int
Number of samples.
Returns
-------
domain : 1-D array_like
Sampled categories.
Examples
--------
>>> w = np.random.rand(10)
>>> w /= np.sum(w)
>>> ind = discreteSampling(w, np.arange(10), 2) | src/pyResampling.py | discreteSampling | can-cs/pyResampling | 0 | python | def discreteSampling(weights, domain, nrSamples):
'Samples from a discrete probability distribution.\n \n Parameters\n ----------\n weights : 1-D array_like\n Probability mass function.\n domain : 1-D array_like\n Categories or indices.\n nrSamples : int\n Number of samples.\n \n Returns\n -------\n domain : 1-D array_like\n Sampled categories.\n \n Examples\n --------\n >>> w = np.random.rand(10)\n >>> w /= np.sum(w)\n >>> ind = discreteSampling(w, np.arange(10), 2)\n '
weights /= np.sum(weights)
bins = np.cumsum(weights)
return domain[np.digitize(np.random.random_sample(nrSamples), bins)] | def discreteSampling(weights, domain, nrSamples):
'Samples from a discrete probability distribution.\n \n Parameters\n ----------\n weights : 1-D array_like\n Probability mass function.\n domain : 1-D array_like\n Categories or indices.\n nrSamples : int\n Number of samples.\n \n Returns\n -------\n domain : 1-D array_like\n Sampled categories.\n \n Examples\n --------\n >>> w = np.random.rand(10)\n >>> w /= np.sum(w)\n >>> ind = discreteSampling(w, np.arange(10), 2)\n '
weights /= np.sum(weights)
bins = np.cumsum(weights)
return domain[np.digitize(np.random.random_sample(nrSamples), bins)]<|docstring|>Samples from a discrete probability distribution.
Parameters
----------
weights : 1-D array_like
Probability mass function.
domain : 1-D array_like
Categories or indices.
nrSamples : int
Number of samples.
Returns
-------
domain : 1-D array_like
Sampled categories.
Examples
--------
>>> w = np.random.rand(10)
>>> w /= np.sum(w)
>>> ind = discreteSampling(w, np.arange(10), 2)<|endoftext|> |
c9dfaa81f25edf4d3fd2b74d88449b6d9450930b636650e035482bcbc6132a7e | def resampling(w, scheme='mult'):
"Resampling of particle indices.\n \n Parameters\n ----------\n w : 1-D array_like\n Normalized weights\n scheme : string\n Resampling scheme to use:\n \n mult : Multinomial resampling\n \n res : Residual resampling\n \n strat : Stratified resampling\n \n sys : Systematic resampling\n \n Returns\n -------\n ind : 1-D array_like\n Indices of resampled particles.\n \n Examples\n --------\n >>> w = np.random.rand(10)\n >>> w /= np.sum(w)\n >>> ind = resampling(w, scheme='res')\n "
N = w.shape[0]
ind = np.arange(N)
if (scheme == 'mult'):
ind = discreteSampling(w, np.arange(N), N)
elif (scheme == 'res'):
R = np.sum(np.floor((N * w)))
if (R == N):
ind = np.arange(N)
else:
wBar = (((N * w) - np.floor((N * w))) / (N - R))
Ni = (np.floor((N * w)) + np.random.multinomial((N - R), wBar))
iter = 0
for i in range(N):
ind[iter:(iter + Ni[i])] = i
iter += Ni[i]
elif (scheme == 'strat'):
u = ((np.arange(N) + np.random.rand(N)) / N)
wc = np.cumsum(w)
ind = np.arange(N)[np.digitize(u, wc)]
elif (scheme == 'sys'):
u = ((np.arange(N) + np.random.rand(1)) / N)
wc = np.cumsum(w)
k = 0
for i in range(N):
while (wc[k] < u[i]):
k += 1
ind[i] = k
else:
raise Exception('No such resampling scheme.')
return ind | Resampling of particle indices.
Parameters
----------
w : 1-D array_like
Normalized weights
scheme : string
Resampling scheme to use:
mult : Multinomial resampling
res : Residual resampling
strat : Stratified resampling
sys : Systematic resampling
Returns
-------
ind : 1-D array_like
Indices of resampled particles.
Examples
--------
>>> w = np.random.rand(10)
>>> w /= np.sum(w)
>>> ind = resampling(w, scheme='res') | src/pyResampling.py | resampling | can-cs/pyResampling | 0 | python | def resampling(w, scheme='mult'):
"Resampling of particle indices.\n \n Parameters\n ----------\n w : 1-D array_like\n Normalized weights\n scheme : string\n Resampling scheme to use:\n \n mult : Multinomial resampling\n \n res : Residual resampling\n \n strat : Stratified resampling\n \n sys : Systematic resampling\n \n Returns\n -------\n ind : 1-D array_like\n Indices of resampled particles.\n \n Examples\n --------\n >>> w = np.random.rand(10)\n >>> w /= np.sum(w)\n >>> ind = resampling(w, scheme='res')\n "
N = w.shape[0]
ind = np.arange(N)
if (scheme == 'mult'):
ind = discreteSampling(w, np.arange(N), N)
elif (scheme == 'res'):
R = np.sum(np.floor((N * w)))
if (R == N):
ind = np.arange(N)
else:
wBar = (((N * w) - np.floor((N * w))) / (N - R))
Ni = (np.floor((N * w)) + np.random.multinomial((N - R), wBar))
iter = 0
for i in range(N):
ind[iter:(iter + Ni[i])] = i
iter += Ni[i]
elif (scheme == 'strat'):
u = ((np.arange(N) + np.random.rand(N)) / N)
wc = np.cumsum(w)
ind = np.arange(N)[np.digitize(u, wc)]
elif (scheme == 'sys'):
u = ((np.arange(N) + np.random.rand(1)) / N)
wc = np.cumsum(w)
k = 0
for i in range(N):
while (wc[k] < u[i]):
k += 1
ind[i] = k
else:
raise Exception('No such resampling scheme.')
return ind | def resampling(w, scheme='mult'):
"Resampling of particle indices.\n \n Parameters\n ----------\n w : 1-D array_like\n Normalized weights\n scheme : string\n Resampling scheme to use:\n \n mult : Multinomial resampling\n \n res : Residual resampling\n \n strat : Stratified resampling\n \n sys : Systematic resampling\n \n Returns\n -------\n ind : 1-D array_like\n Indices of resampled particles.\n \n Examples\n --------\n >>> w = np.random.rand(10)\n >>> w /= np.sum(w)\n >>> ind = resampling(w, scheme='res')\n "
N = w.shape[0]
ind = np.arange(N)
if (scheme == 'mult'):
ind = discreteSampling(w, np.arange(N), N)
elif (scheme == 'res'):
R = np.sum(np.floor((N * w)))
if (R == N):
ind = np.arange(N)
else:
wBar = (((N * w) - np.floor((N * w))) / (N - R))
Ni = (np.floor((N * w)) + np.random.multinomial((N - R), wBar))
iter = 0
for i in range(N):
ind[iter:(iter + Ni[i])] = i
iter += Ni[i]
elif (scheme == 'strat'):
u = ((np.arange(N) + np.random.rand(N)) / N)
wc = np.cumsum(w)
ind = np.arange(N)[np.digitize(u, wc)]
elif (scheme == 'sys'):
u = ((np.arange(N) + np.random.rand(1)) / N)
wc = np.cumsum(w)
k = 0
for i in range(N):
while (wc[k] < u[i]):
k += 1
ind[i] = k
else:
raise Exception('No such resampling scheme.')
return ind<|docstring|>Resampling of particle indices.
Parameters
----------
w : 1-D array_like
Normalized weights
scheme : string
Resampling scheme to use:
mult : Multinomial resampling
res : Residual resampling
strat : Stratified resampling
sys : Systematic resampling
Returns
-------
ind : 1-D array_like
Indices of resampled particles.
Examples
--------
>>> w = np.random.rand(10)
>>> w /= np.sum(w)
>>> ind = resampling(w, scheme='res')<|endoftext|> |
233df30edca811f1bf3ceef12eb47fe29bcc05c537ad09964e3fe1a506205227 | @click.command()
@click.argument('config_file')
def run(config_file):
'This program is the starting point for every neural_network. It pulls together the configuration and all necessary\n neural_network classes to load\n\n '
config = load_config(config_file)
config_global = config['global']
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
seed = config_global.get('random_seed', 1)
np.random.seed(seed)
tf.set_random_seed(seed)
with tf.Session(config=sess_config) as sess:
data_module = config['data-module']
model_module = config['model-module']
training_module = config['training-module']
evaluation_module = config.get('evaluation-module', None)
DataClass = importlib.import_module(data_module).component
ModelClass = importlib.import_module(model_module).component
TrainingClass = importlib.import_module(training_module).component
EvaluationClass = (importlib.import_module(evaluation_module).component if evaluation_module else None)
logger = logging.getLogger('neural_network')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(config['logger']['level'])
handler_stdout.setFormatter(formatter)
logger.addHandler(handler_stdout)
if ('path' in config['logger']):
log_fn = os.path.abspath(config['logger']['path'])
log_dir = os.path.dirname(log_fn)
if (not os.path.exists(log_dir)):
os.makedirs(log_dir)
handler_file = logging.FileHandler(log_fn)
handler_file.setLevel(config['logger']['level'])
handler_file.setFormatter(formatter)
logger.addHandler(handler_file)
logger.setLevel(config['logger']['level'])
data = DataClass(config['data'], config_global, logger)
logger.info('Setting up the data')
data.setup()
model = ModelClass(config['model'], config_global, logger)
logger.info('Building the model')
model.build(data, sess)
mode = config_global['mode']
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
if (mode == 'train'):
logger.info('Training mode')
training = TrainingClass(config['training'], config_global, logger)
logger.info('Starting the training process')
training.start(model, data, sess)
elif (mode == 'predict'):
logger.info('Evaluation mode')
assert (evaluation_module is not None), 'No eval module -- check the config file!'
evaluation = EvaluationClass(config['evaluation'], config_global, logger)
evaluation.start_prediction(model, data, sess, saver)
else:
logger.warning(('Check the operation mode in the config file: %s' % mode))
logger.info('DONE') | This program is the starting point for every neural_network. It pulls together the configuration and all necessary
neural_network classes to load | nn/run_experiment.py | run | UKPLab/lsdsem2017-story-cloze | 12 | python | @click.command()
@click.argument('config_file')
def run(config_file):
'This program is the starting point for every neural_network. It pulls together the configuration and all necessary\n neural_network classes to load\n\n '
config = load_config(config_file)
config_global = config['global']
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
seed = config_global.get('random_seed', 1)
np.random.seed(seed)
tf.set_random_seed(seed)
with tf.Session(config=sess_config) as sess:
data_module = config['data-module']
model_module = config['model-module']
training_module = config['training-module']
evaluation_module = config.get('evaluation-module', None)
DataClass = importlib.import_module(data_module).component
ModelClass = importlib.import_module(model_module).component
TrainingClass = importlib.import_module(training_module).component
EvaluationClass = (importlib.import_module(evaluation_module).component if evaluation_module else None)
logger = logging.getLogger('neural_network')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(config['logger']['level'])
handler_stdout.setFormatter(formatter)
logger.addHandler(handler_stdout)
if ('path' in config['logger']):
log_fn = os.path.abspath(config['logger']['path'])
log_dir = os.path.dirname(log_fn)
if (not os.path.exists(log_dir)):
os.makedirs(log_dir)
handler_file = logging.FileHandler(log_fn)
handler_file.setLevel(config['logger']['level'])
handler_file.setFormatter(formatter)
logger.addHandler(handler_file)
logger.setLevel(config['logger']['level'])
data = DataClass(config['data'], config_global, logger)
logger.info('Setting up the data')
data.setup()
model = ModelClass(config['model'], config_global, logger)
logger.info('Building the model')
model.build(data, sess)
mode = config_global['mode']
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
if (mode == 'train'):
logger.info('Training mode')
training = TrainingClass(config['training'], config_global, logger)
logger.info('Starting the training process')
training.start(model, data, sess)
elif (mode == 'predict'):
logger.info('Evaluation mode')
assert (evaluation_module is not None), 'No eval module -- check the config file!'
evaluation = EvaluationClass(config['evaluation'], config_global, logger)
evaluation.start_prediction(model, data, sess, saver)
else:
logger.warning(('Check the operation mode in the config file: %s' % mode))
logger.info('DONE') | @click.command()
@click.argument('config_file')
def run(config_file):
'This program is the starting point for every neural_network. It pulls together the configuration and all necessary\n neural_network classes to load\n\n '
config = load_config(config_file)
config_global = config['global']
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
seed = config_global.get('random_seed', 1)
np.random.seed(seed)
tf.set_random_seed(seed)
with tf.Session(config=sess_config) as sess:
data_module = config['data-module']
model_module = config['model-module']
training_module = config['training-module']
evaluation_module = config.get('evaluation-module', None)
DataClass = importlib.import_module(data_module).component
ModelClass = importlib.import_module(model_module).component
TrainingClass = importlib.import_module(training_module).component
EvaluationClass = (importlib.import_module(evaluation_module).component if evaluation_module else None)
logger = logging.getLogger('neural_network')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(config['logger']['level'])
handler_stdout.setFormatter(formatter)
logger.addHandler(handler_stdout)
if ('path' in config['logger']):
log_fn = os.path.abspath(config['logger']['path'])
log_dir = os.path.dirname(log_fn)
if (not os.path.exists(log_dir)):
os.makedirs(log_dir)
handler_file = logging.FileHandler(log_fn)
handler_file.setLevel(config['logger']['level'])
handler_file.setFormatter(formatter)
logger.addHandler(handler_file)
logger.setLevel(config['logger']['level'])
data = DataClass(config['data'], config_global, logger)
logger.info('Setting up the data')
data.setup()
model = ModelClass(config['model'], config_global, logger)
logger.info('Building the model')
model.build(data, sess)
mode = config_global['mode']
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
if (mode == 'train'):
logger.info('Training mode')
training = TrainingClass(config['training'], config_global, logger)
logger.info('Starting the training process')
training.start(model, data, sess)
elif (mode == 'predict'):
logger.info('Evaluation mode')
assert (evaluation_module is not None), 'No eval module -- check the config file!'
evaluation = EvaluationClass(config['evaluation'], config_global, logger)
evaluation.start_prediction(model, data, sess, saver)
else:
logger.warning(('Check the operation mode in the config file: %s' % mode))
logger.info('DONE')<|docstring|>This program is the starting point for every neural_network. It pulls together the configuration and all necessary
neural_network classes to load<|endoftext|> |
80a493239328fe84f715c7701a69e7cd1c809a8ebee5aba4b616330b15210f4b | def numJewelsInStones(self, J, S):
'\n :type J: str\n :type S: str\n :rtype: int\n '
count = 0
for jewel in J:
for stone in S:
if (jewel == stone):
count += 1
return count | :type J: str
:type S: str
:rtype: int | algorithm/leetcode/2018-03-25.py | numJewelsInStones | mhoonjeon/problemsolving | 0 | python | def numJewelsInStones(self, J, S):
'\n :type J: str\n :type S: str\n :rtype: int\n '
count = 0
for jewel in J:
for stone in S:
if (jewel == stone):
count += 1
return count | def numJewelsInStones(self, J, S):
'\n :type J: str\n :type S: str\n :rtype: int\n '
count = 0
for jewel in J:
for stone in S:
if (jewel == stone):
count += 1
return count<|docstring|>:type J: str
:type S: str
:rtype: int<|endoftext|> |
0fdfe218951efa08bf7ccc278266528fde7d2ad4a9303e0f8bae060bdb30e292 | def numberOfLines(self, widths, S):
'\n :type widths: List[int]\n :type S: str\n :rtype: List[int]\n '
lines = 1
line_width = 0
for ch in S:
index = (ord(ch) - ord('a'))
if ((line_width + widths[index]) <= 100):
line_width += widths[index]
else:
lines += 1
line_width = widths[index]
return [lines, line_width] | :type widths: List[int]
:type S: str
:rtype: List[int] | algorithm/leetcode/2018-03-25.py | numberOfLines | mhoonjeon/problemsolving | 0 | python | def numberOfLines(self, widths, S):
'\n :type widths: List[int]\n :type S: str\n :rtype: List[int]\n '
lines = 1
line_width = 0
for ch in S:
index = (ord(ch) - ord('a'))
if ((line_width + widths[index]) <= 100):
line_width += widths[index]
else:
lines += 1
line_width = widths[index]
return [lines, line_width] | def numberOfLines(self, widths, S):
'\n :type widths: List[int]\n :type S: str\n :rtype: List[int]\n '
lines = 1
line_width = 0
for ch in S:
index = (ord(ch) - ord('a'))
if ((line_width + widths[index]) <= 100):
line_width += widths[index]
else:
lines += 1
line_width = widths[index]
return [lines, line_width]<|docstring|>:type widths: List[int]
:type S: str
:rtype: List[int]<|endoftext|> |
dcf38366be4da12e81a582f9d1a34d5edfd08243f5f93f4cf9c18866fcb621c2 | def uniqueMorseRepresentations(self, words):
'\n :type words: List[str]\n :rtype: int\n '
word_set = []
for word in words:
s = ''
for ch in word:
s += self.alpha_morse[ch]
word_set.append(s)
return len(list(set(word_set))) | :type words: List[str]
:rtype: int | algorithm/leetcode/2018-03-25.py | uniqueMorseRepresentations | mhoonjeon/problemsolving | 0 | python | def uniqueMorseRepresentations(self, words):
'\n :type words: List[str]\n :rtype: int\n '
word_set = []
for word in words:
s =
for ch in word:
s += self.alpha_morse[ch]
word_set.append(s)
return len(list(set(word_set))) | def uniqueMorseRepresentations(self, words):
'\n :type words: List[str]\n :rtype: int\n '
word_set = []
for word in words:
s =
for ch in word:
s += self.alpha_morse[ch]
word_set.append(s)
return len(list(set(word_set)))<|docstring|>:type words: List[str]
:rtype: int<|endoftext|> |
dd3c81d849de39e6d1754378a3ff733dfb56eff3ccde26ba2c4390103f9fa74a | def _quick_sub_sort_tail(self, start, end):
'循环版本,模拟尾递归,可以大大减少递归栈深度,而且时间复杂度不变'
while (start < end):
pivot = self._rand_partition(start, end)
if ((pivot - start) < (end - pivot)):
self._quick_sub_sort_tail(start, (pivot - 1))
start = (pivot + 1)
else:
self._quick_sub_sort_tail((pivot + 1), end)
end = (pivot - 1) | 循环版本,模拟尾递归,可以大大减少递归栈深度,而且时间复杂度不变 | algorithms/ch02sort/m05_quick_sort.py | _quick_sub_sort_tail | yidao620c/core-algorithm | 819 | python | def _quick_sub_sort_tail(self, start, end):
while (start < end):
pivot = self._rand_partition(start, end)
if ((pivot - start) < (end - pivot)):
self._quick_sub_sort_tail(start, (pivot - 1))
start = (pivot + 1)
else:
self._quick_sub_sort_tail((pivot + 1), end)
end = (pivot - 1) | def _quick_sub_sort_tail(self, start, end):
while (start < end):
pivot = self._rand_partition(start, end)
if ((pivot - start) < (end - pivot)):
self._quick_sub_sort_tail(start, (pivot - 1))
start = (pivot + 1)
else:
self._quick_sub_sort_tail((pivot + 1), end)
end = (pivot - 1)<|docstring|>循环版本,模拟尾递归,可以大大减少递归栈深度,而且时间复杂度不变<|endoftext|> |
39af815b4453e92f45433fcdc89b84d511f02a5dc040abff639a6db948b52c72 | def _rand_partition(self, start, end):
'分解子数组: 随机化版本'
pivot = randint(start, end)
(self.seq[pivot], self.seq[end]) = (self.seq[end], self.seq[pivot])
pivot_value = self.seq[end]
i = (start - 1)
for j in range(start, end):
if (self.seq[j] <= pivot_value):
i += 1
(self.seq[i], self.seq[j]) = (self.seq[j], self.seq[i])
(self.seq[(i + 1)], self.seq[end]) = (self.seq[end], self.seq[(i + 1)])
return (i + 1) | 分解子数组: 随机化版本 | algorithms/ch02sort/m05_quick_sort.py | _rand_partition | yidao620c/core-algorithm | 819 | python | def _rand_partition(self, start, end):
pivot = randint(start, end)
(self.seq[pivot], self.seq[end]) = (self.seq[end], self.seq[pivot])
pivot_value = self.seq[end]
i = (start - 1)
for j in range(start, end):
if (self.seq[j] <= pivot_value):
i += 1
(self.seq[i], self.seq[j]) = (self.seq[j], self.seq[i])
(self.seq[(i + 1)], self.seq[end]) = (self.seq[end], self.seq[(i + 1)])
return (i + 1) | def _rand_partition(self, start, end):
pivot = randint(start, end)
(self.seq[pivot], self.seq[end]) = (self.seq[end], self.seq[pivot])
pivot_value = self.seq[end]
i = (start - 1)
for j in range(start, end):
if (self.seq[j] <= pivot_value):
i += 1
(self.seq[i], self.seq[j]) = (self.seq[j], self.seq[i])
(self.seq[(i + 1)], self.seq[end]) = (self.seq[end], self.seq[(i + 1)])
return (i + 1)<|docstring|>分解子数组: 随机化版本<|endoftext|> |
ab4725ac78e7533804e78e555f1bd0f7aa245feab368e07215c190e8f5cc222c | def _quick_sub_sort_recursive(self, start, end):
'递归版本的'
if (start < end):
q = self._rand_partition(start, end)
self._quick_sub_sort_recursive(start, (q - 1))
self._quick_sub_sort_recursive((q + 1), end) | 递归版本的 | algorithms/ch02sort/m05_quick_sort.py | _quick_sub_sort_recursive | yidao620c/core-algorithm | 819 | python | def _quick_sub_sort_recursive(self, start, end):
if (start < end):
q = self._rand_partition(start, end)
self._quick_sub_sort_recursive(start, (q - 1))
self._quick_sub_sort_recursive((q + 1), end) | def _quick_sub_sort_recursive(self, start, end):
if (start < end):
q = self._rand_partition(start, end)
self._quick_sub_sort_recursive(start, (q - 1))
self._quick_sub_sort_recursive((q + 1), end)<|docstring|>递归版本的<|endoftext|> |
c131ad33c0c4049a4069a8ae7ca337712b87112004bb883aba3e6ad1f1453463 | def prepare_data(self, obj, data):
'\n Hook for modifying outgoing data\n '
return data | Hook for modifying outgoing data | flask_peewee/rest/__init__.py | prepare_data | rammie/flask-peewee | 0 | python | def prepare_data(self, obj, data):
'\n \n '
return data | def prepare_data(self, obj, data):
'\n \n '
return data<|docstring|>Hook for modifying outgoing data<|endoftext|> |
b0f0667412debe7948f51880923a2b8876469524f333a44ae9619a7dc319bcd6 | def __init__(self) -> None:
'Initialize the internal data structure.'
self._src = None
warnings.warn('The QuadraticProgramToIsing class is deprecated and will be removed in a future release. Use the .to_ising() method on a QuadraticProgram object instead.', DeprecationWarning) | Initialize the internal data structure. | qiskit/optimization/converters/quadratic_program_to_ising.py | __init__ | MartenSkogh/qiskit-aqua | 15 | python | def __init__(self) -> None:
self._src = None
warnings.warn('The QuadraticProgramToIsing class is deprecated and will be removed in a future release. Use the .to_ising() method on a QuadraticProgram object instead.', DeprecationWarning) | def __init__(self) -> None:
self._src = None
warnings.warn('The QuadraticProgramToIsing class is deprecated and will be removed in a future release. Use the .to_ising() method on a QuadraticProgram object instead.', DeprecationWarning)<|docstring|>Initialize the internal data structure.<|endoftext|> |
00a0eeab66be0627b86242216b4ef472b8edced6eac97e4b489b4f740effea0b | def encode(self, op: QuadraticProgram) -> Tuple[(OperatorBase, float)]:
'Convert a problem into a qubit operator\n\n Args:\n op: The optimization problem to be converted. Must be an unconstrained problem with\n binary variables only.\n Returns:\n The qubit operator of the problem and the shift value.\n Raises:\n QiskitOptimizationError: If a variable type is not binary.\n QiskitOptimizationError: If constraints exist in the problem.\n '
self._src = op
return self._src.to_ising() | Convert a problem into a qubit operator
Args:
op: The optimization problem to be converted. Must be an unconstrained problem with
binary variables only.
Returns:
The qubit operator of the problem and the shift value.
Raises:
QiskitOptimizationError: If a variable type is not binary.
QiskitOptimizationError: If constraints exist in the problem. | qiskit/optimization/converters/quadratic_program_to_ising.py | encode | MartenSkogh/qiskit-aqua | 15 | python | def encode(self, op: QuadraticProgram) -> Tuple[(OperatorBase, float)]:
'Convert a problem into a qubit operator\n\n Args:\n op: The optimization problem to be converted. Must be an unconstrained problem with\n binary variables only.\n Returns:\n The qubit operator of the problem and the shift value.\n Raises:\n QiskitOptimizationError: If a variable type is not binary.\n QiskitOptimizationError: If constraints exist in the problem.\n '
self._src = op
return self._src.to_ising() | def encode(self, op: QuadraticProgram) -> Tuple[(OperatorBase, float)]:
'Convert a problem into a qubit operator\n\n Args:\n op: The optimization problem to be converted. Must be an unconstrained problem with\n binary variables only.\n Returns:\n The qubit operator of the problem and the shift value.\n Raises:\n QiskitOptimizationError: If a variable type is not binary.\n QiskitOptimizationError: If constraints exist in the problem.\n '
self._src = op
return self._src.to_ising()<|docstring|>Convert a problem into a qubit operator
Args:
op: The optimization problem to be converted. Must be an unconstrained problem with
binary variables only.
Returns:
The qubit operator of the problem and the shift value.
Raises:
QiskitOptimizationError: If a variable type is not binary.
QiskitOptimizationError: If constraints exist in the problem.<|endoftext|> |
acdf5bfd64fb5c3ab557176afbab4616e27ea426d3e195a3342f0ee15ae452ea | def set_fake_reg_key(fake_reg_key: FakeRegistryKey, sub_key: Union[(str, None)]=None, last_modified_ns: Union[(int, None)]=None) -> FakeRegistryKey:
"\n Creates a registry key if it does not exist already\n\n >>> fake_reg_root = FakeRegistryKey()\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE').full_key == 'HKEY_LOCAL_MACHINE'\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT'\n "
if (last_modified_ns is None):
last_modified_ns = get_windows_timestamp_now()
key_parts_full = fake_reg_key.full_key.split('\\')
if sub_key:
key_parts_sub = sub_key.split('\\')
else:
key_parts_sub = []
data = fake_reg_key
for key_part in key_parts_sub:
key_parts_full.append(key_part)
if (key_part not in data.subkeys):
data.subkeys[key_part] = FakeRegistryKey()
data.subkeys[key_part].full_key = '\\'.join(key_parts_full).strip('\\')
data.subkeys[key_part].last_modified_ns = last_modified_ns
data.subkeys[key_part].parent_fake_registry_key = data
data = data.subkeys[key_part]
return data | Creates a registry key if it does not exist already
>>> fake_reg_root = FakeRegistryKey()
>>> assert set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE').full_key == 'HKEY_LOCAL_MACHINE'
>>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,
... sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT' | fake_winreg/fake_reg.py | set_fake_reg_key | bitranox/fake_winreg | 2 | python | def set_fake_reg_key(fake_reg_key: FakeRegistryKey, sub_key: Union[(str, None)]=None, last_modified_ns: Union[(int, None)]=None) -> FakeRegistryKey:
"\n Creates a registry key if it does not exist already\n\n >>> fake_reg_root = FakeRegistryKey()\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE').full_key == 'HKEY_LOCAL_MACHINE'\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT'\n "
if (last_modified_ns is None):
last_modified_ns = get_windows_timestamp_now()
key_parts_full = fake_reg_key.full_key.split('\\')
if sub_key:
key_parts_sub = sub_key.split('\\')
else:
key_parts_sub = []
data = fake_reg_key
for key_part in key_parts_sub:
key_parts_full.append(key_part)
if (key_part not in data.subkeys):
data.subkeys[key_part] = FakeRegistryKey()
data.subkeys[key_part].full_key = '\\'.join(key_parts_full).strip('\\')
data.subkeys[key_part].last_modified_ns = last_modified_ns
data.subkeys[key_part].parent_fake_registry_key = data
data = data.subkeys[key_part]
return data | def set_fake_reg_key(fake_reg_key: FakeRegistryKey, sub_key: Union[(str, None)]=None, last_modified_ns: Union[(int, None)]=None) -> FakeRegistryKey:
"\n Creates a registry key if it does not exist already\n\n >>> fake_reg_root = FakeRegistryKey()\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE').full_key == 'HKEY_LOCAL_MACHINE'\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT'\n "
if (last_modified_ns is None):
last_modified_ns = get_windows_timestamp_now()
key_parts_full = fake_reg_key.full_key.split('\\')
if sub_key:
key_parts_sub = sub_key.split('\\')
else:
key_parts_sub = []
data = fake_reg_key
for key_part in key_parts_sub:
key_parts_full.append(key_part)
if (key_part not in data.subkeys):
data.subkeys[key_part] = FakeRegistryKey()
data.subkeys[key_part].full_key = '\\'.join(key_parts_full).strip('\\')
data.subkeys[key_part].last_modified_ns = last_modified_ns
data.subkeys[key_part].parent_fake_registry_key = data
data = data.subkeys[key_part]
return data<|docstring|>Creates a registry key if it does not exist already
>>> fake_reg_root = FakeRegistryKey()
>>> assert set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE').full_key == 'HKEY_LOCAL_MACHINE'
>>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,
... sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT'<|endoftext|> |
1196bd53b8923064ebd77c2f4a0fbf49702ed079fbcb5c0f0971b664df377f16 | def get_fake_reg_key(fake_reg_key: FakeRegistryKey, sub_key: str) -> FakeRegistryKey:
'\n >>> # Setup\n >>> fake_reg_root = FakeRegistryKey()\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\').full_key == r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\'\n\n >>> # Test existing Key\n >>> assert get_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\').full_key == r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\'\n\n >>> # Test not existing Key\n >>> get_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\non_existing\')\n Traceback (most recent call last):\n ...\n FileNotFoundError: subkey not found, key="HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft", subkey="non_existing"\n\n '
current_fake_reg_key = fake_reg_key
if sub_key:
key_parts = sub_key.split('\\')
for key_part in key_parts:
try:
current_fake_reg_key = current_fake_reg_key.subkeys[key_part]
except KeyError:
raise FileNotFoundError(f'subkey not found, key="{current_fake_reg_key.full_key}", subkey="{key_part}"')
return current_fake_reg_key | >>> # Setup
>>> fake_reg_root = FakeRegistryKey()
>>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,
... sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT'
>>> # Test existing Key
>>> assert get_fake_reg_key(fake_reg_key=fake_reg_root,
... sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT'
>>> # Test not existing Key
>>> get_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\non_existing')
Traceback (most recent call last):
...
FileNotFoundError: subkey not found, key="HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft", subkey="non_existing" | fake_winreg/fake_reg.py | get_fake_reg_key | bitranox/fake_winreg | 2 | python | def get_fake_reg_key(fake_reg_key: FakeRegistryKey, sub_key: str) -> FakeRegistryKey:
'\n >>> # Setup\n >>> fake_reg_root = FakeRegistryKey()\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\').full_key == r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\'\n\n >>> # Test existing Key\n >>> assert get_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\').full_key == r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\'\n\n >>> # Test not existing Key\n >>> get_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\non_existing\')\n Traceback (most recent call last):\n ...\n FileNotFoundError: subkey not found, key="HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft", subkey="non_existing"\n\n '
current_fake_reg_key = fake_reg_key
if sub_key:
key_parts = sub_key.split('\\')
for key_part in key_parts:
try:
current_fake_reg_key = current_fake_reg_key.subkeys[key_part]
except KeyError:
raise FileNotFoundError(f'subkey not found, key="{current_fake_reg_key.full_key}", subkey="{key_part}"')
return current_fake_reg_key | def get_fake_reg_key(fake_reg_key: FakeRegistryKey, sub_key: str) -> FakeRegistryKey:
'\n >>> # Setup\n >>> fake_reg_root = FakeRegistryKey()\n >>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\').full_key == r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\'\n\n >>> # Test existing Key\n >>> assert get_fake_reg_key(fake_reg_key=fake_reg_root,\n ... sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\').full_key == r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\'\n\n >>> # Test not existing Key\n >>> get_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r\'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\non_existing\')\n Traceback (most recent call last):\n ...\n FileNotFoundError: subkey not found, key="HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft", subkey="non_existing"\n\n '
current_fake_reg_key = fake_reg_key
if sub_key:
key_parts = sub_key.split('\\')
for key_part in key_parts:
try:
current_fake_reg_key = current_fake_reg_key.subkeys[key_part]
except KeyError:
raise FileNotFoundError(f'subkey not found, key="{current_fake_reg_key.full_key}", subkey="{key_part}"')
return current_fake_reg_key<|docstring|>>>> # Setup
>>> fake_reg_root = FakeRegistryKey()
>>> assert set_fake_reg_key(fake_reg_key=fake_reg_root,
... sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT'
>>> # Test existing Key
>>> assert get_fake_reg_key(fake_reg_key=fake_reg_root,
... sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT').full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT'
>>> # Test not existing Key
>>> get_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\non_existing')
Traceback (most recent call last):
...
FileNotFoundError: subkey not found, key="HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft", subkey="non_existing"<|endoftext|> |
5d7fa99df2cfd977cc865961b2c062065db57e0e562b70adb479248b79cf6826 | def set_fake_reg_value(fake_reg_key: FakeRegistryKey, sub_key: str, value_name: str, value: Union[(None, bytes, str, List[str], int)], value_type: int=REG_SZ, last_modified_ns: Union[(int, None)]=None) -> FakeRegistryValue:
"\n sets the value of the fake key - we create here keys on the fly, but beware of the last_modified_ns time !\n if You need to have correct last_modified_ns time for each subkey, You need to create those keys first\n\n >>> # Setup\n >>> fake_reg_root = FakeRegistryKey()\n >>> fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT')\n\n >>> # Write Value\n >>> fake_reg_value = set_fake_reg_value(fake_reg_key, '', 'CurrentBuild', '18363', REG_SZ)\n >>> assert fake_reg_value.full_key == r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT'\n >>> assert fake_reg_value.value_name == 'CurrentBuild'\n >>> assert fake_reg_value.value == '18363'\n >>> assert fake_reg_value.value_type == REG_SZ\n >>> last_modified_ns = fake_reg_value.last_modified_ns\n\n >>> # Write other Value to the same fake_registry_value :\n >>> time.sleep(0.1)\n >>> fake_reg_value = set_fake_reg_value(fake_reg_key, '', 'CurrentBuild', '18364', REG_MULTI_SZ, last_modified_ns=get_windows_timestamp_now())\n >>> assert fake_reg_value.value == '18364'\n >>> assert fake_reg_value.value_type == REG_MULTI_SZ\n >>> assert fake_reg_value.last_modified_ns != last_modified_ns\n\n "
if (last_modified_ns is None):
last_modified_ns = get_windows_timestamp_now()
fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_key, sub_key=sub_key, last_modified_ns=last_modified_ns)
if (value_name not in fake_reg_key.values):
fake_reg_key.values[value_name] = FakeRegistryValue()
fake_reg_value = fake_reg_key.values[value_name]
fake_reg_value.full_key = fake_reg_key.full_key
fake_reg_value.value_name = value_name
else:
fake_reg_value = fake_reg_key.values[value_name]
fake_reg_value.value = value
fake_reg_value.value_type = value_type
fake_reg_value.last_modified_ns = last_modified_ns
return fake_reg_value | sets the value of the fake key - we create here keys on the fly, but beware of the last_modified_ns time !
if You need to have correct last_modified_ns time for each subkey, You need to create those keys first
>>> # Setup
>>> fake_reg_root = FakeRegistryKey()
>>> fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT')
>>> # Write Value
>>> fake_reg_value = set_fake_reg_value(fake_reg_key, '', 'CurrentBuild', '18363', REG_SZ)
>>> assert fake_reg_value.full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT'
>>> assert fake_reg_value.value_name == 'CurrentBuild'
>>> assert fake_reg_value.value == '18363'
>>> assert fake_reg_value.value_type == REG_SZ
>>> last_modified_ns = fake_reg_value.last_modified_ns
>>> # Write other Value to the same fake_registry_value :
>>> time.sleep(0.1)
>>> fake_reg_value = set_fake_reg_value(fake_reg_key, '', 'CurrentBuild', '18364', REG_MULTI_SZ, last_modified_ns=get_windows_timestamp_now())
>>> assert fake_reg_value.value == '18364'
>>> assert fake_reg_value.value_type == REG_MULTI_SZ
>>> assert fake_reg_value.last_modified_ns != last_modified_ns | fake_winreg/fake_reg.py | set_fake_reg_value | bitranox/fake_winreg | 2 | python | def set_fake_reg_value(fake_reg_key: FakeRegistryKey, sub_key: str, value_name: str, value: Union[(None, bytes, str, List[str], int)], value_type: int=REG_SZ, last_modified_ns: Union[(int, None)]=None) -> FakeRegistryValue:
"\n sets the value of the fake key - we create here keys on the fly, but beware of the last_modified_ns time !\n if You need to have correct last_modified_ns time for each subkey, You need to create those keys first\n\n >>> # Setup\n >>> fake_reg_root = FakeRegistryKey()\n >>> fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT')\n\n >>> # Write Value\n >>> fake_reg_value = set_fake_reg_value(fake_reg_key, , 'CurrentBuild', '18363', REG_SZ)\n >>> assert fake_reg_value.full_key == r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT'\n >>> assert fake_reg_value.value_name == 'CurrentBuild'\n >>> assert fake_reg_value.value == '18363'\n >>> assert fake_reg_value.value_type == REG_SZ\n >>> last_modified_ns = fake_reg_value.last_modified_ns\n\n >>> # Write other Value to the same fake_registry_value :\n >>> time.sleep(0.1)\n >>> fake_reg_value = set_fake_reg_value(fake_reg_key, , 'CurrentBuild', '18364', REG_MULTI_SZ, last_modified_ns=get_windows_timestamp_now())\n >>> assert fake_reg_value.value == '18364'\n >>> assert fake_reg_value.value_type == REG_MULTI_SZ\n >>> assert fake_reg_value.last_modified_ns != last_modified_ns\n\n "
if (last_modified_ns is None):
last_modified_ns = get_windows_timestamp_now()
fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_key, sub_key=sub_key, last_modified_ns=last_modified_ns)
if (value_name not in fake_reg_key.values):
fake_reg_key.values[value_name] = FakeRegistryValue()
fake_reg_value = fake_reg_key.values[value_name]
fake_reg_value.full_key = fake_reg_key.full_key
fake_reg_value.value_name = value_name
else:
fake_reg_value = fake_reg_key.values[value_name]
fake_reg_value.value = value
fake_reg_value.value_type = value_type
fake_reg_value.last_modified_ns = last_modified_ns
return fake_reg_value | def set_fake_reg_value(fake_reg_key: FakeRegistryKey, sub_key: str, value_name: str, value: Union[(None, bytes, str, List[str], int)], value_type: int=REG_SZ, last_modified_ns: Union[(int, None)]=None) -> FakeRegistryValue:
"\n sets the value of the fake key - we create here keys on the fly, but beware of the last_modified_ns time !\n if You need to have correct last_modified_ns time for each subkey, You need to create those keys first\n\n >>> # Setup\n >>> fake_reg_root = FakeRegistryKey()\n >>> fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT')\n\n >>> # Write Value\n >>> fake_reg_value = set_fake_reg_value(fake_reg_key, , 'CurrentBuild', '18363', REG_SZ)\n >>> assert fake_reg_value.full_key == r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT'\n >>> assert fake_reg_value.value_name == 'CurrentBuild'\n >>> assert fake_reg_value.value == '18363'\n >>> assert fake_reg_value.value_type == REG_SZ\n >>> last_modified_ns = fake_reg_value.last_modified_ns\n\n >>> # Write other Value to the same fake_registry_value :\n >>> time.sleep(0.1)\n >>> fake_reg_value = set_fake_reg_value(fake_reg_key, , 'CurrentBuild', '18364', REG_MULTI_SZ, last_modified_ns=get_windows_timestamp_now())\n >>> assert fake_reg_value.value == '18364'\n >>> assert fake_reg_value.value_type == REG_MULTI_SZ\n >>> assert fake_reg_value.last_modified_ns != last_modified_ns\n\n "
if (last_modified_ns is None):
last_modified_ns = get_windows_timestamp_now()
fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_key, sub_key=sub_key, last_modified_ns=last_modified_ns)
if (value_name not in fake_reg_key.values):
fake_reg_key.values[value_name] = FakeRegistryValue()
fake_reg_value = fake_reg_key.values[value_name]
fake_reg_value.full_key = fake_reg_key.full_key
fake_reg_value.value_name = value_name
else:
fake_reg_value = fake_reg_key.values[value_name]
fake_reg_value.value = value
fake_reg_value.value_type = value_type
fake_reg_value.last_modified_ns = last_modified_ns
return fake_reg_value<|docstring|>sets the value of the fake key - we create here keys on the fly, but beware of the last_modified_ns time !
if You need to have correct last_modified_ns time for each subkey, You need to create those keys first
>>> # Setup
>>> fake_reg_root = FakeRegistryKey()
>>> fake_reg_key = set_fake_reg_key(fake_reg_key=fake_reg_root, sub_key=r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT')
>>> # Write Value
>>> fake_reg_value = set_fake_reg_value(fake_reg_key, '', 'CurrentBuild', '18363', REG_SZ)
>>> assert fake_reg_value.full_key == r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT'
>>> assert fake_reg_value.value_name == 'CurrentBuild'
>>> assert fake_reg_value.value == '18363'
>>> assert fake_reg_value.value_type == REG_SZ
>>> last_modified_ns = fake_reg_value.last_modified_ns
>>> # Write other Value to the same fake_registry_value :
>>> time.sleep(0.1)
>>> fake_reg_value = set_fake_reg_value(fake_reg_key, '', 'CurrentBuild', '18364', REG_MULTI_SZ, last_modified_ns=get_windows_timestamp_now())
>>> assert fake_reg_value.value == '18364'
>>> assert fake_reg_value.value_type == REG_MULTI_SZ
>>> assert fake_reg_value.last_modified_ns != last_modified_ns<|endoftext|> |
ddee84f411db70d5a35870fe4b3e1d8be582f7861888b75034d20ab4cee127c5 | def get_windows_timestamp_now() -> int:
'\n Windows Timestamp in hundreds of ns since 01.01.1601 – 00:00:00 UTC\n\n >>> assert get_windows_timestamp_now() > 10000\n >>> save_time = get_windows_timestamp_now()\n >>> time.sleep(0.1)\n >>> assert get_windows_timestamp_now() > save_time\n\n '
linux_timestamp_100ns = int((time.time() * 10000000.0))
linux_windows_diff_100ns = int((11644473600 * 10000000.0))
windows_timestamp_100ns = (linux_timestamp_100ns + linux_windows_diff_100ns)
return windows_timestamp_100ns | Windows Timestamp in hundreds of ns since 01.01.1601 – 00:00:00 UTC
>>> assert get_windows_timestamp_now() > 10000
>>> save_time = get_windows_timestamp_now()
>>> time.sleep(0.1)
>>> assert get_windows_timestamp_now() > save_time | fake_winreg/fake_reg.py | get_windows_timestamp_now | bitranox/fake_winreg | 2 | python | def get_windows_timestamp_now() -> int:
'\n Windows Timestamp in hundreds of ns since 01.01.1601 – 00:00:00 UTC\n\n >>> assert get_windows_timestamp_now() > 10000\n >>> save_time = get_windows_timestamp_now()\n >>> time.sleep(0.1)\n >>> assert get_windows_timestamp_now() > save_time\n\n '
linux_timestamp_100ns = int((time.time() * 10000000.0))
linux_windows_diff_100ns = int((11644473600 * 10000000.0))
windows_timestamp_100ns = (linux_timestamp_100ns + linux_windows_diff_100ns)
return windows_timestamp_100ns | def get_windows_timestamp_now() -> int:
'\n Windows Timestamp in hundreds of ns since 01.01.1601 – 00:00:00 UTC\n\n >>> assert get_windows_timestamp_now() > 10000\n >>> save_time = get_windows_timestamp_now()\n >>> time.sleep(0.1)\n >>> assert get_windows_timestamp_now() > save_time\n\n '
linux_timestamp_100ns = int((time.time() * 10000000.0))
linux_windows_diff_100ns = int((11644473600 * 10000000.0))
windows_timestamp_100ns = (linux_timestamp_100ns + linux_windows_diff_100ns)
return windows_timestamp_100ns<|docstring|>Windows Timestamp in hundreds of ns since 01.01.1601 – 00:00:00 UTC
>>> assert get_windows_timestamp_now() > 10000
>>> save_time = get_windows_timestamp_now()
>>> time.sleep(0.1)
>>> assert get_windows_timestamp_now() > save_time<|endoftext|> |
8138cb165b34430b3f2d0c74195497fea3421b51dea0a9f29f22045834f01835 | def __init__(self) -> None:
'\n >>> fake_reg_root = FakeRegistryKey()\n '
self.full_key: str = ''
self.parent_fake_registry_key: Optional[FakeRegistryKey] = None
self.subkeys: Dict[(str, FakeRegistryKey)] = dict()
self.values: Dict[(str, FakeRegistryValue)] = dict()
self.last_modified_ns: int = 0 | >>> fake_reg_root = FakeRegistryKey() | fake_winreg/fake_reg.py | __init__ | bitranox/fake_winreg | 2 | python | def __init__(self) -> None:
'\n \n '
self.full_key: str =
self.parent_fake_registry_key: Optional[FakeRegistryKey] = None
self.subkeys: Dict[(str, FakeRegistryKey)] = dict()
self.values: Dict[(str, FakeRegistryValue)] = dict()
self.last_modified_ns: int = 0 | def __init__(self) -> None:
'\n \n '
self.full_key: str =
self.parent_fake_registry_key: Optional[FakeRegistryKey] = None
self.subkeys: Dict[(str, FakeRegistryKey)] = dict()
self.values: Dict[(str, FakeRegistryValue)] = dict()
self.last_modified_ns: int = 0<|docstring|>>>> fake_reg_root = FakeRegistryKey()<|endoftext|> |
811d6eb948c6d707f41ba144b12b104aa691e065f073cdd81996a7c874a91a34 | def __init__(self) -> None:
'\n >>> fake_reg_value = FakeRegistryValue()\n '
self.full_key: str = ''
self.value_name: str = ''
self.value: RegData = ''
self.value_type: int = REG_SZ
self.access: int = 0
self.last_modified_ns: Union[(None, int)] = None | >>> fake_reg_value = FakeRegistryValue() | fake_winreg/fake_reg.py | __init__ | bitranox/fake_winreg | 2 | python | def __init__(self) -> None:
'\n \n '
self.full_key: str =
self.value_name: str =
self.value: RegData =
self.value_type: int = REG_SZ
self.access: int = 0
self.last_modified_ns: Union[(None, int)] = None | def __init__(self) -> None:
'\n \n '
self.full_key: str =
self.value_name: str =
self.value: RegData =
self.value_type: int = REG_SZ
self.access: int = 0
self.last_modified_ns: Union[(None, int)] = None<|docstring|>>>> fake_reg_value = FakeRegistryValue()<|endoftext|> |
32fca93c44b1c47f1ac51dd51103c2f700dbac0f1d7688aa749eba3e11545ffd | def restrict2ROI(img, vertices):
'\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n '
mask = np.zeros_like(img)
if (len(img.shape) > 2):
channel_count = img.shape[2]
ignore_mask_color = ((255,) * channel_count)
else:
ignore_mask_color = 255
cv2.fillPoly(mask, np.int32([vertices]), ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image | Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points. | P2_subroutines.py | restrict2ROI | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def restrict2ROI(img, vertices):
'\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n '
mask = np.zeros_like(img)
if (len(img.shape) > 2):
channel_count = img.shape[2]
ignore_mask_color = ((255,) * channel_count)
else:
ignore_mask_color = 255
cv2.fillPoly(mask, np.int32([vertices]), ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image | def restrict2ROI(img, vertices):
'\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n '
mask = np.zeros_like(img)
if (len(img.shape) > 2):
channel_count = img.shape[2]
ignore_mask_color = ((255,) * channel_count)
else:
ignore_mask_color = 255
cv2.fillPoly(mask, np.int32([vertices]), ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image<|docstring|>Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.<|endoftext|> |
567d2dbe70cd6ec9c6ca1e10d092517c63138d227019b8768ba5c28b85ea5a83 | def gaussian_blur(img, kernel_size):
'Applies a Gaussian Noise kernel'
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) | Applies a Gaussian Noise kernel | P2_subroutines.py | gaussian_blur | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def gaussian_blur(img, kernel_size):
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) | def gaussian_blur(img, kernel_size):
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)<|docstring|>Applies a Gaussian Noise kernel<|endoftext|> |
9998f1991fff90f37103d018878f9c7040b21f7500b0a043666dc6a216551545 | def calibrateCamera(FORCE_REDO=False):
' Load images, get the corner positions in image and generate\n the calibration matrix and the distortion coefficients.\n if FORCE_REDO == False; reads previously saved .npz file, if available\n '
if (os.path.isfile('cal_para.npz') and (FORCE_REDO == False)):
cal_para = np.load('cal_para.npz')
cal_mtx = cal_para['cal_mtx']
dist_coef = cal_para['dist_coef']
cal_para.close()
else:
cal_images = glob.glob('camera_cal/*.jpg')
chessb_corners = (9, 6)
chessb_knownpoints = np.zeros([(chessb_corners[0] * chessb_corners[1]), 3], dtype=np.float32)
chessb_knownpoints[(:, 0:2)] = np.mgrid[(0:chessb_corners[0], 0:chessb_corners[1])].T.reshape((- 1), 2)
img_points_list = []
known_points_list = []
for img_path in cal_images:
image = mpimg.imread(img_path)
(Ny, Nx, _) = np.shape(image)
grayscl = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(ret, img_corners) = cv2.findChessboardCorners(grayscl, chessb_corners, None)
if ret:
known_points_list.append(chessb_knownpoints)
img_points_list.append(img_corners)
(ret, cal_mtx, dist_coef, rvecs, tvecs) = cv2.calibrateCamera(known_points_list, img_points_list, (Nx, Ny), None, None)
np.savez('cal_para.npz', cal_mtx=cal_mtx, dist_coef=dist_coef, chessb_corners=chessb_corners)
output_dir = ((('output_images' + os.sep) + 'calibration') + os.sep)
os.makedirs(output_dir, exist_ok=True)
for img_path in cal_images:
image = mpimg.imread(img_path)
cal_image = cv2.undistort(image, cal_mtx, dist_coef, None, cal_mtx)
img_basename = os.path.basename(img_path).split('.jpg')[0]
fig = plt.figure(num=1)
plt.clf()
fig.canvas.set_window_title('Input Image')
plt.imshow(image)
plt.savefig(((output_dir + img_basename) + '.jpg'), format='jpg')
fig = plt.figure(num=2)
plt.clf()
fig.canvas.set_window_title('Input Image after Calibration')
plt.imshow(cal_image)
plt.savefig(((output_dir + img_basename) + '_output.jpg'), format='jpg')
plt.close('all')
return (cal_mtx, dist_coef) | Load images, get the corner positions in image and generate
the calibration matrix and the distortion coefficients.
if FORCE_REDO == False; reads previously saved .npz file, if available | P2_subroutines.py | calibrateCamera | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def calibrateCamera(FORCE_REDO=False):
' Load images, get the corner positions in image and generate\n the calibration matrix and the distortion coefficients.\n if FORCE_REDO == False; reads previously saved .npz file, if available\n '
if (os.path.isfile('cal_para.npz') and (FORCE_REDO == False)):
cal_para = np.load('cal_para.npz')
cal_mtx = cal_para['cal_mtx']
dist_coef = cal_para['dist_coef']
cal_para.close()
else:
cal_images = glob.glob('camera_cal/*.jpg')
chessb_corners = (9, 6)
chessb_knownpoints = np.zeros([(chessb_corners[0] * chessb_corners[1]), 3], dtype=np.float32)
chessb_knownpoints[(:, 0:2)] = np.mgrid[(0:chessb_corners[0], 0:chessb_corners[1])].T.reshape((- 1), 2)
img_points_list = []
known_points_list = []
for img_path in cal_images:
image = mpimg.imread(img_path)
(Ny, Nx, _) = np.shape(image)
grayscl = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(ret, img_corners) = cv2.findChessboardCorners(grayscl, chessb_corners, None)
if ret:
known_points_list.append(chessb_knownpoints)
img_points_list.append(img_corners)
(ret, cal_mtx, dist_coef, rvecs, tvecs) = cv2.calibrateCamera(known_points_list, img_points_list, (Nx, Ny), None, None)
np.savez('cal_para.npz', cal_mtx=cal_mtx, dist_coef=dist_coef, chessb_corners=chessb_corners)
output_dir = ((('output_images' + os.sep) + 'calibration') + os.sep)
os.makedirs(output_dir, exist_ok=True)
for img_path in cal_images:
image = mpimg.imread(img_path)
cal_image = cv2.undistort(image, cal_mtx, dist_coef, None, cal_mtx)
img_basename = os.path.basename(img_path).split('.jpg')[0]
fig = plt.figure(num=1)
plt.clf()
fig.canvas.set_window_title('Input Image')
plt.imshow(image)
plt.savefig(((output_dir + img_basename) + '.jpg'), format='jpg')
fig = plt.figure(num=2)
plt.clf()
fig.canvas.set_window_title('Input Image after Calibration')
plt.imshow(cal_image)
plt.savefig(((output_dir + img_basename) + '_output.jpg'), format='jpg')
plt.close('all')
return (cal_mtx, dist_coef) | def calibrateCamera(FORCE_REDO=False):
' Load images, get the corner positions in image and generate\n the calibration matrix and the distortion coefficients.\n if FORCE_REDO == False; reads previously saved .npz file, if available\n '
if (os.path.isfile('cal_para.npz') and (FORCE_REDO == False)):
cal_para = np.load('cal_para.npz')
cal_mtx = cal_para['cal_mtx']
dist_coef = cal_para['dist_coef']
cal_para.close()
else:
cal_images = glob.glob('camera_cal/*.jpg')
chessb_corners = (9, 6)
chessb_knownpoints = np.zeros([(chessb_corners[0] * chessb_corners[1]), 3], dtype=np.float32)
chessb_knownpoints[(:, 0:2)] = np.mgrid[(0:chessb_corners[0], 0:chessb_corners[1])].T.reshape((- 1), 2)
img_points_list = []
known_points_list = []
for img_path in cal_images:
image = mpimg.imread(img_path)
(Ny, Nx, _) = np.shape(image)
grayscl = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(ret, img_corners) = cv2.findChessboardCorners(grayscl, chessb_corners, None)
if ret:
known_points_list.append(chessb_knownpoints)
img_points_list.append(img_corners)
(ret, cal_mtx, dist_coef, rvecs, tvecs) = cv2.calibrateCamera(known_points_list, img_points_list, (Nx, Ny), None, None)
np.savez('cal_para.npz', cal_mtx=cal_mtx, dist_coef=dist_coef, chessb_corners=chessb_corners)
output_dir = ((('output_images' + os.sep) + 'calibration') + os.sep)
os.makedirs(output_dir, exist_ok=True)
for img_path in cal_images:
image = mpimg.imread(img_path)
cal_image = cv2.undistort(image, cal_mtx, dist_coef, None, cal_mtx)
img_basename = os.path.basename(img_path).split('.jpg')[0]
fig = plt.figure(num=1)
plt.clf()
fig.canvas.set_window_title('Input Image')
plt.imshow(image)
plt.savefig(((output_dir + img_basename) + '.jpg'), format='jpg')
fig = plt.figure(num=2)
plt.clf()
fig.canvas.set_window_title('Input Image after Calibration')
plt.imshow(cal_image)
plt.savefig(((output_dir + img_basename) + '_output.jpg'), format='jpg')
plt.close('all')
return (cal_mtx, dist_coef)<|docstring|>Load images, get the corner positions in image and generate
the calibration matrix and the distortion coefficients.
if FORCE_REDO == False; reads previously saved .npz file, if available<|endoftext|> |
fedd0a09a146c11034a20609e65120978827438d0914f30f127119408cf0ef8a | def lanepxmask(img_RGB, sobel_kernel=7):
' Take RGB image, perform necessary color transformation /gradient calculations\n and output the detected lane pixels mask, alongside an RGB composition of the 3 sub-masks (added)\n for visualization\n '
MORPH_ENHANCE = True
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
img_HLS = cv2.cvtColor(img_RGB, cv2.COLOR_RGB2HLS)
S_thd = (200, 255)
S_mask = ((img_HLS[(:, :, 2)] > S_thd[0]) & (img_HLS[(:, :, 2)] <= S_thd[1]))
S_mask[(img_HLS[(:, :, 1)] < 50)] = False
if MORPH_ENHANCE:
S_mask = (cv2.morphologyEx((255 * np.uint8(S_mask)), cv2.MORPH_CLOSE, kernel, iterations=2) > 0)
S_mask = (cv2.dilate((255 * np.uint8(S_mask)), kernel) > 0)
S_gradx_mask = abs_sobel_thresh(img_HLS[(:, :, 2)], orient='x', thresh=(20, 100), sobel_kernel=sobel_kernel, GRAY_INPUT=True)
S_mask[(img_HLS[(:, :, 1)] < 50)] = False
if MORPH_ENHANCE:
S_gradx_mask = (cv2.morphologyEx((255 * np.uint8(S_gradx_mask)), cv2.MORPH_CLOSE, kernel, iterations=2) > 0)
S_gradx_mask = (cv2.dilate((255 * np.uint8(S_gradx_mask)), kernel) > 0)
gradx_mask = abs_sobel_thresh(img_RGB, orient='x', thresh=(20, 100), sobel_kernel=sobel_kernel)
dark_borders = (cv2.blur(img_HLS[(:, :, 1)], (15, 15)) < cv2.blur(img_HLS[(:, :, 1)], (17, 17)))
dark_borders = cv2.morphologyEx((255 * np.uint8(dark_borders)), cv2.MORPH_OPEN, kernel, iterations=2)
dark_regions = cv2.morphologyEx((255 * np.uint8((img_HLS[(:, :, 1)] < np.mean(img_HLS[(:, :, 1)])))), cv2.MORPH_OPEN, kernel, iterations=2)
dark = cv2.morphologyEx((255 * np.uint8(((dark_borders == 255) | (dark_regions == 255)))), cv2.MORPH_OPEN, kernel, iterations=3)
dark = cv2.dilate(dark, np.ones([sobel_kernel, sobel_kernel]), iterations=2)
white = cv2.inRange(gaussian_blur(img_RGB, 5), np.uint8([180, 180, 180]), np.uint8([255, 255, 255]))
yellow = (((img_RGB[(:, :, 0)] > 160) & (img_RGB[(:, :, 1)] > 160)) & (img_RGB[(:, :, 2)] < 120))
dark[(white == 255)] = 0
gradx_mask[(dark == 255)] = False
if MORPH_ENHANCE:
gradx_mask = (cv2.morphologyEx((255 * np.uint8(gradx_mask)), cv2.MORPH_OPEN, kernel, iterations=2) > 0)
S_mask[(white == 255)] = True
S_mask[yellow] = True
S_mask[(dark_regions == 255)] = False
S_gradx_mask[(dark_regions == 255)] = False
mask = ((S_gradx_mask | gradx_mask) | S_mask)
color_binary_mask = (np.dstack((S_mask, gradx_mask, S_gradx_mask)) * 255)
return (mask, color_binary_mask) | Take RGB image, perform necessary color transformation /gradient calculations
and output the detected lane pixels mask, alongside an RGB composition of the 3 sub-masks (added)
for visualization | P2_subroutines.py | lanepxmask | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def lanepxmask(img_RGB, sobel_kernel=7):
' Take RGB image, perform necessary color transformation /gradient calculations\n and output the detected lane pixels mask, alongside an RGB composition of the 3 sub-masks (added)\n for visualization\n '
MORPH_ENHANCE = True
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
img_HLS = cv2.cvtColor(img_RGB, cv2.COLOR_RGB2HLS)
S_thd = (200, 255)
S_mask = ((img_HLS[(:, :, 2)] > S_thd[0]) & (img_HLS[(:, :, 2)] <= S_thd[1]))
S_mask[(img_HLS[(:, :, 1)] < 50)] = False
if MORPH_ENHANCE:
S_mask = (cv2.morphologyEx((255 * np.uint8(S_mask)), cv2.MORPH_CLOSE, kernel, iterations=2) > 0)
S_mask = (cv2.dilate((255 * np.uint8(S_mask)), kernel) > 0)
S_gradx_mask = abs_sobel_thresh(img_HLS[(:, :, 2)], orient='x', thresh=(20, 100), sobel_kernel=sobel_kernel, GRAY_INPUT=True)
S_mask[(img_HLS[(:, :, 1)] < 50)] = False
if MORPH_ENHANCE:
S_gradx_mask = (cv2.morphologyEx((255 * np.uint8(S_gradx_mask)), cv2.MORPH_CLOSE, kernel, iterations=2) > 0)
S_gradx_mask = (cv2.dilate((255 * np.uint8(S_gradx_mask)), kernel) > 0)
gradx_mask = abs_sobel_thresh(img_RGB, orient='x', thresh=(20, 100), sobel_kernel=sobel_kernel)
dark_borders = (cv2.blur(img_HLS[(:, :, 1)], (15, 15)) < cv2.blur(img_HLS[(:, :, 1)], (17, 17)))
dark_borders = cv2.morphologyEx((255 * np.uint8(dark_borders)), cv2.MORPH_OPEN, kernel, iterations=2)
dark_regions = cv2.morphologyEx((255 * np.uint8((img_HLS[(:, :, 1)] < np.mean(img_HLS[(:, :, 1)])))), cv2.MORPH_OPEN, kernel, iterations=2)
dark = cv2.morphologyEx((255 * np.uint8(((dark_borders == 255) | (dark_regions == 255)))), cv2.MORPH_OPEN, kernel, iterations=3)
dark = cv2.dilate(dark, np.ones([sobel_kernel, sobel_kernel]), iterations=2)
white = cv2.inRange(gaussian_blur(img_RGB, 5), np.uint8([180, 180, 180]), np.uint8([255, 255, 255]))
yellow = (((img_RGB[(:, :, 0)] > 160) & (img_RGB[(:, :, 1)] > 160)) & (img_RGB[(:, :, 2)] < 120))
dark[(white == 255)] = 0
gradx_mask[(dark == 255)] = False
if MORPH_ENHANCE:
gradx_mask = (cv2.morphologyEx((255 * np.uint8(gradx_mask)), cv2.MORPH_OPEN, kernel, iterations=2) > 0)
S_mask[(white == 255)] = True
S_mask[yellow] = True
S_mask[(dark_regions == 255)] = False
S_gradx_mask[(dark_regions == 255)] = False
mask = ((S_gradx_mask | gradx_mask) | S_mask)
color_binary_mask = (np.dstack((S_mask, gradx_mask, S_gradx_mask)) * 255)
return (mask, color_binary_mask) | def lanepxmask(img_RGB, sobel_kernel=7):
' Take RGB image, perform necessary color transformation /gradient calculations\n and output the detected lane pixels mask, alongside an RGB composition of the 3 sub-masks (added)\n for visualization\n '
MORPH_ENHANCE = True
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
img_HLS = cv2.cvtColor(img_RGB, cv2.COLOR_RGB2HLS)
S_thd = (200, 255)
S_mask = ((img_HLS[(:, :, 2)] > S_thd[0]) & (img_HLS[(:, :, 2)] <= S_thd[1]))
S_mask[(img_HLS[(:, :, 1)] < 50)] = False
if MORPH_ENHANCE:
S_mask = (cv2.morphologyEx((255 * np.uint8(S_mask)), cv2.MORPH_CLOSE, kernel, iterations=2) > 0)
S_mask = (cv2.dilate((255 * np.uint8(S_mask)), kernel) > 0)
S_gradx_mask = abs_sobel_thresh(img_HLS[(:, :, 2)], orient='x', thresh=(20, 100), sobel_kernel=sobel_kernel, GRAY_INPUT=True)
S_mask[(img_HLS[(:, :, 1)] < 50)] = False
if MORPH_ENHANCE:
S_gradx_mask = (cv2.morphologyEx((255 * np.uint8(S_gradx_mask)), cv2.MORPH_CLOSE, kernel, iterations=2) > 0)
S_gradx_mask = (cv2.dilate((255 * np.uint8(S_gradx_mask)), kernel) > 0)
gradx_mask = abs_sobel_thresh(img_RGB, orient='x', thresh=(20, 100), sobel_kernel=sobel_kernel)
dark_borders = (cv2.blur(img_HLS[(:, :, 1)], (15, 15)) < cv2.blur(img_HLS[(:, :, 1)], (17, 17)))
dark_borders = cv2.morphologyEx((255 * np.uint8(dark_borders)), cv2.MORPH_OPEN, kernel, iterations=2)
dark_regions = cv2.morphologyEx((255 * np.uint8((img_HLS[(:, :, 1)] < np.mean(img_HLS[(:, :, 1)])))), cv2.MORPH_OPEN, kernel, iterations=2)
dark = cv2.morphologyEx((255 * np.uint8(((dark_borders == 255) | (dark_regions == 255)))), cv2.MORPH_OPEN, kernel, iterations=3)
dark = cv2.dilate(dark, np.ones([sobel_kernel, sobel_kernel]), iterations=2)
white = cv2.inRange(gaussian_blur(img_RGB, 5), np.uint8([180, 180, 180]), np.uint8([255, 255, 255]))
yellow = (((img_RGB[(:, :, 0)] > 160) & (img_RGB[(:, :, 1)] > 160)) & (img_RGB[(:, :, 2)] < 120))
dark[(white == 255)] = 0
gradx_mask[(dark == 255)] = False
if MORPH_ENHANCE:
gradx_mask = (cv2.morphologyEx((255 * np.uint8(gradx_mask)), cv2.MORPH_OPEN, kernel, iterations=2) > 0)
S_mask[(white == 255)] = True
S_mask[yellow] = True
S_mask[(dark_regions == 255)] = False
S_gradx_mask[(dark_regions == 255)] = False
mask = ((S_gradx_mask | gradx_mask) | S_mask)
color_binary_mask = (np.dstack((S_mask, gradx_mask, S_gradx_mask)) * 255)
return (mask, color_binary_mask)<|docstring|>Take RGB image, perform necessary color transformation /gradient calculations
and output the detected lane pixels mask, alongside an RGB composition of the 3 sub-masks (added)
for visualization<|endoftext|> |
6dbe582070fbf432b4eb9d3eaa6d3b2906aab0b565050e4102c2f6480b329004 | def color_preprocessing(img_RGB, GET_BOX=False):
' Apply color-based pre-processing of frames'
box_size = 30
box_ystep = 80
box_vertices = (box_size * np.array([((- 1), (- 1)), ((- 1), 1), (1, 1), (1, (- 1))], dtype=np.int32))
(x_box, y_box) = closePolygon(box_vertices)
image_new = np.copy(img_RGB)
(Ny, Nx) = np.shape(img_RGB)[0:2]
for i_box in range(3):
box = img_RGB[(((Ny - (i_box * box_ystep)) - (2 * box_size)):(Ny - (i_box * box_ystep)), ((Nx // 2) - box_size):((Nx // 2) + box_size), :)]
avg_color = np.mean(box, axis=(0, 1))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8((avg_color - 25)), np.uint8((avg_color + 25)))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8([0, 0, 0]), np.uint8([50, 50, 50]))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8([80, 80, 80]), np.uint8([120, 120, 120]))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
if GET_BOX:
return (image_new, x_box, y_box, box_size, box_ystep)
else:
return image_new | Apply color-based pre-processing of frames | P2_subroutines.py | color_preprocessing | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def color_preprocessing(img_RGB, GET_BOX=False):
' '
box_size = 30
box_ystep = 80
box_vertices = (box_size * np.array([((- 1), (- 1)), ((- 1), 1), (1, 1), (1, (- 1))], dtype=np.int32))
(x_box, y_box) = closePolygon(box_vertices)
image_new = np.copy(img_RGB)
(Ny, Nx) = np.shape(img_RGB)[0:2]
for i_box in range(3):
box = img_RGB[(((Ny - (i_box * box_ystep)) - (2 * box_size)):(Ny - (i_box * box_ystep)), ((Nx // 2) - box_size):((Nx // 2) + box_size), :)]
avg_color = np.mean(box, axis=(0, 1))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8((avg_color - 25)), np.uint8((avg_color + 25)))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8([0, 0, 0]), np.uint8([50, 50, 50]))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8([80, 80, 80]), np.uint8([120, 120, 120]))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
if GET_BOX:
return (image_new, x_box, y_box, box_size, box_ystep)
else:
return image_new | def color_preprocessing(img_RGB, GET_BOX=False):
' '
box_size = 30
box_ystep = 80
box_vertices = (box_size * np.array([((- 1), (- 1)), ((- 1), 1), (1, 1), (1, (- 1))], dtype=np.int32))
(x_box, y_box) = closePolygon(box_vertices)
image_new = np.copy(img_RGB)
(Ny, Nx) = np.shape(img_RGB)[0:2]
for i_box in range(3):
box = img_RGB[(((Ny - (i_box * box_ystep)) - (2 * box_size)):(Ny - (i_box * box_ystep)), ((Nx // 2) - box_size):((Nx // 2) + box_size), :)]
avg_color = np.mean(box, axis=(0, 1))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8((avg_color - 25)), np.uint8((avg_color + 25)))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8([0, 0, 0]), np.uint8([50, 50, 50]))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
mask = cv2.inRange(gaussian_blur(img_RGB, 7), np.uint8([80, 80, 80]), np.uint8([120, 120, 120]))
image_new = cv2.bitwise_and(image_new, image_new, mask=(255 - mask))
if GET_BOX:
return (image_new, x_box, y_box, box_size, box_ystep)
else:
return image_new<|docstring|>Apply color-based pre-processing of frames<|endoftext|> |
d5f0d3348eabedfba5b87b81f232fde45c9d12bfebb40050ee9c9e18b621fe5b | def weight_fit_cfs(left, right):
' judge fit quality, providing weights and a weighted average of the coefficients\n inputs are LaneLine objects '
cfs = np.vstack((left.cf, right.cf))
cf_MSE = np.vstack((left.MSE, right.MSE))
w1 = (np.sum(cf_MSE) / cf_MSE)
w2 = np.reshape((np.array([left.Npix, right.Npix]) / (left.Npix + right.Npix)), [2, 1])
w = (w1 * w2)
cf_avg = (np.mean((w * cfs), axis=0) / np.mean(w, axis=0))
return (w, cf_avg) | judge fit quality, providing weights and a weighted average of the coefficients
inputs are LaneLine objects | P2_subroutines.py | weight_fit_cfs | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def weight_fit_cfs(left, right):
' judge fit quality, providing weights and a weighted average of the coefficients\n inputs are LaneLine objects '
cfs = np.vstack((left.cf, right.cf))
cf_MSE = np.vstack((left.MSE, right.MSE))
w1 = (np.sum(cf_MSE) / cf_MSE)
w2 = np.reshape((np.array([left.Npix, right.Npix]) / (left.Npix + right.Npix)), [2, 1])
w = (w1 * w2)
cf_avg = (np.mean((w * cfs), axis=0) / np.mean(w, axis=0))
return (w, cf_avg) | def weight_fit_cfs(left, right):
' judge fit quality, providing weights and a weighted average of the coefficients\n inputs are LaneLine objects '
cfs = np.vstack((left.cf, right.cf))
cf_MSE = np.vstack((left.MSE, right.MSE))
w1 = (np.sum(cf_MSE) / cf_MSE)
w2 = np.reshape((np.array([left.Npix, right.Npix]) / (left.Npix + right.Npix)), [2, 1])
w = (w1 * w2)
cf_avg = (np.mean((w * cfs), axis=0) / np.mean(w, axis=0))
return (w, cf_avg)<|docstring|>judge fit quality, providing weights and a weighted average of the coefficients
inputs are LaneLine objects<|endoftext|> |
0f1ea2098a5ed6473a726d508e2dfbfa620cc3409c0bd07c83729221ab0a750b | def find_lane_xy_frommask(mask_input, nwindows=9, margin=100, minpix=50, NO_IMG=False):
' Take the input mask and perform a sliding window search\n Return the coordinates of the located pixels, polynomial coefficients and optionally an image showing the\n windows/detections\n **Parameters/Keywords:\n nwindows ==> Choose the number of sliding windows\n margin ==> Set the width of the windows +/- margin\n minpix ==> Set minimum number of pixels found to recenter window\n NO_IMG ==> do not calculate the diagnose output image (used in pipeline)'
mask_input = (255 * np.uint8((mask_input / np.max(mask_input))))
kernel = np.ones((3, 3), np.uint8)
mask_mostreliable = cv2.morphologyEx((255 * np.uint8(mask_input)), cv2.MORPH_OPEN, kernel, iterations=2)
(ret, labels) = cv2.connectedComponents(mask_mostreliable)
region_idx_map = [(labels == j).nonzero() for j in range(1, (np.max(labels) + 1))]
histogram = np.sum(mask_input[((mask_input.shape[0] // 2):, :)], axis=0)
(Ny, Nx) = mask_input.shape[0:2]
midpoint = np.int((Nx // 2))
leftx_base = (np.argmax(histogram[(Nx // 10):midpoint]) + (Nx // 10))
rightx_base = (np.argmax(histogram[midpoint:(Nx - (Nx // 10))]) + midpoint)
window_height = np.int((Ny // nwindows))
nonzero = mask_input.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_inds = [[], []]
right_lane_inds = [[], []]
if (NO_IMG == False):
out_img = np.dstack((mask_input, mask_input, mask_input))
else:
out_img = None
ymin_good_left = np.nan
ymin_good_right = np.nan
for window in range(nwindows):
win_y_low = (Ny - ((window + 1) * window_height))
win_y_high = (Ny - (window * window_height))
win_xleft_low = (leftx_current - margin)
win_xleft_high = (leftx_current + margin)
win_xright_low = (rightx_current - margin)
win_xright_high = (rightx_current + margin)
if (NO_IMG == False):
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
good_left_inds = np.where(((((nonzerox >= win_xleft_low) & (nonzerox <= win_xleft_high)) & (nonzeroy >= win_y_low)) & (nonzeroy <= win_y_high)))[0]
good_right_inds = np.where(((((nonzerox >= win_xright_low) & (nonzerox <= win_xright_high)) & (nonzeroy >= win_y_low)) & (nonzeroy <= win_y_high)))[0]
left_lane_inds[0].append(nonzerox[good_left_inds])
left_lane_inds[1].append(nonzeroy[good_left_inds])
right_lane_inds[0].append(nonzerox[good_right_inds])
right_lane_inds[1].append(nonzeroy[good_right_inds])
labels_in_left = np.unique(labels[(nonzeroy[good_left_inds], nonzerox[good_left_inds])])
labels_in_left = labels_in_left[(labels_in_left > 0)]
if (np.size(labels_in_left) > 0):
for k in labels_in_left:
(yreg_left, xreg_left) = (region_idx_map[(k - 1)][0], region_idx_map[(k - 1)][1])
reg_good_idx = np.where(((yreg_left >= win_y_low) & (yreg_left <= win_y_high)))[0]
left_lane_inds[0].append(xreg_left[reg_good_idx])
left_lane_inds[1].append(yreg_left[reg_good_idx])
ymin_good_left = np.nanmin(np.concatenate(([ymin_good_left], yreg_left[reg_good_idx])))
labels_in_right = np.unique(labels[(nonzeroy[good_right_inds], nonzerox[good_right_inds])])
labels_in_right = labels_in_right[(labels_in_right > 0)]
if (np.size(labels_in_right) > 0):
for k in labels_in_right:
(yreg_right, xreg_right) = (region_idx_map[(k - 1)][0], region_idx_map[(k - 1)][1])
reg_good_idx = np.where(((yreg_right >= win_y_low) & (yreg_right <= win_y_high)))[0]
right_lane_inds[0].append(xreg_right[reg_good_idx])
right_lane_inds[1].append(yreg_right[reg_good_idx])
ymin_good_right = np.nanmin(np.concatenate(([ymin_good_right], yreg_right[reg_good_idx])))
if ((np.size(np.concatenate(left_lane_inds[1])) >= minpix) and ((np.max(np.concatenate(left_lane_inds[1])) - np.min(np.concatenate(left_lane_inds[1]))) > minpix)):
order = ((1 * (window < 3)) + (2 * (window >= 3)))
polycf_left = np.polyfit(np.concatenate(left_lane_inds[1]), np.concatenate(left_lane_inds[0]), order)
leftx_current = np.int(np.round(np.polyval(polycf_left, ((0.5 * (win_y_low + win_y_high)) - window_height))))
if ((np.size(np.concatenate(right_lane_inds[1])) >= minpix) and ((np.max(np.concatenate(right_lane_inds[1])) - np.min(np.concatenate(right_lane_inds[1]))) > minpix)):
order = ((1 * (window < 3)) + (2 * (window >= 3)))
polycf_right = np.polyfit(np.concatenate(right_lane_inds[1]), np.concatenate(right_lane_inds[0]), order)
rightx_current = np.int(np.round(np.polyval(polycf_right, ((0.5 * (win_y_low + win_y_high)) - window_height))))
PLOT_METHOD = False
if (PLOT_METHOD and (window == 4)):
stop()
plt.figure(num=1)
plt.clf()
plt.imshow(out_img)
y = np.arange((win_y_low - window_height), win_y_high)
plt.plot(np.polyval(polycf_left, y), y, 'r--')
plt.plot(np.repeat(leftx_current, 2), np.repeat(((0.5 * (win_y_low + win_y_high)) - window_height), 2), 'b+')
plt.plot(np.polyval(polycf_right, y), y, 'r--')
plt.plot(np.repeat(rightx_current, 2), np.repeat(((0.5 * (win_y_low + win_y_high)) - window_height), 2), 'b+')
plt.pause(20)
stop()
leftx = np.concatenate(left_lane_inds[0])
lefty = np.concatenate(left_lane_inds[1])
rightx = np.concatenate(right_lane_inds[0])
righty = np.concatenate(right_lane_inds[1])
(polycf_left, sqr_error_left, _, _, _) = np.polyfit(lefty, leftx, 2, full=True, w=(lefty / Ny))
MSE_left = np.sqrt((sqr_error_left[0] / np.size(lefty)))
(polycf_right, sqr_error_right, _, _, _) = np.polyfit(righty, rightx, 2, full=True, w=(righty / Ny))
MSE_right = np.sqrt((sqr_error_right[0] / np.size(righty)))
lane_annotation = np.zeros([Ny, Nx, 3], dtype=np.uint8)
lane_annotation[(lefty, leftx)] = [255, 0, 0]
lane_annotation[(righty, rightx)] = [0, 0, 255]
if (NO_IMG == False):
out_img[(lefty, leftx)] = [255, 0, 0]
out_img[(righty, rightx)] = [0, 0, 255]
ploty = np.linspace(0, (Ny - 1), Ny)
left_fitx = np.polyval(polycf_left, ploty)
right_fitx = np.polyval(polycf_right, ploty)
if (plt.get_backend() == 'Agg'):
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
leftlane = LaneLine(leftx, lefty, polycf_left, MSE_left, ymin_good_left)
rightlane = LaneLine(rightx, righty, polycf_right, MSE_right, ymin_good_right)
return (leftlane, rightlane, lane_annotation, out_img) | Take the input mask and perform a sliding window search
Return the coordinates of the located pixels, polynomial coefficients and optionally an image showing the
windows/detections
**Parameters/Keywords:
nwindows ==> Choose the number of sliding windows
margin ==> Set the width of the windows +/- margin
minpix ==> Set minimum number of pixels found to recenter window
def find_lane_xy_frommask(mask_input, nwindows=9, margin=100, minpix=50, NO_IMG=False):
    """Locate left/right lane-line pixels in a binary mask via sliding-window search.

    Parameters
    ----------
    mask_input : 2D array
        Binary mask of candidate lane pixels (any non-zero scale; renormalized here).
    nwindows : int
        Number of vertical sliding windows.
    margin : int
        Half-width of each search window (center +/- margin, in pixels).
    minpix : int
        Minimum pixel count (and minimum y-extent) required before refitting
        a polynomial to recenter the next window.
    NO_IMG : bool
        If True, skip building the diagnostic image (faster; used in the pipeline).

    Returns
    -------
    (leftlane, rightlane, lane_annotation, out_img) :
        LaneLine objects for each side, an RGB image marking the accepted
        pixels (left=red, right=blue), and the diagnostic image
        (None when NO_IMG is True).
    """
    # normalize mask to 0/255 uint8
    mask_input = 255 * np.uint8(mask_input / np.max(mask_input))

    # Morphological opening keeps only the most reliable (thick) detections;
    # connected components over that reduced mask give whole lane segments
    # that can be taken wholesale once a window touches them.
    kernel = np.ones((3, 3), np.uint8)
    mask_mostreliable = cv2.morphologyEx(255 * np.uint8(mask_input), cv2.MORPH_OPEN,
                                         kernel, iterations=2)
    ret, labels = cv2.connectedComponents(mask_mostreliable)
    # (y, x) pixel coordinates of each labeled region (label k -> index k-1)
    region_idx_map = [(labels == j).nonzero() for j in range(1, np.max(labels) + 1)]

    # Histogram of the lower half of the mask: its peaks give the lane base x-positions.
    histogram = np.sum(mask_input[mask_input.shape[0] // 2:, :], axis=0)
    Ny, Nx = mask_input.shape[0:2]
    # builtin int arithmetic: np.int was a deprecated alias for int, removed in NumPy 1.24
    midpoint = Nx // 2
    # ignore the outer 10% of the image on each side when picking the bases
    leftx_base = np.argmax(histogram[Nx // 10:midpoint]) + Nx // 10
    rightx_base = np.argmax(histogram[midpoint:Nx - Nx // 10]) + midpoint

    window_height = Ny // nwindows
    # coordinates of all non-zero mask pixels
    nonzero = mask_input.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    leftx_current = leftx_base
    rightx_current = rightx_base
    # accumulated pixel coordinates per lane: [list of x-arrays, list of y-arrays]
    left_lane_inds = [[], []]
    right_lane_inds = [[], []]

    if not NO_IMG:
        out_img = np.dstack((mask_input, mask_input, mask_input))
    else:
        out_img = None

    # lowest y (i.e. highest image point) confirmed by a reliable connected region
    ymin_good_left = np.nan
    ymin_good_right = np.nan

    for window in range(nwindows):
        # window y-boundaries (y grows downward; windows march bottom -> top)
        win_y_low = Ny - (window + 1) * window_height
        win_y_high = Ny - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        if not NO_IMG:
            cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                          (win_xleft_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low),
                          (win_xright_high, win_y_high), (0, 255, 0), 2)

        # non-zero pixels falling inside each window
        good_left_inds = np.where((nonzerox >= win_xleft_low) & (nonzerox <= win_xleft_high) &
                                  (nonzeroy >= win_y_low) & (nonzeroy <= win_y_high))[0]
        good_right_inds = np.where((nonzerox >= win_xright_low) & (nonzerox <= win_xright_high) &
                                   (nonzeroy >= win_y_low) & (nonzeroy <= win_y_high))[0]
        left_lane_inds[0].append(nonzerox[good_left_inds])
        left_lane_inds[1].append(nonzeroy[good_left_inds])
        right_lane_inds[0].append(nonzerox[good_right_inds])
        right_lane_inds[1].append(nonzeroy[good_right_inds])

        # If the window touches a reliable connected region, add that region's
        # pixels inside the window's y-range (they belong to the same lane).
        labels_in_left = np.unique(labels[nonzeroy[good_left_inds], nonzerox[good_left_inds]])
        labels_in_left = labels_in_left[labels_in_left > 0]
        if np.size(labels_in_left) > 0:
            for k in labels_in_left:
                yreg_left, xreg_left = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
                reg_good_idx = np.where((yreg_left >= win_y_low) & (yreg_left <= win_y_high))[0]
                left_lane_inds[0].append(xreg_left[reg_good_idx])
                left_lane_inds[1].append(yreg_left[reg_good_idx])
                ymin_good_left = np.nanmin(np.concatenate(([ymin_good_left],
                                                           yreg_left[reg_good_idx])))
        labels_in_right = np.unique(labels[nonzeroy[good_right_inds], nonzerox[good_right_inds]])
        labels_in_right = labels_in_right[labels_in_right > 0]
        if np.size(labels_in_right) > 0:
            for k in labels_in_right:
                yreg_right, xreg_right = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
                reg_good_idx = np.where((yreg_right >= win_y_low) & (yreg_right <= win_y_high))[0]
                right_lane_inds[0].append(xreg_right[reg_good_idx])
                right_lane_inds[1].append(yreg_right[reg_good_idx])
                ymin_good_right = np.nanmin(np.concatenate(([ymin_good_right],
                                                            yreg_right[reg_good_idx])))

        # Recenter the next window by fitting a low-order polynomial to the
        # pixels gathered so far (order 1 for the first 3 windows, 2 afterwards)
        # and evaluating it at the next window's vertical center.
        if (np.size(np.concatenate(left_lane_inds[1])) >= minpix and
                np.max(np.concatenate(left_lane_inds[1])) - np.min(np.concatenate(left_lane_inds[1])) > minpix):
            order = 1 if window < 3 else 2
            polycf_left = np.polyfit(np.concatenate(left_lane_inds[1]),
                                     np.concatenate(left_lane_inds[0]), order)
            leftx_current = int(np.round(np.polyval(polycf_left,
                                                    0.5 * (win_y_low + win_y_high) - window_height)))
        if (np.size(np.concatenate(right_lane_inds[1])) >= minpix and
                np.max(np.concatenate(right_lane_inds[1])) - np.min(np.concatenate(right_lane_inds[1])) > minpix):
            order = 1 if window < 3 else 2
            polycf_right = np.polyfit(np.concatenate(right_lane_inds[1]),
                                      np.concatenate(right_lane_inds[0]), order)
            rightx_current = int(np.round(np.polyval(polycf_right,
                                                     0.5 * (win_y_low + win_y_high) - window_height)))
        # NOTE(review): an interactive debug-plot block (hard-coded behind
        # PLOT_METHOD = False and relying on a stop() helper) was removed here;
        # it was unreachable dead code.

    # gather all accepted pixels and make a weighted quadratic fit for each lane;
    # weight w = y/Ny favors pixels near the bottom of the image (closest to the car)
    leftx = np.concatenate(left_lane_inds[0])
    lefty = np.concatenate(left_lane_inds[1])
    rightx = np.concatenate(right_lane_inds[0])
    righty = np.concatenate(right_lane_inds[1])
    polycf_left, sqr_error_left, _, _, _ = np.polyfit(lefty, leftx, 2, full=True, w=lefty / Ny)
    # guard: polyfit returns an empty residual array when the fit is exactly determined
    MSE_left = np.sqrt(sqr_error_left[0] / np.size(lefty)) if np.size(sqr_error_left) > 0 else 0.0
    polycf_right, sqr_error_right, _, _, _ = np.polyfit(righty, rightx, 2, full=True, w=righty / Ny)
    MSE_right = np.sqrt(sqr_error_right[0] / np.size(righty)) if np.size(sqr_error_right) > 0 else 0.0

    # RGB annotation of the accepted pixels (left = red, right = blue)
    lane_annotation = np.zeros([Ny, Nx, 3], dtype=np.uint8)
    lane_annotation[lefty, leftx] = [255, 0, 0]
    lane_annotation[righty, rightx] = [0, 0, 255]

    if not NO_IMG:
        out_img[lefty, leftx] = [255, 0, 0]
        out_img[righty, rightx] = [0, 0, 255]
        ploty = np.linspace(0, Ny - 1, Ny)
        left_fitx = np.polyval(polycf_left, ploty)
        right_fitx = np.polyval(polycf_right, ploty)
        if plt.get_backend() == 'Agg':  # only draw when rendering off-screen
            plt.plot(left_fitx, ploty, color='yellow')
            plt.plot(right_fitx, ploty, color='yellow')

    leftlane = LaneLine(leftx, lefty, polycf_left, MSE_left, ymin_good_left)
    rightlane = LaneLine(rightx, righty, polycf_right, MSE_right, ymin_good_right)
    return leftlane, rightlane, lane_annotation, out_img
def find_lane_xy_frommask(mask_input, nwindows=9, margin=100, minpix=50, NO_IMG=False):
    """Locate left/right lane-line pixels in a binary mask via sliding-window search.

    (Duplicated definition retained from the original file.)

    Parameters
    ----------
    mask_input : 2D array
        Binary mask of candidate lane pixels (any non-zero scale; renormalized here).
    nwindows : int
        Number of vertical sliding windows.
    margin : int
        Half-width of each search window (center +/- margin, in pixels).
    minpix : int
        Minimum pixel count (and minimum y-extent) required before refitting
        a polynomial to recenter the next window.
    NO_IMG : bool
        If True, skip building the diagnostic image (faster; used in the pipeline).

    Returns
    -------
    (leftlane, rightlane, lane_annotation, out_img) :
        LaneLine objects for each side, an RGB image marking the accepted
        pixels (left=red, right=blue), and the diagnostic image
        (None when NO_IMG is True).
    """
    # normalize mask to 0/255 uint8
    mask_input = 255 * np.uint8(mask_input / np.max(mask_input))

    # Opening keeps only the most reliable (thick) detections; their connected
    # components can be taken wholesale once a window touches them.
    kernel = np.ones((3, 3), np.uint8)
    mask_mostreliable = cv2.morphologyEx(255 * np.uint8(mask_input), cv2.MORPH_OPEN,
                                         kernel, iterations=2)
    ret, labels = cv2.connectedComponents(mask_mostreliable)
    # (y, x) pixel coordinates of each labeled region (label k -> index k-1)
    region_idx_map = [(labels == j).nonzero() for j in range(1, np.max(labels) + 1)]

    # Histogram of the lower half of the mask: its peaks give the lane base x-positions.
    histogram = np.sum(mask_input[mask_input.shape[0] // 2:, :], axis=0)
    Ny, Nx = mask_input.shape[0:2]
    # builtin int arithmetic: np.int was a deprecated alias for int, removed in NumPy 1.24
    midpoint = Nx // 2
    # ignore the outer 10% of the image on each side when picking the bases
    leftx_base = np.argmax(histogram[Nx // 10:midpoint]) + Nx // 10
    rightx_base = np.argmax(histogram[midpoint:Nx - Nx // 10]) + midpoint

    window_height = Ny // nwindows
    # coordinates of all non-zero mask pixels
    nonzero = mask_input.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    leftx_current = leftx_base
    rightx_current = rightx_base
    # accumulated pixel coordinates per lane: [list of x-arrays, list of y-arrays]
    left_lane_inds = [[], []]
    right_lane_inds = [[], []]

    if not NO_IMG:
        out_img = np.dstack((mask_input, mask_input, mask_input))
    else:
        out_img = None

    # lowest y (i.e. highest image point) confirmed by a reliable connected region
    ymin_good_left = np.nan
    ymin_good_right = np.nan

    for window in range(nwindows):
        # window y-boundaries (y grows downward; windows march bottom -> top)
        win_y_low = Ny - (window + 1) * window_height
        win_y_high = Ny - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        if not NO_IMG:
            cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                          (win_xleft_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low),
                          (win_xright_high, win_y_high), (0, 255, 0), 2)

        # non-zero pixels falling inside each window
        good_left_inds = np.where((nonzerox >= win_xleft_low) & (nonzerox <= win_xleft_high) &
                                  (nonzeroy >= win_y_low) & (nonzeroy <= win_y_high))[0]
        good_right_inds = np.where((nonzerox >= win_xright_low) & (nonzerox <= win_xright_high) &
                                   (nonzeroy >= win_y_low) & (nonzeroy <= win_y_high))[0]
        left_lane_inds[0].append(nonzerox[good_left_inds])
        left_lane_inds[1].append(nonzeroy[good_left_inds])
        right_lane_inds[0].append(nonzerox[good_right_inds])
        right_lane_inds[1].append(nonzeroy[good_right_inds])

        # If the window touches a reliable connected region, add that region's
        # pixels inside the window's y-range (they belong to the same lane).
        labels_in_left = np.unique(labels[nonzeroy[good_left_inds], nonzerox[good_left_inds]])
        labels_in_left = labels_in_left[labels_in_left > 0]
        if np.size(labels_in_left) > 0:
            for k in labels_in_left:
                yreg_left, xreg_left = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
                reg_good_idx = np.where((yreg_left >= win_y_low) & (yreg_left <= win_y_high))[0]
                left_lane_inds[0].append(xreg_left[reg_good_idx])
                left_lane_inds[1].append(yreg_left[reg_good_idx])
                ymin_good_left = np.nanmin(np.concatenate(([ymin_good_left],
                                                           yreg_left[reg_good_idx])))
        labels_in_right = np.unique(labels[nonzeroy[good_right_inds], nonzerox[good_right_inds]])
        labels_in_right = labels_in_right[labels_in_right > 0]
        if np.size(labels_in_right) > 0:
            for k in labels_in_right:
                yreg_right, xreg_right = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
                reg_good_idx = np.where((yreg_right >= win_y_low) & (yreg_right <= win_y_high))[0]
                right_lane_inds[0].append(xreg_right[reg_good_idx])
                right_lane_inds[1].append(yreg_right[reg_good_idx])
                ymin_good_right = np.nanmin(np.concatenate(([ymin_good_right],
                                                            yreg_right[reg_good_idx])))

        # Recenter the next window by fitting a low-order polynomial to the
        # pixels gathered so far (order 1 for the first 3 windows, 2 afterwards)
        # and evaluating it at the next window's vertical center.
        if (np.size(np.concatenate(left_lane_inds[1])) >= minpix and
                np.max(np.concatenate(left_lane_inds[1])) - np.min(np.concatenate(left_lane_inds[1])) > minpix):
            order = 1 if window < 3 else 2
            polycf_left = np.polyfit(np.concatenate(left_lane_inds[1]),
                                     np.concatenate(left_lane_inds[0]), order)
            leftx_current = int(np.round(np.polyval(polycf_left,
                                                    0.5 * (win_y_low + win_y_high) - window_height)))
        if (np.size(np.concatenate(right_lane_inds[1])) >= minpix and
                np.max(np.concatenate(right_lane_inds[1])) - np.min(np.concatenate(right_lane_inds[1])) > minpix):
            order = 1 if window < 3 else 2
            polycf_right = np.polyfit(np.concatenate(right_lane_inds[1]),
                                      np.concatenate(right_lane_inds[0]), order)
            rightx_current = int(np.round(np.polyval(polycf_right,
                                                     0.5 * (win_y_low + win_y_high) - window_height)))
        # NOTE(review): unreachable debug-plot block (hard-coded PLOT_METHOD = False) removed.

    # gather all accepted pixels and make a weighted quadratic fit for each lane;
    # weight w = y/Ny favors pixels near the bottom of the image (closest to the car)
    leftx = np.concatenate(left_lane_inds[0])
    lefty = np.concatenate(left_lane_inds[1])
    rightx = np.concatenate(right_lane_inds[0])
    righty = np.concatenate(right_lane_inds[1])
    polycf_left, sqr_error_left, _, _, _ = np.polyfit(lefty, leftx, 2, full=True, w=lefty / Ny)
    # guard: polyfit returns an empty residual array when the fit is exactly determined
    MSE_left = np.sqrt(sqr_error_left[0] / np.size(lefty)) if np.size(sqr_error_left) > 0 else 0.0
    polycf_right, sqr_error_right, _, _, _ = np.polyfit(righty, rightx, 2, full=True, w=righty / Ny)
    MSE_right = np.sqrt(sqr_error_right[0] / np.size(righty)) if np.size(sqr_error_right) > 0 else 0.0

    # RGB annotation of the accepted pixels (left = red, right = blue)
    lane_annotation = np.zeros([Ny, Nx, 3], dtype=np.uint8)
    lane_annotation[lefty, leftx] = [255, 0, 0]
    lane_annotation[righty, rightx] = [0, 0, 255]

    if not NO_IMG:
        out_img[lefty, leftx] = [255, 0, 0]
        out_img[righty, rightx] = [0, 0, 255]
        ploty = np.linspace(0, Ny - 1, Ny)
        left_fitx = np.polyval(polycf_left, ploty)
        right_fitx = np.polyval(polycf_right, ploty)
        if plt.get_backend() == 'Agg':  # only draw when rendering off-screen
            plt.plot(left_fitx, ploty, color='yellow')
            plt.plot(right_fitx, ploty, color='yellow')

    leftlane = LaneLine(leftx, lefty, polycf_left, MSE_left, ymin_good_left)
    rightlane = LaneLine(rightx, righty, polycf_right, MSE_right, ymin_good_right)
    return leftlane, rightlane, lane_annotation, out_img
Return the coordinates of the located pixels, polynomial coefficients and optionally an image showing the
windows/detections
**Parameters/Keywords:
nwindows ==> Choose the number of sliding windows
margin ==> Set the width of the windows +/- margin
minpix ==> Set minimum number of pixels found to recenter window
NO_IMG ==> do not calculate the diagnose output image (used in pipeline)<|endoftext|> |
def find_lane_xy_frompoly(mask_input, polycf_left, polycf_right, margin=80, NO_IMG=False):
    """Locate lane pixels by searching a +/- margin band around prior polynomial fits.

    Parameters
    ----------
    mask_input : 2D array
        Binary mask of candidate lane pixels (any non-zero scale; renormalized here).
    polycf_left, polycf_right : array-like
        Polynomial coefficients x(y) of the previous left/right lane fits.
    margin : int
        Half-width (in x) of the search band around each polynomial.
    NO_IMG : bool
        If True, skip building the diagnostic image (faster; used in the pipeline).

    Returns
    -------
    (leftlane, rightlane, lane_annotation, out_img) :
        LaneLine objects for each side, an RGB image marking the accepted
        pixels (left=red, right=blue), and the diagnostic image
        (None when NO_IMG is True).
    """
    # normalize mask to 0/255 uint8
    mask_input = 255 * np.uint8(mask_input / np.max(mask_input))
    Ny, Nx = np.shape(mask_input)

    # Opening keeps only the most reliable (thick) detections; their connected
    # components are merged in wholesale when the search band touches them.
    kernel = np.ones((3, 3), np.uint8)
    mask_mostreliable = cv2.morphologyEx(255 * np.uint8(mask_input), cv2.MORPH_OPEN,
                                         kernel, iterations=2)
    ret, labels = cv2.connectedComponents(mask_mostreliable)
    region_idx_map = [(labels == j).nonzero() for j in range(1, np.max(labels) + 1)]

    nonzero = mask_input.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # select non-zero pixels within +/- margin (in x) of each polynomial curve
    x_nonzeropoly_left = np.polyval(polycf_left, nonzeroy)
    left_lane_inds = ((nonzerox > x_nonzeropoly_left - margin) &
                      (nonzerox < x_nonzeropoly_left + margin))
    x_nonzeropoly_right = np.polyval(polycf_right, nonzeroy)
    right_lane_inds = ((nonzerox > x_nonzeropoly_right - margin) &
                       (nonzerox < x_nonzeropoly_right + margin))
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # add whole reliable regions touched by the left band; their topmost y is
    # recorded as the highest "trustworthy" lane point
    leftx_fromlabels, lefty_fromlabels = [], []
    labels_in_left = np.unique(labels[lefty, leftx])
    labels_in_left = labels_in_left[labels_in_left > 0]
    if np.size(labels_in_left) > 0:
        for k in labels_in_left:
            yreg, xreg = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
            leftx_fromlabels.append(xreg)
            lefty_fromlabels.append(yreg)
        leftx_fromlabels = np.concatenate(leftx_fromlabels)
        lefty_fromlabels = np.concatenate(lefty_fromlabels)
        leftx = np.concatenate((leftx, leftx_fromlabels))
        lefty = np.concatenate((lefty, lefty_fromlabels))
        ymin_good_left = np.min(lefty_fromlabels)
    else:
        ymin_good_left = np.nan
    # same for the right band
    rightx_fromlabels, righty_fromlabels = [], []
    labels_in_right = np.unique(labels[righty, rightx])
    labels_in_right = labels_in_right[labels_in_right > 0]
    if np.size(labels_in_right) > 0:
        for k in labels_in_right:
            yreg, xreg = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
            rightx_fromlabels.append(xreg)
            righty_fromlabels.append(yreg)
        rightx_fromlabels = np.concatenate(rightx_fromlabels)
        righty_fromlabels = np.concatenate(righty_fromlabels)
        rightx = np.concatenate((rightx, rightx_fromlabels))
        righty = np.concatenate((righty, righty_fromlabels))
        ymin_good_right = np.min(righty_fromlabels)
    else:
        ymin_good_right = np.nan

    # weighted quadratic fits; weight w = y/Ny favors pixels near the car
    polycf_left, sqr_error_left, _, _, _ = np.polyfit(lefty, leftx, 2, full=True, w=lefty / Ny)
    # guard: polyfit returns an empty residual array when the fit is exactly determined
    MSE_left = np.sqrt(sqr_error_left[0] / np.size(lefty)) if np.size(sqr_error_left) > 0 else 0.0
    polycf_right, sqr_error_right, _, _, _ = np.polyfit(righty, rightx, 2, full=True, w=righty / Ny)
    MSE_right = np.sqrt(sqr_error_right[0] / np.size(righty)) if np.size(sqr_error_right) > 0 else 0.0

    ploty = np.linspace(0, Ny - 1, Ny)
    left_fitx = np.polyval(polycf_left, ploty)
    right_fitx = np.polyval(polycf_right, ploty)

    # annotation built once, AFTER the label pixels were merged in (the original
    # computed an identical throwaway copy before the merge as well)
    lane_annotation = np.zeros([Ny, Nx, 3], dtype=np.uint8)
    lane_annotation[lefty, leftx] = [255, 0, 0]
    lane_annotation[righty, rightx] = [0, 0, 255]

    if not NO_IMG:
        # mask_input is already scaled to 0..255 here; the original extra "* 255"
        # wrapped around in uint8 and blanked the diagnostic background
        out_img = np.dstack((mask_input, mask_input, mask_input))
        window_img = np.zeros_like(out_img)
        out_img[lefty, leftx] = [255, 0, 0]
        out_img[righty, rightx] = [0, 0, 255]
        # shade the +/- margin search band around each polynomial
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
    else:
        out_img = None

    leftlane = LaneLine(leftx, lefty, polycf_left, MSE_left, ymin_good_left)
    rightlane = LaneLine(rightx, righty, polycf_right, MSE_right, ymin_good_right)
    return leftlane, rightlane, lane_annotation, out_img
Return the coordinates of the located pixels, polynomial coefficients and optionally an image showing the
windows/detections
**Parameters/Keywords:
margin ==> Set the width of the windows +/- margin
def find_lane_xy_frompoly(mask_input, polycf_left, polycf_right, margin=80, NO_IMG=False):
    """Locate lane pixels by searching a +/- margin band around prior polynomial fits.

    (Duplicated definition retained from the original file.)

    Parameters
    ----------
    mask_input : 2D array
        Binary mask of candidate lane pixels (any non-zero scale; renormalized here).
    polycf_left, polycf_right : array-like
        Polynomial coefficients x(y) of the previous left/right lane fits.
    margin : int
        Half-width (in x) of the search band around each polynomial.
    NO_IMG : bool
        If True, skip building the diagnostic image (faster; used in the pipeline).

    Returns
    -------
    (leftlane, rightlane, lane_annotation, out_img) :
        LaneLine objects for each side, an RGB image marking the accepted
        pixels (left=red, right=blue), and the diagnostic image
        (None when NO_IMG is True).
    """
    # normalize mask to 0/255 uint8
    mask_input = 255 * np.uint8(mask_input / np.max(mask_input))
    Ny, Nx = np.shape(mask_input)

    # Opening keeps only the most reliable (thick) detections; their connected
    # components are merged in wholesale when the search band touches them.
    kernel = np.ones((3, 3), np.uint8)
    mask_mostreliable = cv2.morphologyEx(255 * np.uint8(mask_input), cv2.MORPH_OPEN,
                                         kernel, iterations=2)
    ret, labels = cv2.connectedComponents(mask_mostreliable)
    region_idx_map = [(labels == j).nonzero() for j in range(1, np.max(labels) + 1)]

    nonzero = mask_input.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # select non-zero pixels within +/- margin (in x) of each polynomial curve
    x_nonzeropoly_left = np.polyval(polycf_left, nonzeroy)
    left_lane_inds = ((nonzerox > x_nonzeropoly_left - margin) &
                      (nonzerox < x_nonzeropoly_left + margin))
    x_nonzeropoly_right = np.polyval(polycf_right, nonzeroy)
    right_lane_inds = ((nonzerox > x_nonzeropoly_right - margin) &
                       (nonzerox < x_nonzeropoly_right + margin))
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # add whole reliable regions touched by the left band
    leftx_fromlabels, lefty_fromlabels = [], []
    labels_in_left = np.unique(labels[lefty, leftx])
    labels_in_left = labels_in_left[labels_in_left > 0]
    if np.size(labels_in_left) > 0:
        for k in labels_in_left:
            yreg, xreg = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
            leftx_fromlabels.append(xreg)
            lefty_fromlabels.append(yreg)
        leftx_fromlabels = np.concatenate(leftx_fromlabels)
        lefty_fromlabels = np.concatenate(lefty_fromlabels)
        leftx = np.concatenate((leftx, leftx_fromlabels))
        lefty = np.concatenate((lefty, lefty_fromlabels))
        ymin_good_left = np.min(lefty_fromlabels)
    else:
        ymin_good_left = np.nan
    # same for the right band
    rightx_fromlabels, righty_fromlabels = [], []
    labels_in_right = np.unique(labels[righty, rightx])
    labels_in_right = labels_in_right[labels_in_right > 0]
    if np.size(labels_in_right) > 0:
        for k in labels_in_right:
            yreg, xreg = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
            rightx_fromlabels.append(xreg)
            righty_fromlabels.append(yreg)
        rightx_fromlabels = np.concatenate(rightx_fromlabels)
        righty_fromlabels = np.concatenate(righty_fromlabels)
        rightx = np.concatenate((rightx, rightx_fromlabels))
        righty = np.concatenate((righty, righty_fromlabels))
        ymin_good_right = np.min(righty_fromlabels)
    else:
        ymin_good_right = np.nan

    # weighted quadratic fits; weight w = y/Ny favors pixels near the car
    polycf_left, sqr_error_left, _, _, _ = np.polyfit(lefty, leftx, 2, full=True, w=lefty / Ny)
    # guard: polyfit returns an empty residual array when the fit is exactly determined
    MSE_left = np.sqrt(sqr_error_left[0] / np.size(lefty)) if np.size(sqr_error_left) > 0 else 0.0
    polycf_right, sqr_error_right, _, _, _ = np.polyfit(righty, rightx, 2, full=True, w=righty / Ny)
    MSE_right = np.sqrt(sqr_error_right[0] / np.size(righty)) if np.size(sqr_error_right) > 0 else 0.0

    ploty = np.linspace(0, Ny - 1, Ny)
    left_fitx = np.polyval(polycf_left, ploty)
    right_fitx = np.polyval(polycf_right, ploty)

    # annotation built once, AFTER merging the label pixels (the original also
    # computed an identical throwaway copy before the merge)
    lane_annotation = np.zeros([Ny, Nx, 3], dtype=np.uint8)
    lane_annotation[lefty, leftx] = [255, 0, 0]
    lane_annotation[righty, rightx] = [0, 0, 255]

    if not NO_IMG:
        # mask_input is already scaled to 0..255 here; the original extra "* 255"
        # wrapped around in uint8 and blanked the diagnostic background
        out_img = np.dstack((mask_input, mask_input, mask_input))
        window_img = np.zeros_like(out_img)
        out_img[lefty, leftx] = [255, 0, 0]
        out_img[righty, rightx] = [0, 0, 255]
        # shade the +/- margin search band around each polynomial
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
    else:
        out_img = None

    leftlane = LaneLine(leftx, lefty, polycf_left, MSE_left, ymin_good_left)
    rightlane = LaneLine(rightx, righty, polycf_right, MSE_right, ymin_good_right)
    return leftlane, rightlane, lane_annotation, out_img
def find_lane_xy_frompoly(mask_input, polycf_left, polycf_right, margin=80, NO_IMG=False):
    """Locate lane pixels by searching a +/- margin band around prior polynomial fits.

    (Duplicated definition retained from the original file.)

    Parameters
    ----------
    mask_input : 2D array
        Binary mask of candidate lane pixels (any non-zero scale; renormalized here).
    polycf_left, polycf_right : array-like
        Polynomial coefficients x(y) of the previous left/right lane fits.
    margin : int
        Half-width (in x) of the search band around each polynomial.
    NO_IMG : bool
        If True, skip building the diagnostic image (faster; used in the pipeline).

    Returns
    -------
    (leftlane, rightlane, lane_annotation, out_img) :
        LaneLine objects for each side, an RGB image marking the accepted
        pixels (left=red, right=blue), and the diagnostic image
        (None when NO_IMG is True).
    """
    # normalize mask to 0/255 uint8
    mask_input = 255 * np.uint8(mask_input / np.max(mask_input))
    Ny, Nx = np.shape(mask_input)

    # Opening keeps only the most reliable (thick) detections; their connected
    # components are merged in wholesale when the search band touches them.
    kernel = np.ones((3, 3), np.uint8)
    mask_mostreliable = cv2.morphologyEx(255 * np.uint8(mask_input), cv2.MORPH_OPEN,
                                         kernel, iterations=2)
    ret, labels = cv2.connectedComponents(mask_mostreliable)
    region_idx_map = [(labels == j).nonzero() for j in range(1, np.max(labels) + 1)]

    nonzero = mask_input.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # select non-zero pixels within +/- margin (in x) of each polynomial curve
    x_nonzeropoly_left = np.polyval(polycf_left, nonzeroy)
    left_lane_inds = ((nonzerox > x_nonzeropoly_left - margin) &
                      (nonzerox < x_nonzeropoly_left + margin))
    x_nonzeropoly_right = np.polyval(polycf_right, nonzeroy)
    right_lane_inds = ((nonzerox > x_nonzeropoly_right - margin) &
                       (nonzerox < x_nonzeropoly_right + margin))
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # add whole reliable regions touched by the left band
    leftx_fromlabels, lefty_fromlabels = [], []
    labels_in_left = np.unique(labels[lefty, leftx])
    labels_in_left = labels_in_left[labels_in_left > 0]
    if np.size(labels_in_left) > 0:
        for k in labels_in_left:
            yreg, xreg = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
            leftx_fromlabels.append(xreg)
            lefty_fromlabels.append(yreg)
        leftx_fromlabels = np.concatenate(leftx_fromlabels)
        lefty_fromlabels = np.concatenate(lefty_fromlabels)
        leftx = np.concatenate((leftx, leftx_fromlabels))
        lefty = np.concatenate((lefty, lefty_fromlabels))
        ymin_good_left = np.min(lefty_fromlabels)
    else:
        ymin_good_left = np.nan
    # same for the right band
    rightx_fromlabels, righty_fromlabels = [], []
    labels_in_right = np.unique(labels[righty, rightx])
    labels_in_right = labels_in_right[labels_in_right > 0]
    if np.size(labels_in_right) > 0:
        for k in labels_in_right:
            yreg, xreg = region_idx_map[k - 1][0], region_idx_map[k - 1][1]
            rightx_fromlabels.append(xreg)
            righty_fromlabels.append(yreg)
        rightx_fromlabels = np.concatenate(rightx_fromlabels)
        righty_fromlabels = np.concatenate(righty_fromlabels)
        rightx = np.concatenate((rightx, rightx_fromlabels))
        righty = np.concatenate((righty, righty_fromlabels))
        ymin_good_right = np.min(righty_fromlabels)
    else:
        ymin_good_right = np.nan

    # weighted quadratic fits; weight w = y/Ny favors pixels near the car
    polycf_left, sqr_error_left, _, _, _ = np.polyfit(lefty, leftx, 2, full=True, w=lefty / Ny)
    # guard: polyfit returns an empty residual array when the fit is exactly determined
    MSE_left = np.sqrt(sqr_error_left[0] / np.size(lefty)) if np.size(sqr_error_left) > 0 else 0.0
    polycf_right, sqr_error_right, _, _, _ = np.polyfit(righty, rightx, 2, full=True, w=righty / Ny)
    MSE_right = np.sqrt(sqr_error_right[0] / np.size(righty)) if np.size(sqr_error_right) > 0 else 0.0

    ploty = np.linspace(0, Ny - 1, Ny)
    left_fitx = np.polyval(polycf_left, ploty)
    right_fitx = np.polyval(polycf_right, ploty)

    # annotation built once, AFTER merging the label pixels (the original also
    # computed an identical throwaway copy before the merge)
    lane_annotation = np.zeros([Ny, Nx, 3], dtype=np.uint8)
    lane_annotation[lefty, leftx] = [255, 0, 0]
    lane_annotation[righty, rightx] = [0, 0, 255]

    if not NO_IMG:
        # mask_input is already scaled to 0..255 here; the original extra "* 255"
        # wrapped around in uint8 and blanked the diagnostic background
        out_img = np.dstack((mask_input, mask_input, mask_input))
        window_img = np.zeros_like(out_img)
        out_img[lefty, leftx] = [255, 0, 0]
        out_img[righty, rightx] = [0, 0, 255]
        # shade the +/- margin search band around each polynomial
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
    else:
        out_img = None

    leftlane = LaneLine(leftx, lefty, polycf_left, MSE_left, ymin_good_left)
    rightlane = LaneLine(rightx, righty, polycf_right, MSE_right, ymin_good_right)
    return leftlane, rightlane, lane_annotation, out_img
Return the coordinates of the located pixels, polynomial coefficients and optionally an image showing the
windows/detections
**Parameters/Keywords:
margin ==> Set the width of the windows +/- margin
NO_IMG ==> do not calculate the diagnose output image (used in pipeline)<|endoftext|> |
def getlane_annotation(mask_shape, polycf_left, polycf_right, img2annotate=None,
                       xmargin=5, ymin=0, PLOT_LINES=False):
    """Build a byte mask of the region between the two lane polynomials.

    Optionally overlays the same region, shaded green, onto a provided image.

    Parameters
    ----------
    mask_shape : tuple
        (Ny, Nx[, ...]) shape of the output mask.
    polycf_left, polycf_right : array-like
        Polynomial coefficients x(y) of the left/right lane boundaries.
    img2annotate : ndarray or None
        RGB image to annotate; must match mask_shape in the first two axes.
        Default is now None instead of the original mutable [] default
        (behavior unchanged: both mean "no image").
    xmargin : int
        Extra +/- width (in x) added around each boundary curve.
    ymin : int
        Smallest y (topmost row) included in the filled region.
    PLOT_LINES : bool
        Also draw the polynomial lines with pyplot (only used when an image
        to annotate is given).

    Returns
    -------
    (out_mask, img_annotated) :
        uint8 mask (255 inside the lane region) and the annotated image
        (None when no image was provided).

    Raises
    ------
    ValueError
        If img2annotate does not match mask_shape (was a bare Exception).
    """
    Ny, Nx = mask_shape[0:2]
    out_mask = np.zeros([Ny, Nx], dtype=np.uint8)

    # None (new default) or an empty array both mean "nothing to annotate"
    has_img = img2annotate is not None and np.size(img2annotate) != 0
    if has_img:
        if np.shape(img2annotate)[0:2] != (Ny, Nx):
            raise ValueError('Error: Image to annotate must match mask size!')
        img_annotated = np.zeros_like(img2annotate)
    else:
        img_annotated = None

    # closed polygon between the two curves, widened by +/- xmargin in x
    ploty = np.arange(ymin, Ny)
    left_fitx = np.polyval(polycf_left, ploty)
    right_fitx = np.polyval(polycf_right, ploty)
    left_line_lane = np.array([np.transpose(np.vstack([left_fitx - xmargin, ploty]))])
    right_line_lane = np.array([np.flipud(np.transpose(np.vstack([right_fitx + xmargin, ploty])))])
    lane_pts = np.hstack((left_line_lane, right_line_lane))
    cv2.fillPoly(out_mask, np.int_([lane_pts]), 255)

    if has_img:
        # shade the lane region green and blend it onto the input image
        cv2.fillPoly(img_annotated, np.int_([lane_pts]), (0, 255, 0))
        img_annotated = cv2.addWeighted(img2annotate, 1, img_annotated, 0.3, 0)
        if PLOT_LINES:
            plt.plot(left_fitx, ploty, color='yellow', linestyle='--')
            plt.plot(right_fitx, ploty, color='yellow', linestyle='--')

    return out_mask, img_annotated
(plus an optional x-margin). Also add the annotations to an image if it is provided.
**Parameters/Keywords:
img2annotate ==> RGB image to annotate, if provided (should match the size of mask_shape!)
xmargin ==> Set the width of the +/- margin "
PLOT_LINES ==> Add the polynomial lines to output (only relevant if a img2annnotate is present) | P2_subroutines.py | getlane_annotation | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def getlane_annotation(mask_shape, polycf_left, polycf_right, img2annotate=[], xmargin=5, ymin=0, PLOT_LINES=False):
' Give the shape and the polynomial coefficients, return byte mask showing the region inside the two curves\n (plus an optional x-margin). Also add the annotations to an image if it is provided.\n **Parameters/Keywords:\n img2annotate ==> RGB image to annotate, if provided (should match the size of mask_shape!)\n xmargin ==> Set the width of the +/- margin "\n PLOT_LINES ==> Add the polynomial lines to output (only relevant if a img2annnotate is present)'
(Ny, Nx) = mask_shape[0:2]
out_mask = np.zeros([Ny, Nx], dtype=np.uint8)
if (np.size(img2annotate) != 0):
if (np.shape(img2annotate)[0:2] != (Ny, Nx)):
raise Exception('Error: Image to annotate must match mask size!')
img_annotated = np.zeros_like(img2annotate)
else:
img_annotated = None
ploty = np.arange(ymin, Ny)
left_fitx = np.polyval(polycf_left, ploty)
right_fitx = np.polyval(polycf_right, ploty)
left_line_lane = np.array([np.transpose(np.vstack([(left_fitx - xmargin), ploty]))])
right_line_lane = np.array([np.flipud(np.transpose(np.vstack([(right_fitx + xmargin), ploty])))])
lane_pts = np.hstack((left_line_lane, right_line_lane))
cv2.fillPoly(out_mask, np.int_([lane_pts]), 255)
if (np.size(img2annotate) != 0):
cv2.fillPoly(img_annotated, np.int_([lane_pts]), (0, 255, 0))
img_annotated = cv2.addWeighted(img2annotate, 1, img_annotated, 0.3, 0)
if PLOT_LINES:
plt.plot(left_fitx, ploty, color='yellow', linestyle='--')
plt.plot(right_fitx, ploty, color='yellow', linestyle='--')
return (out_mask, img_annotated) | def getlane_annotation(mask_shape, polycf_left, polycf_right, img2annotate=[], xmargin=5, ymin=0, PLOT_LINES=False):
' Give the shape and the polynomial coefficients, return byte mask showing the region inside the two curves\n (plus an optional x-margin). Also add the annotations to an image if it is provided.\n **Parameters/Keywords:\n img2annotate ==> RGB image to annotate, if provided (should match the size of mask_shape!)\n xmargin ==> Set the width of the +/- margin "\n PLOT_LINES ==> Add the polynomial lines to output (only relevant if a img2annnotate is present)'
(Ny, Nx) = mask_shape[0:2]
out_mask = np.zeros([Ny, Nx], dtype=np.uint8)
if (np.size(img2annotate) != 0):
if (np.shape(img2annotate)[0:2] != (Ny, Nx)):
raise Exception('Error: Image to annotate must match mask size!')
img_annotated = np.zeros_like(img2annotate)
else:
img_annotated = None
ploty = np.arange(ymin, Ny)
left_fitx = np.polyval(polycf_left, ploty)
right_fitx = np.polyval(polycf_right, ploty)
left_line_lane = np.array([np.transpose(np.vstack([(left_fitx - xmargin), ploty]))])
right_line_lane = np.array([np.flipud(np.transpose(np.vstack([(right_fitx + xmargin), ploty])))])
lane_pts = np.hstack((left_line_lane, right_line_lane))
cv2.fillPoly(out_mask, np.int_([lane_pts]), 255)
if (np.size(img2annotate) != 0):
cv2.fillPoly(img_annotated, np.int_([lane_pts]), (0, 255, 0))
img_annotated = cv2.addWeighted(img2annotate, 1, img_annotated, 0.3, 0)
if PLOT_LINES:
plt.plot(left_fitx, ploty, color='yellow', linestyle='--')
plt.plot(right_fitx, ploty, color='yellow', linestyle='--')
return (out_mask, img_annotated)<|docstring|>Give the shape and the polynomial coefficients, return byte mask showing the region inside the two curves
(plus an optional x-margin). Also add the annotations to an image if it is provided.
**Parameters/Keywords:
img2annotate ==> RGB image to annotate, if provided (should match the size of mask_shape!)
xmargin ==> Set the width of the +/- margin "
PLOT_LINES ==> Add the polynomial lines to output (only relevant if a img2annnotate is present)<|endoftext|> |
368880227f5858458b0b094590c10129580aff9064f54c36417b743182044bc3 | def cf_px2m(poly_cf_px, img_shape):
' Convert from pixel polynomial coefficients (order 2) to m\n x = f(y), with origin at center/bottom of image, positive y upwards!'
(Ny, Nx) = img_shape[0:2]
ym_per_pix = (30 / 720)
xm_per_pix = (3.7 / 700)
(a, b, c) = poly_cf_px
poly_cf_m = np.array([((xm_per_pix / (ym_per_pix ** 2)) * a), (- (((((2 * xm_per_pix) / ym_per_pix) * Ny) * a) + ((xm_per_pix / ym_per_pix) * b))), (xm_per_pix * ((c - (Nx / 2)) + (Ny * (b + (a * Ny)))))])
return poly_cf_m | Convert from pixel polynomial coefficients (order 2) to m
x = f(y), with origin at center/bottom of image, positive y upwards! | P2_subroutines.py | cf_px2m | felipeqda/CarND-Advanced-Lane-Lines | 0 | python | def cf_px2m(poly_cf_px, img_shape):
' Convert from pixel polynomial coefficients (order 2) to m\n x = f(y), with origin at center/bottom of image, positive y upwards!'
(Ny, Nx) = img_shape[0:2]
ym_per_pix = (30 / 720)
xm_per_pix = (3.7 / 700)
(a, b, c) = poly_cf_px
poly_cf_m = np.array([((xm_per_pix / (ym_per_pix ** 2)) * a), (- (((((2 * xm_per_pix) / ym_per_pix) * Ny) * a) + ((xm_per_pix / ym_per_pix) * b))), (xm_per_pix * ((c - (Nx / 2)) + (Ny * (b + (a * Ny)))))])
return poly_cf_m | def cf_px2m(poly_cf_px, img_shape):
' Convert from pixel polynomial coefficients (order 2) to m\n x = f(y), with origin at center/bottom of image, positive y upwards!'
(Ny, Nx) = img_shape[0:2]
ym_per_pix = (30 / 720)
xm_per_pix = (3.7 / 700)
(a, b, c) = poly_cf_px
poly_cf_m = np.array([((xm_per_pix / (ym_per_pix ** 2)) * a), (- (((((2 * xm_per_pix) / ym_per_pix) * Ny) * a) + ((xm_per_pix / ym_per_pix) * b))), (xm_per_pix * ((c - (Nx / 2)) + (Ny * (b + (a * Ny)))))])
return poly_cf_m<|docstring|>Convert from pixel polynomial coefficients (order 2) to m
x = f(y), with origin at center/bottom of image, positive y upwards!<|endoftext|> |
eb25619189b599323e26c3047025f527bdb84d104b2636b2751a67a52dfb20d4 | def get_direction(ball_angle: float) -> int:
'Get direction to navigate robot to face the ball\n\n Args:\n ball_angle (float): Angle between the ball and the robot\n\n Returns:\n int: 0 = forward, -1 = right, 1 = left\n '
if ((ball_angle >= 340) or (ball_angle <= 20)):
return 0
return ((- 1) if (ball_angle < 180) else 1) | Get direction to navigate robot to face the ball
Args:
ball_angle (float): Angle between the ball and the robot
Returns:
int: 0 = forward, -1 = right, 1 = left | controllers/rcj_soccer_team_yellow/utils.py | get_direction | fdmxfarhan/phasemoon | 0 | python | def get_direction(ball_angle: float) -> int:
'Get direction to navigate robot to face the ball\n\n Args:\n ball_angle (float): Angle between the ball and the robot\n\n Returns:\n int: 0 = forward, -1 = right, 1 = left\n '
if ((ball_angle >= 340) or (ball_angle <= 20)):
return 0
return ((- 1) if (ball_angle < 180) else 1) | def get_direction(ball_angle: float) -> int:
'Get direction to navigate robot to face the ball\n\n Args:\n ball_angle (float): Angle between the ball and the robot\n\n Returns:\n int: 0 = forward, -1 = right, 1 = left\n '
if ((ball_angle >= 340) or (ball_angle <= 20)):
return 0
return ((- 1) if (ball_angle < 180) else 1)<|docstring|>Get direction to navigate robot to face the ball
Args:
ball_angle (float): Angle between the ball and the robot
Returns:
int: 0 = forward, -1 = right, 1 = left<|endoftext|> |
9fe124824fabbed4eb4583dd8ef6aa475cf042ae4b3f2aea60cb666a12556b7c | def set_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set border style.\n\n Args:\n style: border style\n\n Raises:\n InvalidParamError: border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | Set border style.
Args:
style: border style
Raises:
InvalidParamError: border style does not exist | prettyqt/gui/texttablecellformat.py | set_border_style | phil65/PrettyQt | 7 | python | def set_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set border style.\n\n Args:\n style: border style\n\n Raises:\n InvalidParamError: border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | def set_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set border style.\n\n Args:\n style: border style\n\n Raises:\n InvalidParamError: border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setBorderStyle(gui.textframeformat.BORDER_STYLES[style])<|docstring|>Set border style.
Args:
style: border style
Raises:
InvalidParamError: border style does not exist<|endoftext|> |
4e84fecee05e0756a08a21fb210c9741f17f4b342d6ac2f29b97f0ab6f119b94 | def set_bottom_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set bottom border style.\n\n Args:\n style: bottom border style\n\n Raises:\n InvalidParamError: bottom border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setBottomBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | Set bottom border style.
Args:
style: bottom border style
Raises:
InvalidParamError: bottom border style does not exist | prettyqt/gui/texttablecellformat.py | set_bottom_border_style | phil65/PrettyQt | 7 | python | def set_bottom_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set bottom border style.\n\n Args:\n style: bottom border style\n\n Raises:\n InvalidParamError: bottom border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setBottomBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | def set_bottom_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set bottom border style.\n\n Args:\n style: bottom border style\n\n Raises:\n InvalidParamError: bottom border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setBottomBorderStyle(gui.textframeformat.BORDER_STYLES[style])<|docstring|>Set bottom border style.
Args:
style: bottom border style
Raises:
InvalidParamError: bottom border style does not exist<|endoftext|> |
51ecdf491c934285ede2074d9bda3c60cf1946ab28f25518afda9b6b21612f42 | def get_bottom_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current bottom border style.\n\n Returns:\n bottom border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.bottomBorderStyle()] | Get the current bottom border style.
Returns:
bottom border style | prettyqt/gui/texttablecellformat.py | get_bottom_border_style | phil65/PrettyQt | 7 | python | def get_bottom_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current bottom border style.\n\n Returns:\n bottom border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.bottomBorderStyle()] | def get_bottom_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current bottom border style.\n\n Returns:\n bottom border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.bottomBorderStyle()]<|docstring|>Get the current bottom border style.
Returns:
bottom border style<|endoftext|> |
dd263f731def0c5237f79ab0f098194f4b12c9f2edfd75d006e321be6f53dc8a | def set_left_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set left border style.\n\n Args:\n style: left border style\n\n Raises:\n InvalidParamError: left border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setLeftBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | Set left border style.
Args:
style: left border style
Raises:
InvalidParamError: left border style does not exist | prettyqt/gui/texttablecellformat.py | set_left_border_style | phil65/PrettyQt | 7 | python | def set_left_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set left border style.\n\n Args:\n style: left border style\n\n Raises:\n InvalidParamError: left border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setLeftBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | def set_left_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set left border style.\n\n Args:\n style: left border style\n\n Raises:\n InvalidParamError: left border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setLeftBorderStyle(gui.textframeformat.BORDER_STYLES[style])<|docstring|>Set left border style.
Args:
style: left border style
Raises:
InvalidParamError: left border style does not exist<|endoftext|> |
5aa787139d44526882947c14b34393f9408f90eb7fa8376db1fc874bd6d9a5c1 | def get_left_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current left border style.\n\n Returns:\n left border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.leftBorderStyle()] | Get the current left border style.
Returns:
left border style | prettyqt/gui/texttablecellformat.py | get_left_border_style | phil65/PrettyQt | 7 | python | def get_left_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current left border style.\n\n Returns:\n left border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.leftBorderStyle()] | def get_left_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current left border style.\n\n Returns:\n left border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.leftBorderStyle()]<|docstring|>Get the current left border style.
Returns:
left border style<|endoftext|> |
c049b04a4cf3ff2b9d4d2e5b999716f09f50d84a82f49264d00ec3d80d222918 | def set_right_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set right border style.\n\n Args:\n style: right border style\n\n Raises:\n InvalidParamError: right border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setRightBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | Set right border style.
Args:
style: right border style
Raises:
InvalidParamError: right border style does not exist | prettyqt/gui/texttablecellformat.py | set_right_border_style | phil65/PrettyQt | 7 | python | def set_right_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set right border style.\n\n Args:\n style: right border style\n\n Raises:\n InvalidParamError: right border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setRightBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | def set_right_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set right border style.\n\n Args:\n style: right border style\n\n Raises:\n InvalidParamError: right border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setRightBorderStyle(gui.textframeformat.BORDER_STYLES[style])<|docstring|>Set right border style.
Args:
style: right border style
Raises:
InvalidParamError: right border style does not exist<|endoftext|> |
3afa918a3ff563c1739f51fe058d0da46b5b40760e6d3a291ec8cd43ad9f0f0f | def get_right_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current right border style.\n\n Returns:\n right border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.rightBorderStyle()] | Get the current right border style.
Returns:
right border style | prettyqt/gui/texttablecellformat.py | get_right_border_style | phil65/PrettyQt | 7 | python | def get_right_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current right border style.\n\n Returns:\n right border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.rightBorderStyle()] | def get_right_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current right border style.\n\n Returns:\n right border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.rightBorderStyle()]<|docstring|>Get the current right border style.
Returns:
right border style<|endoftext|> |
f369010d2a11e94ab7b9a3a6c55eb75bfb757a20c57142a95f53d0dce3198ab2 | def set_top_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set top border style.\n\n Args:\n style: top border style\n\n Raises:\n InvalidParamError: top border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setTopBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | Set top border style.
Args:
style: top border style
Raises:
InvalidParamError: top border style does not exist | prettyqt/gui/texttablecellformat.py | set_top_border_style | phil65/PrettyQt | 7 | python | def set_top_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set top border style.\n\n Args:\n style: top border style\n\n Raises:\n InvalidParamError: top border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setTopBorderStyle(gui.textframeformat.BORDER_STYLES[style]) | def set_top_border_style(self, style: gui.textframeformat.BorderStyleStr):
'Set top border style.\n\n Args:\n style: top border style\n\n Raises:\n InvalidParamError: top border style does not exist\n '
if (style not in gui.textframeformat.BORDER_STYLES):
raise InvalidParamError(style, gui.textframeformat.BORDER_STYLES)
self.setTopBorderStyle(gui.textframeformat.BORDER_STYLES[style])<|docstring|>Set top border style.
Args:
style: top border style
Raises:
InvalidParamError: top border style does not exist<|endoftext|> |
7a1fab1c6d1b005c9c9a2d12e7d551eead52abdecbf34a391a011ec91fa789cf | def get_top_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current top border style.\n\n Returns:\n top border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.topBorderStyle()] | Get the current top border style.
Returns:
top border style | prettyqt/gui/texttablecellformat.py | get_top_border_style | phil65/PrettyQt | 7 | python | def get_top_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current top border style.\n\n Returns:\n top border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.topBorderStyle()] | def get_top_border_style(self) -> gui.textframeformat.BorderStyleStr:
'Get the current top border style.\n\n Returns:\n top border style\n '
return gui.textframeformat.BORDER_STYLES.inverse[self.topBorderStyle()]<|docstring|>Get the current top border style.
Returns:
top border style<|endoftext|> |
2213e7f2714da41e313990dd806665c40f74df8ff181e98eaca1360ab1ffa3d9 | def __init__(self, file_name, timeout=30, delay=0.2, stealing=False):
' Prepare the file locker. Specify the file to lock and optionally\n the maximum timeout and the delay between each attempt to lock.\n '
self.is_locked = False
self.lockfile = os.path.join(os.getcwd(), ('%s.lock' % file_name))
self.fd = None
self.file_name = file_name
self.timeout = timeout
self.delay = delay
self.stealing = stealing
if stealing:
if (os.name != 'posix'):
raise RuntimeError('Detecting a running process by its PID is only supported on a POSIX system.')
import json
self.json = json | Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock. | useful/filelock.py | __init__ | tuttle/python-useful | 7 | python | def __init__(self, file_name, timeout=30, delay=0.2, stealing=False):
' Prepare the file locker. Specify the file to lock and optionally\n the maximum timeout and the delay between each attempt to lock.\n '
self.is_locked = False
self.lockfile = os.path.join(os.getcwd(), ('%s.lock' % file_name))
self.fd = None
self.file_name = file_name
self.timeout = timeout
self.delay = delay
self.stealing = stealing
if stealing:
if (os.name != 'posix'):
raise RuntimeError('Detecting a running process by its PID is only supported on a POSIX system.')
import json
self.json = json | def __init__(self, file_name, timeout=30, delay=0.2, stealing=False):
' Prepare the file locker. Specify the file to lock and optionally\n the maximum timeout and the delay between each attempt to lock.\n '
self.is_locked = False
self.lockfile = os.path.join(os.getcwd(), ('%s.lock' % file_name))
self.fd = None
self.file_name = file_name
self.timeout = timeout
self.delay = delay
self.stealing = stealing
if stealing:
if (os.name != 'posix'):
raise RuntimeError('Detecting a running process by its PID is only supported on a POSIX system.')
import json
self.json = json<|docstring|>Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.<|endoftext|> |
997995282f9dd2094226f65b499af017127f5064364add750004ef818f71d8b3 | def acquire(self):
' Acquire the lock, if possible. If the lock is in use, it check again\n every `wait` seconds. It does this until it either gets the lock or\n exceeds `timeout` number of seconds, in which case it throws\n an exception.\n '
start_time = time.time()
while True:
try:
self.fd = os.open(self.lockfile, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR))
break
except OSError as e:
if (e.errno != errno.EEXIST):
raise
if self.should_steal():
os.unlink(self.lockfile)
continue
if ((time.time() - start_time) >= self.timeout):
msg = ('%d seconds passed.' % self.timeout)
if self.stealing:
msg += (' Lock file: %s. My argv: %r' % (open(self.lockfile).read(512), sys.argv))
raise FileLockTimeoutException(msg)
time.sleep(self.delay)
self.is_locked = True
if self.stealing:
import datetime
info = {'lock_time': datetime.datetime.now().isoformat(), 'pid': os.getpid(), 'argv': sys.argv}
os.write(self.fd, self.json.dumps(info, indent=4).encode('utf-8'))
os.fsync(self.fd) | Acquire the lock, if possible. If the lock is in use, it check again
every `wait` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception. | useful/filelock.py | acquire | tuttle/python-useful | 7 | python | def acquire(self):
' Acquire the lock, if possible. If the lock is in use, it check again\n every `wait` seconds. It does this until it either gets the lock or\n exceeds `timeout` number of seconds, in which case it throws\n an exception.\n '
start_time = time.time()
while True:
try:
self.fd = os.open(self.lockfile, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR))
break
except OSError as e:
if (e.errno != errno.EEXIST):
raise
if self.should_steal():
os.unlink(self.lockfile)
continue
if ((time.time() - start_time) >= self.timeout):
msg = ('%d seconds passed.' % self.timeout)
if self.stealing:
msg += (' Lock file: %s. My argv: %r' % (open(self.lockfile).read(512), sys.argv))
raise FileLockTimeoutException(msg)
time.sleep(self.delay)
self.is_locked = True
if self.stealing:
import datetime
info = {'lock_time': datetime.datetime.now().isoformat(), 'pid': os.getpid(), 'argv': sys.argv}
os.write(self.fd, self.json.dumps(info, indent=4).encode('utf-8'))
os.fsync(self.fd) | def acquire(self):
' Acquire the lock, if possible. If the lock is in use, it check again\n every `wait` seconds. It does this until it either gets the lock or\n exceeds `timeout` number of seconds, in which case it throws\n an exception.\n '
start_time = time.time()
while True:
try:
self.fd = os.open(self.lockfile, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR))
break
except OSError as e:
if (e.errno != errno.EEXIST):
raise
if self.should_steal():
os.unlink(self.lockfile)
continue
if ((time.time() - start_time) >= self.timeout):
msg = ('%d seconds passed.' % self.timeout)
if self.stealing:
msg += (' Lock file: %s. My argv: %r' % (open(self.lockfile).read(512), sys.argv))
raise FileLockTimeoutException(msg)
time.sleep(self.delay)
self.is_locked = True
if self.stealing:
import datetime
info = {'lock_time': datetime.datetime.now().isoformat(), 'pid': os.getpid(), 'argv': sys.argv}
os.write(self.fd, self.json.dumps(info, indent=4).encode('utf-8'))
os.fsync(self.fd)<|docstring|>Acquire the lock, if possible. If the lock is in use, it check again
every `wait` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception.<|endoftext|> |
3cca8d7b23c621f707f5dbc3ca2eaed2c35a3ec6ea85f32633703b9a68257e5a | def release(self):
' Get rid of the lock by deleting the lockfile.\n When working in a `with` statement, this gets automatically\n called at the end.\n '
if self.is_locked:
if (self.fd is not None):
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False | Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end. | useful/filelock.py | release | tuttle/python-useful | 7 | python | def release(self):
' Get rid of the lock by deleting the lockfile.\n When working in a `with` statement, this gets automatically\n called at the end.\n '
if self.is_locked:
if (self.fd is not None):
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False | def release(self):
' Get rid of the lock by deleting the lockfile.\n When working in a `with` statement, this gets automatically\n called at the end.\n '
if self.is_locked:
if (self.fd is not None):
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False<|docstring|>Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.<|endoftext|> |
940a31c8c8d2897818c23a5d89ad9379a6fa02731e3540af375eb331e3cfa556 | def __enter__(self):
' Activated when used in the with statement.\n Should automatically acquire a lock to be used in the with block.\n '
if (not self.is_locked):
self.acquire()
return self | Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block. | useful/filelock.py | __enter__ | tuttle/python-useful | 7 | python | def __enter__(self):
' Activated when used in the with statement.\n Should automatically acquire a lock to be used in the with block.\n '
if (not self.is_locked):
self.acquire()
return self | def __enter__(self):
' Activated when used in the with statement.\n Should automatically acquire a lock to be used in the with block.\n '
if (not self.is_locked):
self.acquire()
return self<|docstring|>Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.<|endoftext|> |
ec81bab2cc1c0341532d83545e67a38c5e3201faf5846d1f5acf0b53f9d3be87 | def __exit__(self, exc_type, exc_value, traceback):
" Activated at the end of the with statement.\n It automatically releases the lock if it isn't locked.\n "
if self.is_locked:
self.release() | Activated at the end of the with statement.
It automatically releases the lock if it isn't locked. | useful/filelock.py | __exit__ | tuttle/python-useful | 7 | python | def __exit__(self, exc_type, exc_value, traceback):
" Activated at the end of the with statement.\n It automatically releases the lock if it isn't locked.\n "
if self.is_locked:
self.release() | def __exit__(self, exc_type, exc_value, traceback):
" Activated at the end of the with statement.\n It automatically releases the lock if it isn't locked.\n "
if self.is_locked:
self.release()<|docstring|>Activated at the end of the with statement.
It automatically releases the lock if it isn't locked.<|endoftext|> |
5d4311be612c9baababcc02d5aeac6a7c27403cbabf04459950928043657a002 | def __del__(self):
" Make sure that the FileLock instance doesn't leave a lockfile\n lying around.\n "
self.release() | Make sure that the FileLock instance doesn't leave a lockfile
lying around. | useful/filelock.py | __del__ | tuttle/python-useful | 7 | python | def __del__(self):
" Make sure that the FileLock instance doesn't leave a lockfile\n lying around.\n "
self.release() | def __del__(self):
" Make sure that the FileLock instance doesn't leave a lockfile\n lying around.\n "
self.release()<|docstring|>Make sure that the FileLock instance doesn't leave a lockfile
lying around.<|endoftext|> |
ec4f1bbcaaa6be54f018cb9b6a06156fa3af56156b919530288cb5f53620abf4 | def __call__(self, func):
'\n Support for using the instance as decorator. The entire function will be protected.\n '
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner | Support for using the instance as decorator. The entire function will be protected. | useful/filelock.py | __call__ | tuttle/python-useful | 7 | python | def __call__(self, func):
'\n \n '
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner | def __call__(self, func):
'\n \n '
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner<|docstring|>Support for using the instance as decorator. The entire function will be protected.<|endoftext|> |
ce9c3eb48cc54efd9cfd750acb9d6a5cc7c9d4b70546074caec94b232fe06dab | def GetParams(self):
'Testing engine with the same tensor repeated as output via identity.'
input_name = 'input'
input_dims = [100, 32]
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype=dtypes.float32, shape=input_dims, name=input_name)
b = self._ConstOp((32, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = (x1 + b)
out1 = array_ops.identity(x1, name='output1')
out2 = array_ops.identity(x1, name='output2')
iden1 = array_ops.identity(x1)
out3 = array_ops.identity(iden1, name='output3')
return trt_test.TfTrtIntegrationTestParams(gdef=g.as_graph_def(), input_names=[input_name], input_dims=[[input_dims]], output_names=['output1', 'output2', 'output3'], expected_output_dims=[([[100, 4]] * 3)]) | Testing engine with the same tensor repeated as output via identity. | tensorflow/python/compiler/tensorrt/test/identity_output_test.py | GetParams | 2hyan8/tensorflow | 36 | python | def GetParams(self):
input_name = 'input'
input_dims = [100, 32]
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype=dtypes.float32, shape=input_dims, name=input_name)
b = self._ConstOp((32, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = (x1 + b)
out1 = array_ops.identity(x1, name='output1')
out2 = array_ops.identity(x1, name='output2')
iden1 = array_ops.identity(x1)
out3 = array_ops.identity(iden1, name='output3')
return trt_test.TfTrtIntegrationTestParams(gdef=g.as_graph_def(), input_names=[input_name], input_dims=[[input_dims]], output_names=['output1', 'output2', 'output3'], expected_output_dims=[([[100, 4]] * 3)]) | def GetParams(self):
input_name = 'input'
input_dims = [100, 32]
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype=dtypes.float32, shape=input_dims, name=input_name)
b = self._ConstOp((32, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = (x1 + b)
out1 = array_ops.identity(x1, name='output1')
out2 = array_ops.identity(x1, name='output2')
iden1 = array_ops.identity(x1)
out3 = array_ops.identity(iden1, name='output3')
return trt_test.TfTrtIntegrationTestParams(gdef=g.as_graph_def(), input_names=[input_name], input_dims=[[input_dims]], output_names=['output1', 'output2', 'output3'], expected_output_dims=[([[100, 4]] * 3)])<|docstring|>Testing engine with the same tensor repeated as output via identity.<|endoftext|> |
b8d6e0955969ca4cf3bbb11a67542acd2c2fe7d505f2994006ea6d6db4ec61b5 | def ExpectedEnginesToBuild(self, run_params):
'Return the expected engines to build.'
return ['TRTEngineOp_0'] | Return the expected engines to build. | tensorflow/python/compiler/tensorrt/test/identity_output_test.py | ExpectedEnginesToBuild | 2hyan8/tensorflow | 36 | python | def ExpectedEnginesToBuild(self, run_params):
return ['TRTEngineOp_0'] | def ExpectedEnginesToBuild(self, run_params):
return ['TRTEngineOp_0']<|docstring|>Return the expected engines to build.<|endoftext|> |
78ee1a6603d0b62b884429280dd209e5b65667d11bfe02c9fb3f8d7f084d9569 | def session_expired(self):
'The session has ended due to session expiration'
if self.expiry_time:
return (self.expiry_time <= timezone.now())
return False | The session has ended due to session expiration | tracking/models.py | session_expired | yassam/django-tracking2 | 0 | python | def session_expired(self):
if self.expiry_time:
return (self.expiry_time <= timezone.now())
return False | def session_expired(self):
if self.expiry_time:
return (self.expiry_time <= timezone.now())
return False<|docstring|>The session has ended due to session expiration<|endoftext|> |
3cd51f3a6b5298db332e6d93d22f5b0ef5fddcd0cdb7b342818bd4a19ea7be5f | def session_ended(self):
'The session has ended due to an explicit logout'
return bool(self.end_time) | The session has ended due to an explicit logout | tracking/models.py | session_ended | yassam/django-tracking2 | 0 | python | def session_ended(self):
return bool(self.end_time) | def session_ended(self):
return bool(self.end_time)<|docstring|>The session has ended due to an explicit logout<|endoftext|> |
c6ac0ffd6d7b640767da5e7ff56c7c15f846ba953b2173b676984fa4d2f9ad94 | @property
def last_time(self):
'datetime of last time visited - start_time + time_on_site'
return (self.start_time + timedelta(seconds=self.time_on_site)) | datetime of last time visited - start_time + time_on_site | tracking/models.py | last_time | yassam/django-tracking2 | 0 | python | @property
def last_time(self):
return (self.start_time + timedelta(seconds=self.time_on_site)) | @property
def last_time(self):
return (self.start_time + timedelta(seconds=self.time_on_site))<|docstring|>datetime of last time visited - start_time + time_on_site<|endoftext|> |
22030fc976f4294f47e1485792c59d46edc20fe83be35d2af00e0efce8947455 | @property
def geoip_data(self):
"Attempts to retrieve MaxMind GeoIP data based upon the visitor's IP"
if ((not HAS_GEOIP) or (not TRACK_USING_GEOIP)):
return
if (not hasattr(self, '_geoip_data')):
self._geoip_data = None
try:
gip = GeoIP(cache=GEOIP_CACHE_TYPE)
self._geoip_data = gip.city(self.ip_address)
except GeoIPException:
log.error(('Error getting GeoIP data for IP "%s": %s' % (self.ip_address, traceback.format_exc())))
return self._geoip_data | Attempts to retrieve MaxMind GeoIP data based upon the visitor's IP | tracking/models.py | geoip_data | yassam/django-tracking2 | 0 | python | @property
def geoip_data(self):
if ((not HAS_GEOIP) or (not TRACK_USING_GEOIP)):
return
if (not hasattr(self, '_geoip_data')):
self._geoip_data = None
try:
gip = GeoIP(cache=GEOIP_CACHE_TYPE)
self._geoip_data = gip.city(self.ip_address)
except GeoIPException:
log.error(('Error getting GeoIP data for IP "%s": %s' % (self.ip_address, traceback.format_exc())))
return self._geoip_data | @property
def geoip_data(self):
if ((not HAS_GEOIP) or (not TRACK_USING_GEOIP)):
return
if (not hasattr(self, '_geoip_data')):
self._geoip_data = None
try:
gip = GeoIP(cache=GEOIP_CACHE_TYPE)
self._geoip_data = gip.city(self.ip_address)
except GeoIPException:
log.error(('Error getting GeoIP data for IP "%s": %s' % (self.ip_address, traceback.format_exc())))
return self._geoip_data<|docstring|>Attempts to retrieve MaxMind GeoIP data based upon the visitor's IP<|endoftext|> |
ca3b83d46df29f4680ca63ccc927357957e8f6a1511c48b22a882ddf0606ac0d | @property
def platform(self):
'\n Returns string describing browser platform. Falls back to agent\n string if either user_agents module not found, or\n TRACK_PARSE_AGENT is False\n '
if (not user_agents):
return self.user_agent
if (not hasattr(self, '_platform_string')):
platform = user_agents.parse(self.user_agent)
self._platform_string = ('%s %d %s %s ' % (platform.browser.family, platform.browser.version[0], platform.os.family, platform.os.version_string))
if platform.device.family:
self._platform_string += platform.device.family
return self._platform_string | Returns string describing browser platform. Falls back to agent
string if either user_agents module not found, or
TRACK_PARSE_AGENT is False | tracking/models.py | platform | yassam/django-tracking2 | 0 | python | @property
def platform(self):
'\n Returns string describing browser platform. Falls back to agent\n string if either user_agents module not found, or\n TRACK_PARSE_AGENT is False\n '
if (not user_agents):
return self.user_agent
if (not hasattr(self, '_platform_string')):
platform = user_agents.parse(self.user_agent)
self._platform_string = ('%s %d %s %s ' % (platform.browser.family, platform.browser.version[0], platform.os.family, platform.os.version_string))
if platform.device.family:
self._platform_string += platform.device.family
return self._platform_string | @property
def platform(self):
'\n Returns string describing browser platform. Falls back to agent\n string if either user_agents module not found, or\n TRACK_PARSE_AGENT is False\n '
if (not user_agents):
return self.user_agent
if (not hasattr(self, '_platform_string')):
platform = user_agents.parse(self.user_agent)
self._platform_string = ('%s %d %s %s ' % (platform.browser.family, platform.browser.version[0], platform.os.family, platform.os.version_string))
if platform.device.family:
self._platform_string += platform.device.family
return self._platform_string<|docstring|>Returns string describing browser platform. Falls back to agent
string if either user_agents module not found, or
TRACK_PARSE_AGENT is False<|endoftext|> |
bbdb08ad55cf49ed0ebed5f76b89e4b8306d795335be28e3fbcee9c0009c516b | def task_hutch_install():
'\n Hutch: Compile and install the Hutch extension.\n '
return {'actions': [(lambda : os.chdir('cmudb/extensions/hutch/')), 'sudo PYTHONPATH=../../tscout:$PYTHONPATH python3 tscout_feature_gen.py', 'PG_CONFIG=%(pg_config)s make clean -j', 'PG_CONFIG=%(pg_config)s make -j', 'PG_CONFIG=%(pg_config)s make install -j', (lambda : os.chdir(doit.get_initial_workdir()))], 'verbosity': VERBOSITY_DEFAULT, 'uptodate': [False], 'params': [{'name': 'pg_config', 'long': 'pg_config', 'help': 'The location of the pg_config binary.', 'default': '../../../build/bin/pg_config'}]} | Hutch: Compile and install the Hutch extension. | dodos/hutch.py | task_hutch_install | 17zhangw/postgres | 0 | python | def task_hutch_install():
'\n \n '
return {'actions': [(lambda : os.chdir('cmudb/extensions/hutch/')), 'sudo PYTHONPATH=../../tscout:$PYTHONPATH python3 tscout_feature_gen.py', 'PG_CONFIG=%(pg_config)s make clean -j', 'PG_CONFIG=%(pg_config)s make -j', 'PG_CONFIG=%(pg_config)s make install -j', (lambda : os.chdir(doit.get_initial_workdir()))], 'verbosity': VERBOSITY_DEFAULT, 'uptodate': [False], 'params': [{'name': 'pg_config', 'long': 'pg_config', 'help': 'The location of the pg_config binary.', 'default': '../../../build/bin/pg_config'}]} | def task_hutch_install():
'\n \n '
return {'actions': [(lambda : os.chdir('cmudb/extensions/hutch/')), 'sudo PYTHONPATH=../../tscout:$PYTHONPATH python3 tscout_feature_gen.py', 'PG_CONFIG=%(pg_config)s make clean -j', 'PG_CONFIG=%(pg_config)s make -j', 'PG_CONFIG=%(pg_config)s make install -j', (lambda : os.chdir(doit.get_initial_workdir()))], 'verbosity': VERBOSITY_DEFAULT, 'uptodate': [False], 'params': [{'name': 'pg_config', 'long': 'pg_config', 'help': 'The location of the pg_config binary.', 'default': '../../../build/bin/pg_config'}]}<|docstring|>Hutch: Compile and install the Hutch extension.<|endoftext|> |
3b2bdd5db92d4357c6c6ce2e824e955737da0ee052326a689f0889b92930339f | def logoutfunction(self, line):
' Main logout worker function\n\n :param line: command line input\n :type line: string.\n '
try:
(_, _) = self._parse_arglist(line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if (('-h' in line) or ('--help' in line)):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS('')
self._rdmc.app.logout('') | Main logout worker function
:param line: command line input
:type line: string. | src/extensions/COMMANDS/LogoutCommand.py | logoutfunction | xnox/python-redfish-utility | 0 | python | def logoutfunction(self, line):
' Main logout worker function\n\n :param line: command line input\n :type line: string.\n '
try:
(_, _) = self._parse_arglist(line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if (('-h' in line) or ('--help' in line)):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS()
self._rdmc.app.logout() | def logoutfunction(self, line):
' Main logout worker function\n\n :param line: command line input\n :type line: string.\n '
try:
(_, _) = self._parse_arglist(line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if (('-h' in line) or ('--help' in line)):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS()
self._rdmc.app.logout()<|docstring|>Main logout worker function
:param line: command line input
:type line: string.<|endoftext|> |
51714612da3c83e0bb904a2284a207355b682d7495f4c518b97a27e19b10efab | def run(self, line):
' Wrapper function for main logout function\n\n :param line: command line input\n :type line: string.\n '
sys.stdout.write('Logging session out.\n')
self.logoutfunction(line)
return ReturnCodes.SUCCESS | Wrapper function for main logout function
:param line: command line input
:type line: string. | src/extensions/COMMANDS/LogoutCommand.py | run | xnox/python-redfish-utility | 0 | python | def run(self, line):
' Wrapper function for main logout function\n\n :param line: command line input\n :type line: string.\n '
sys.stdout.write('Logging session out.\n')
self.logoutfunction(line)
return ReturnCodes.SUCCESS | def run(self, line):
' Wrapper function for main logout function\n\n :param line: command line input\n :type line: string.\n '
sys.stdout.write('Logging session out.\n')
self.logoutfunction(line)
return ReturnCodes.SUCCESS<|docstring|>Wrapper function for main logout function
:param line: command line input
:type line: string.<|endoftext|> |
64b13e0cdc3f473e0413f9ecb2c06ee0bab7470a7a9ad77faf84250587f6692d | def definearguments(self, customparser):
' Wrapper function for new command main function\n\n :param customparser: command line input\n :type customparser: parser.\n '
if (not customparser):
return
customparser.add_argument('-u', '--user', dest='user', help='Pass this flag along with the password flag if you are running in local higher security modes.', default=None)
customparser.add_argument('-p', '--password', dest='password', help='Pass this flag along with the user flag if you are running in local higher security modes.', default=None)
customparser.add_argument('-e', '--enc', dest='encode', action='store_true', help=SUPPRESS, default=False) | Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser. | src/extensions/COMMANDS/LogoutCommand.py | definearguments | xnox/python-redfish-utility | 0 | python | def definearguments(self, customparser):
' Wrapper function for new command main function\n\n :param customparser: command line input\n :type customparser: parser.\n '
if (not customparser):
return
customparser.add_argument('-u', '--user', dest='user', help='Pass this flag along with the password flag if you are running in local higher security modes.', default=None)
customparser.add_argument('-p', '--password', dest='password', help='Pass this flag along with the user flag if you are running in local higher security modes.', default=None)
customparser.add_argument('-e', '--enc', dest='encode', action='store_true', help=SUPPRESS, default=False) | def definearguments(self, customparser):
' Wrapper function for new command main function\n\n :param customparser: command line input\n :type customparser: parser.\n '
if (not customparser):
return
customparser.add_argument('-u', '--user', dest='user', help='Pass this flag along with the password flag if you are running in local higher security modes.', default=None)
customparser.add_argument('-p', '--password', dest='password', help='Pass this flag along with the user flag if you are running in local higher security modes.', default=None)
customparser.add_argument('-e', '--enc', dest='encode', action='store_true', help=SUPPRESS, default=False)<|docstring|>Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.<|endoftext|> |
baf991d31965b80ba3f4c423e9f65de32ba930c321340b196069420d0fffe4a5 | def __init__(self, game, show_progress=True):
'Build new CFR instance.\n\n Args:\n game (Game): ACPC game definition object.\n '
self.game = game
self.show_progress = show_progress
if (game.get_num_players() != 2):
raise AttributeError('Only games with 2 players are supported')
if (game.get_betting_type() != acpc.BettingType.LIMIT):
raise AttributeError('No-limit betting games not supported')
total_cards_count = (game.get_num_hole_cards() + game.get_total_num_board_cards((game.get_num_rounds() - 1)))
if (total_cards_count > 5):
raise AttributeError('Only games with up to 5 cards are supported')
game_tree_builder = GameTreeBuilder(game, CfrNodeProvider())
if (not self.show_progress):
self.game_tree = game_tree_builder.build_tree()
else:
try:
with tqdm(total=1) as progress:
progress.set_description('Building game tree')
self.game_tree = game_tree_builder.build_tree()
progress.update(1)
except NameError:
self.game_tree = game_tree_builder.build_tree() | Build new CFR instance.
Args:
game (Game): ACPC game definition object. | cfr/main.py | __init__ | JakubPetriska/poker-agent-kit | 19 | python | def __init__(self, game, show_progress=True):
'Build new CFR instance.\n\n Args:\n game (Game): ACPC game definition object.\n '
self.game = game
self.show_progress = show_progress
if (game.get_num_players() != 2):
raise AttributeError('Only games with 2 players are supported')
if (game.get_betting_type() != acpc.BettingType.LIMIT):
raise AttributeError('No-limit betting games not supported')
total_cards_count = (game.get_num_hole_cards() + game.get_total_num_board_cards((game.get_num_rounds() - 1)))
if (total_cards_count > 5):
raise AttributeError('Only games with up to 5 cards are supported')
game_tree_builder = GameTreeBuilder(game, CfrNodeProvider())
if (not self.show_progress):
self.game_tree = game_tree_builder.build_tree()
else:
try:
with tqdm(total=1) as progress:
progress.set_description('Building game tree')
self.game_tree = game_tree_builder.build_tree()
progress.update(1)
except NameError:
self.game_tree = game_tree_builder.build_tree() | def __init__(self, game, show_progress=True):
'Build new CFR instance.\n\n Args:\n game (Game): ACPC game definition object.\n '
self.game = game
self.show_progress = show_progress
if (game.get_num_players() != 2):
raise AttributeError('Only games with 2 players are supported')
if (game.get_betting_type() != acpc.BettingType.LIMIT):
raise AttributeError('No-limit betting games not supported')
total_cards_count = (game.get_num_hole_cards() + game.get_total_num_board_cards((game.get_num_rounds() - 1)))
if (total_cards_count > 5):
raise AttributeError('Only games with up to 5 cards are supported')
game_tree_builder = GameTreeBuilder(game, CfrNodeProvider())
if (not self.show_progress):
self.game_tree = game_tree_builder.build_tree()
else:
try:
with tqdm(total=1) as progress:
progress.set_description('Building game tree')
self.game_tree = game_tree_builder.build_tree()
progress.update(1)
except NameError:
self.game_tree = game_tree_builder.build_tree()<|docstring|>Build new CFR instance.
Args:
game (Game): ACPC game definition object.<|endoftext|> |
f08d17b3d0ecaf6a5f63ca1d48de9c1849fded104d9f5ad28b8ef5871c411370 | def train(self, iterations, weight_delay=700, checkpoint_iterations=None, checkpoint_callback=(lambda *args: None), minimal_action_probability=None):
'Run CFR for given number of iterations.\n\n The trained tree can be found by retrieving the game_tree\n property from this object. The result strategy is stored\n in average_strategy of each ActionNode in game tree.\n\n This method can be called multiple times on one instance\n to train more. This can be used for evaluation during training\n and to make number of training iterations dynamic.\n\n Args:\n iterations (int): Number of iterations.\n show_progress (bool): Show training progress bar.\n '
if (not self.show_progress):
iterations_iterable = range(iterations)
else:
try:
iterations_iterable = tqdm(range(iterations))
iterations_iterable.set_description(('%s training' % self._get_algorithm_name()))
except NameError:
iterations_iterable = range(iterations)
if (iterations <= weight_delay):
raise AttributeError('Number of iterations must be larger than weight delay')
if ((checkpoint_iterations is None) or (checkpoint_iterations <= 0) or (checkpoint_iterations > iterations)):
checkpoint_iterations = iterations
iterations_left_to_checkpoint = (weight_delay + checkpoint_iterations)
checkpoint_index = 0
for i in iterations_iterable:
self.weight = max((i - weight_delay), 0)
for player in range(2):
self._start_iteration(player)
iterations_left_to_checkpoint -= 1
if ((iterations_left_to_checkpoint == 0) or (i == (iterations - 1))):
Cfr._calculate_tree_average_strategy(self.game_tree, minimal_action_probability)
checkpoint_callback(self.game_tree, checkpoint_index, (i + 1))
checkpoint_index += 1
iterations_left_to_checkpoint = checkpoint_iterations
return self.game_tree | Run CFR for given number of iterations.
The trained tree can be found by retrieving the game_tree
property from this object. The result strategy is stored
in average_strategy of each ActionNode in game tree.
This method can be called multiple times on one instance
to train more. This can be used for evaluation during training
and to make number of training iterations dynamic.
Args:
iterations (int): Number of iterations.
show_progress (bool): Show training progress bar. | cfr/main.py | train | JakubPetriska/poker-agent-kit | 19 | python | def train(self, iterations, weight_delay=700, checkpoint_iterations=None, checkpoint_callback=(lambda *args: None), minimal_action_probability=None):
'Run CFR for given number of iterations.\n\n The trained tree can be found by retrieving the game_tree\n property from this object. The result strategy is stored\n in average_strategy of each ActionNode in game tree.\n\n This method can be called multiple times on one instance\n to train more. This can be used for evaluation during training\n and to make number of training iterations dynamic.\n\n Args:\n iterations (int): Number of iterations.\n show_progress (bool): Show training progress bar.\n '
if (not self.show_progress):
iterations_iterable = range(iterations)
else:
try:
iterations_iterable = tqdm(range(iterations))
iterations_iterable.set_description(('%s training' % self._get_algorithm_name()))
except NameError:
iterations_iterable = range(iterations)
if (iterations <= weight_delay):
raise AttributeError('Number of iterations must be larger than weight delay')
if ((checkpoint_iterations is None) or (checkpoint_iterations <= 0) or (checkpoint_iterations > iterations)):
checkpoint_iterations = iterations
iterations_left_to_checkpoint = (weight_delay + checkpoint_iterations)
checkpoint_index = 0
for i in iterations_iterable:
self.weight = max((i - weight_delay), 0)
for player in range(2):
self._start_iteration(player)
iterations_left_to_checkpoint -= 1
if ((iterations_left_to_checkpoint == 0) or (i == (iterations - 1))):
Cfr._calculate_tree_average_strategy(self.game_tree, minimal_action_probability)
checkpoint_callback(self.game_tree, checkpoint_index, (i + 1))
checkpoint_index += 1
iterations_left_to_checkpoint = checkpoint_iterations
return self.game_tree | def train(self, iterations, weight_delay=700, checkpoint_iterations=None, checkpoint_callback=(lambda *args: None), minimal_action_probability=None):
'Run CFR for given number of iterations.\n\n The trained tree can be found by retrieving the game_tree\n property from this object. The result strategy is stored\n in average_strategy of each ActionNode in game tree.\n\n This method can be called multiple times on one instance\n to train more. This can be used for evaluation during training\n and to make number of training iterations dynamic.\n\n Args:\n iterations (int): Number of iterations.\n show_progress (bool): Show training progress bar.\n '
if (not self.show_progress):
iterations_iterable = range(iterations)
else:
try:
iterations_iterable = tqdm(range(iterations))
iterations_iterable.set_description(('%s training' % self._get_algorithm_name()))
except NameError:
iterations_iterable = range(iterations)
if (iterations <= weight_delay):
raise AttributeError('Number of iterations must be larger than weight delay')
if ((checkpoint_iterations is None) or (checkpoint_iterations <= 0) or (checkpoint_iterations > iterations)):
checkpoint_iterations = iterations
iterations_left_to_checkpoint = (weight_delay + checkpoint_iterations)
checkpoint_index = 0
for i in iterations_iterable:
self.weight = max((i - weight_delay), 0)
for player in range(2):
self._start_iteration(player)
iterations_left_to_checkpoint -= 1
if ((iterations_left_to_checkpoint == 0) or (i == (iterations - 1))):
Cfr._calculate_tree_average_strategy(self.game_tree, minimal_action_probability)
checkpoint_callback(self.game_tree, checkpoint_index, (i + 1))
checkpoint_index += 1
iterations_left_to_checkpoint = checkpoint_iterations
return self.game_tree<|docstring|>Run CFR for given number of iterations.
The trained tree can be found by retrieving the game_tree
property from this object. The result strategy is stored
in average_strategy of each ActionNode in game tree.
This method can be called multiple times on one instance
to train more. This can be used for evaluation during training
and to make number of training iterations dynamic.
Args:
iterations (int): Number of iterations.
show_progress (bool): Show training progress bar.<|endoftext|> |
b1b3162eca93c3f5747480406dbee5cb9a720d65f6a7df47ab760fa83fbbe912 | def make_users_me_request(self):
"\n Need to wrap the get in a class method to get 'self' context into timeit\n "
response = self.client.get(self.url, **self.exporter_headers)
self.assertTrue((response.status_code == status.HTTP_200_OK)) | Need to wrap the get in a class method to get 'self' context into timeit | api/users/tests/tests_performance.py | make_users_me_request | code-review-doctor/lite-api | 3 | python | def make_users_me_request(self):
"\n \n "
response = self.client.get(self.url, **self.exporter_headers)
self.assertTrue((response.status_code == status.HTTP_200_OK)) | def make_users_me_request(self):
"\n \n "
response = self.client.get(self.url, **self.exporter_headers)
self.assertTrue((response.status_code == status.HTTP_200_OK))<|docstring|>Need to wrap the get in a class method to get 'self' context into timeit<|endoftext|> |
c0b536aff21d42c65998028e3dbf93a3a18d0d439a892d7293ef6c45439b2498 | @parameterized.expand([(10, 0), (100, 0), (1000, 0)])
def test_users_me_performance_by_organisation(self, org_count, users):
"\n Tests the performance of the 'users/me' endpoint\n "
self.create_organisations_multiple_users(required_user=self.exporter_user, organisations=org_count, users_per_org=users)
print(f'organisations: {org_count}')
self.timeit(self.make_users_me_request) | Tests the performance of the 'users/me' endpoint | api/users/tests/tests_performance.py | test_users_me_performance_by_organisation | code-review-doctor/lite-api | 3 | python | @parameterized.expand([(10, 0), (100, 0), (1000, 0)])
def test_users_me_performance_by_organisation(self, org_count, users):
"\n \n "
self.create_organisations_multiple_users(required_user=self.exporter_user, organisations=org_count, users_per_org=users)
print(f'organisations: {org_count}')
self.timeit(self.make_users_me_request) | @parameterized.expand([(10, 0), (100, 0), (1000, 0)])
def test_users_me_performance_by_organisation(self, org_count, users):
"\n \n "
self.create_organisations_multiple_users(required_user=self.exporter_user, organisations=org_count, users_per_org=users)
print(f'organisations: {org_count}')
self.timeit(self.make_users_me_request)<|docstring|>Tests the performance of the 'users/me' endpoint<|endoftext|> |
7c3e8832283c92e3af1164983eede6a7766dddaa10bcd07017b9d59c88d00a7c | @parameterized.expand([(10, 0), (100, 0), (1000, 0)])
def test_users_me_performance_by_sites(self, sites, users):
"\n Tests the performance of the 'users/me' endpoint\n "
self.create_multiple_sites_for_an_organisation(organisation=self.organisation, sites_count=sites)
print(f'sites: {sites}')
self.timeit(self.make_users_me_request) | Tests the performance of the 'users/me' endpoint | api/users/tests/tests_performance.py | test_users_me_performance_by_sites | code-review-doctor/lite-api | 3 | python | @parameterized.expand([(10, 0), (100, 0), (1000, 0)])
def test_users_me_performance_by_sites(self, sites, users):
"\n \n "
self.create_multiple_sites_for_an_organisation(organisation=self.organisation, sites_count=sites)
print(f'sites: {sites}')
self.timeit(self.make_users_me_request) | @parameterized.expand([(10, 0), (100, 0), (1000, 0)])
def test_users_me_performance_by_sites(self, sites, users):
"\n \n "
self.create_multiple_sites_for_an_organisation(organisation=self.organisation, sites_count=sites)
print(f'sites: {sites}')
self.timeit(self.make_users_me_request)<|docstring|>Tests the performance of the 'users/me' endpoint<|endoftext|> |
031c240b78b9bd87a79d369b06e4f995243bb136ef12ccb538896a8ba0808ce4 | @parameterized.expand([(1, 10), (1, 100), (1, 1000)])
def test_users_me_performance_by_users_per_site(self, sites, users):
"\n Tests the performance of the 'users/me' endpoint\n "
print(f'users: {users}')
self.create_multiple_sites_for_an_organisation(organisation=self.organisation, sites_count=1, users_per_site=users)
self.timeit(self.make_users_me_request) | Tests the performance of the 'users/me' endpoint | api/users/tests/tests_performance.py | test_users_me_performance_by_users_per_site | code-review-doctor/lite-api | 3 | python | @parameterized.expand([(1, 10), (1, 100), (1, 1000)])
def test_users_me_performance_by_users_per_site(self, sites, users):
"\n \n "
print(f'users: {users}')
self.create_multiple_sites_for_an_organisation(organisation=self.organisation, sites_count=1, users_per_site=users)
self.timeit(self.make_users_me_request) | @parameterized.expand([(1, 10), (1, 100), (1, 1000)])
def test_users_me_performance_by_users_per_site(self, sites, users):
"\n \n "
print(f'users: {users}')
self.create_multiple_sites_for_an_organisation(organisation=self.organisation, sites_count=1, users_per_site=users)
self.timeit(self.make_users_me_request)<|docstring|>Tests the performance of the 'users/me' endpoint<|endoftext|> |
86204316e7d80ec4ac38865f66a173dae719cb2373c211c02cfaf2b1524ceb80 | def train(X, Y, args):
'\n Train a model given the arguments, the dataset and \n the corresponding labels (ground-truth)\n\n Parameters\n ----------\n\n X : array\n features of the dataset\n Y : array\n corresponding labels\n args : dict\n arguments to prepare the model\n\n Returns\n -------\n\n model : object\n trained model\n\n '
if (args.model == 'svm'):
logging.info(f'Training SVM model...')
if (args.gridsearch == 'n'):
logging.info(f'Using predefined parameters.')
kernel = 'rbf'
gamma = 0.0001
C = 1000
svm_model = SVC(kernel=kernel, gamma=gamma, C=C)
svm_model.fit(X, Y)
return svm_model
elif (args.gridsearch == 'y'):
logging.info(f'Doing grid search, it may take a while...')
params_grid = [{'kernel': ['rbf'], 'gamma': [0.01, 0.001, 0.0001], 'C': [10, 100, 1000]}, {'kernel': ['linear'], 'C': [10, 100, 1000]}, {'kernel': ['poly'], 'gamma': [0.01, 0.001, 0.0001], 'degree': [3, 4, 5], 'C': [10, 100, 1000]}, {'kernel': ['sigmoid'], 'gamma': [0.01, 0.001, 0.0001], 'C': [10, 100, 1000]}]
svm_model = GridSearchCV(SVC(), params_grid, cv=3, verbose=10, n_jobs=(- 1))
svm_model.fit(X, Y)
logging.info(f'Using hyperparameters: {svm_model.best_params_}')
return svm_model
elif (args.model == 'rf'):
logging.info(f'Training RF model...')
if (args.gridsearch == 'n'):
logging.info(f'Using predefined parameters.')
n_estimators = 50
max_depth = 25
min_samples_split = 2
min_samples_leaf = 4
bootstrap = True
rf_model = RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, bootstrap=bootstrap, random_state=42)
rf_model.fit(X, Y)
return rf_model
elif (args.gridsearch == 'y'):
logging.info(f'Doing grid search, it may take a while...')
n_estimators = [50, 75, 100]
max_depth = [10, 25, 50]
min_samples_split = [2, 4, 6]
min_samples_leaf = [1, 2, 4]
bootstrap = [True]
param_grid = {'n_estimators': n_estimators, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
rf = RandomForestClassifier(random_state=42)
rf_model = GridSearchCV(estimator=rf, param_grid=param_grid, cv=3, verbose=10, n_jobs=(- 1))
rf_model.fit(X, Y)
logging.info(f'Using hyperparameters: {rf_model.best_params_}')
return rf_model | Train a model given the arguments, the dataset and
the corresponding labels (ground-truth)
Parameters
----------
X : array
features of the dataset
Y : array
corresponding labels
args : dict
arguments to prepare the model
Returns
-------
model : object
trained model | rrgp/algorithm.py | train | patrickaudriaz/mini-project | 4 | python | def train(X, Y, args):
'\n Train a model given the arguments, the dataset and \n the corresponding labels (ground-truth)\n\n Parameters\n ----------\n\n X : array\n features of the dataset\n Y : array\n corresponding labels\n args : dict\n arguments to prepare the model\n\n Returns\n -------\n\n model : object\n trained model\n\n '
if (args.model == 'svm'):
logging.info(f'Training SVM model...')
if (args.gridsearch == 'n'):
logging.info(f'Using predefined parameters.')
kernel = 'rbf'
gamma = 0.0001
C = 1000
svm_model = SVC(kernel=kernel, gamma=gamma, C=C)
svm_model.fit(X, Y)
return svm_model
elif (args.gridsearch == 'y'):
logging.info(f'Doing grid search, it may take a while...')
params_grid = [{'kernel': ['rbf'], 'gamma': [0.01, 0.001, 0.0001], 'C': [10, 100, 1000]}, {'kernel': ['linear'], 'C': [10, 100, 1000]}, {'kernel': ['poly'], 'gamma': [0.01, 0.001, 0.0001], 'degree': [3, 4, 5], 'C': [10, 100, 1000]}, {'kernel': ['sigmoid'], 'gamma': [0.01, 0.001, 0.0001], 'C': [10, 100, 1000]}]
svm_model = GridSearchCV(SVC(), params_grid, cv=3, verbose=10, n_jobs=(- 1))
svm_model.fit(X, Y)
logging.info(f'Using hyperparameters: {svm_model.best_params_}')
return svm_model
elif (args.model == 'rf'):
logging.info(f'Training RF model...')
if (args.gridsearch == 'n'):
logging.info(f'Using predefined parameters.')
n_estimators = 50
max_depth = 25
min_samples_split = 2
min_samples_leaf = 4
bootstrap = True
rf_model = RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, bootstrap=bootstrap, random_state=42)
rf_model.fit(X, Y)
return rf_model
elif (args.gridsearch == 'y'):
logging.info(f'Doing grid search, it may take a while...')
n_estimators = [50, 75, 100]
max_depth = [10, 25, 50]
min_samples_split = [2, 4, 6]
min_samples_leaf = [1, 2, 4]
bootstrap = [True]
param_grid = {'n_estimators': n_estimators, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
rf = RandomForestClassifier(random_state=42)
rf_model = GridSearchCV(estimator=rf, param_grid=param_grid, cv=3, verbose=10, n_jobs=(- 1))
rf_model.fit(X, Y)
logging.info(f'Using hyperparameters: {rf_model.best_params_}')
return rf_model | def train(X, Y, args):
'\n Train a model given the arguments, the dataset and \n the corresponding labels (ground-truth)\n\n Parameters\n ----------\n\n X : array\n features of the dataset\n Y : array\n corresponding labels\n args : dict\n arguments to prepare the model\n\n Returns\n -------\n\n model : object\n trained model\n\n '
if (args.model == 'svm'):
logging.info(f'Training SVM model...')
if (args.gridsearch == 'n'):
logging.info(f'Using predefined parameters.')
kernel = 'rbf'
gamma = 0.0001
C = 1000
svm_model = SVC(kernel=kernel, gamma=gamma, C=C)
svm_model.fit(X, Y)
return svm_model
elif (args.gridsearch == 'y'):
logging.info(f'Doing grid search, it may take a while...')
params_grid = [{'kernel': ['rbf'], 'gamma': [0.01, 0.001, 0.0001], 'C': [10, 100, 1000]}, {'kernel': ['linear'], 'C': [10, 100, 1000]}, {'kernel': ['poly'], 'gamma': [0.01, 0.001, 0.0001], 'degree': [3, 4, 5], 'C': [10, 100, 1000]}, {'kernel': ['sigmoid'], 'gamma': [0.01, 0.001, 0.0001], 'C': [10, 100, 1000]}]
svm_model = GridSearchCV(SVC(), params_grid, cv=3, verbose=10, n_jobs=(- 1))
svm_model.fit(X, Y)
logging.info(f'Using hyperparameters: {svm_model.best_params_}')
return svm_model
elif (args.model == 'rf'):
logging.info(f'Training RF model...')
if (args.gridsearch == 'n'):
logging.info(f'Using predefined parameters.')
n_estimators = 50
max_depth = 25
min_samples_split = 2
min_samples_leaf = 4
bootstrap = True
rf_model = RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, bootstrap=bootstrap, random_state=42)
rf_model.fit(X, Y)
return rf_model
elif (args.gridsearch == 'y'):
logging.info(f'Doing grid search, it may take a while...')
n_estimators = [50, 75, 100]
max_depth = [10, 25, 50]
min_samples_split = [2, 4, 6]
min_samples_leaf = [1, 2, 4]
bootstrap = [True]
param_grid = {'n_estimators': n_estimators, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
rf = RandomForestClassifier(random_state=42)
rf_model = GridSearchCV(estimator=rf, param_grid=param_grid, cv=3, verbose=10, n_jobs=(- 1))
rf_model.fit(X, Y)
logging.info(f'Using hyperparameters: {rf_model.best_params_}')
return rf_model<|docstring|>Train a model given the arguments, the dataset and
the corresponding labels (ground-truth)
Parameters
----------
X : array
features of the dataset
Y : array
corresponding labels
args : dict
arguments to prepare the model
Returns
-------
model : object
trained model<|endoftext|> |
829f9a1974c3f3c7cc794039b5df35a581e19f8953dcc5bff8dc07ffe3b9e24b | def predict(X, model):
'\n Predict labels given the features and the trained model\n\n Parameters\n ----------\n\n X : array\n features to predict on\n model : object\n trained model\n\n Returns\n -------\n\n predictions : array\n Array with the predicted labels\n\n '
Y_pred = model.predict(X)
return Y_pred | Predict labels given the features and the trained model
Parameters
----------
X : array
features to predict on
model : object
trained model
Returns
-------
predictions : array
Array with the predicted labels | rrgp/algorithm.py | predict | patrickaudriaz/mini-project | 4 | python | def predict(X, model):
'\n Predict labels given the features and the trained model\n\n Parameters\n ----------\n\n X : array\n features to predict on\n model : object\n trained model\n\n Returns\n -------\n\n predictions : array\n Array with the predicted labels\n\n '
Y_pred = model.predict(X)
return Y_pred | def predict(X, model):
'\n Predict labels given the features and the trained model\n\n Parameters\n ----------\n\n X : array\n features to predict on\n model : object\n trained model\n\n Returns\n -------\n\n predictions : array\n Array with the predicted labels\n\n '
Y_pred = model.predict(X)
return Y_pred<|docstring|>Predict labels given the features and the trained model
Parameters
----------
X : array
features to predict on
model : object
trained model
Returns
-------
predictions : array
Array with the predicted labels<|endoftext|> |
4cca8fb29c02e720f7cbf29bc423f83f5f0d0a8ad0ac4f123ea819e7c94489e1 | def uast2graphlets(self, uast):
"\n :param uast: The UAST root node.\n :generate: The nodes which compose the UAST.\n :class: 'Node' is used to access the nodes of the graphlets.\n "
root = self._extract_node(uast, None)
stack = [(root, uast)]
while stack:
(parent, parent_uast) = stack.pop()
children_nodes = [self._extract_node(child, parent) for child in parent_uast.children]
parent.children = children_nodes
stack.extend(zip(children_nodes, parent_uast.children))
(yield parent) | :param uast: The UAST root node.
:generate: The nodes which compose the UAST.
:class: 'Node' is used to access the nodes of the graphlets. | sourced/ml/algorithms/uast_inttypes_to_graphlets.py | uast2graphlets | vmarkovtsev/ml | 122 | python | def uast2graphlets(self, uast):
"\n :param uast: The UAST root node.\n :generate: The nodes which compose the UAST.\n :class: 'Node' is used to access the nodes of the graphlets.\n "
root = self._extract_node(uast, None)
stack = [(root, uast)]
while stack:
(parent, parent_uast) = stack.pop()
children_nodes = [self._extract_node(child, parent) for child in parent_uast.children]
parent.children = children_nodes
stack.extend(zip(children_nodes, parent_uast.children))
(yield parent) | def uast2graphlets(self, uast):
"\n :param uast: The UAST root node.\n :generate: The nodes which compose the UAST.\n :class: 'Node' is used to access the nodes of the graphlets.\n "
root = self._extract_node(uast, None)
stack = [(root, uast)]
while stack:
(parent, parent_uast) = stack.pop()
children_nodes = [self._extract_node(child, parent) for child in parent_uast.children]
parent.children = children_nodes
stack.extend(zip(children_nodes, parent_uast.children))
(yield parent)<|docstring|>:param uast: The UAST root node.
:generate: The nodes which compose the UAST.
:class: 'Node' is used to access the nodes of the graphlets.<|endoftext|> |
f53f03bbf12da5e39e0b3beb70aa69a218f3033ee79c673fcbb0b55e52e33b41 | def node2key(self, node):
"\n Builds the string joining internal types of all the nodes\n in the node's graphlet in the following order:\n parent_node_child1_child2_child3. The children are sorted by alphabetic order.\n str format is required for BagsExtractor.\n\n :param node: a node of UAST\n :return: The string key of node\n "
try:
parent_type = node.parent.internal_type
except AttributeError:
parent_type = None
key = [parent_type, node.internal_type]
key.extend(sorted((ch.internal_type for ch in node.children)))
return '_'.join(map(str, key)) | Builds the string joining internal types of all the nodes
in the node's graphlet in the following order:
parent_node_child1_child2_child3. The children are sorted by alphabetic order.
str format is required for BagsExtractor.
:param node: a node of UAST
:return: The string key of node | sourced/ml/algorithms/uast_inttypes_to_graphlets.py | node2key | vmarkovtsev/ml | 122 | python | def node2key(self, node):
"\n Builds the string joining internal types of all the nodes\n in the node's graphlet in the following order:\n parent_node_child1_child2_child3. The children are sorted by alphabetic order.\n str format is required for BagsExtractor.\n\n :param node: a node of UAST\n :return: The string key of node\n "
try:
parent_type = node.parent.internal_type
except AttributeError:
parent_type = None
key = [parent_type, node.internal_type]
key.extend(sorted((ch.internal_type for ch in node.children)))
return '_'.join(map(str, key)) | def node2key(self, node):
"\n Builds the string joining internal types of all the nodes\n in the node's graphlet in the following order:\n parent_node_child1_child2_child3. The children are sorted by alphabetic order.\n str format is required for BagsExtractor.\n\n :param node: a node of UAST\n :return: The string key of node\n "
try:
parent_type = node.parent.internal_type
except AttributeError:
parent_type = None
key = [parent_type, node.internal_type]
key.extend(sorted((ch.internal_type for ch in node.children)))
return '_'.join(map(str, key))<|docstring|>Builds the string joining internal types of all the nodes
in the node's graphlet in the following order:
parent_node_child1_child2_child3. The children are sorted by alphabetic order.
str format is required for BagsExtractor.
:param node: a node of UAST
:return: The string key of node<|endoftext|> |
9da5bb8e3ad9cc1f726071765de4103f9e5e86611c6ee5bc9377a608305056dc | def __call__(self, uast):
'\n Converts a UAST to a weighed bag of graphlets. The weights are graphlets frequencies.\n :param uast: The UAST root node.\n :return: bag of graphlets.\n '
bag = defaultdict(int)
for node in self.uast2graphlets(uast):
bag[self.node2key(node)] += 1
return bag | Converts a UAST to a weighed bag of graphlets. The weights are graphlets frequencies.
:param uast: The UAST root node.
:return: bag of graphlets. | sourced/ml/algorithms/uast_inttypes_to_graphlets.py | __call__ | vmarkovtsev/ml | 122 | python | def __call__(self, uast):
'\n Converts a UAST to a weighed bag of graphlets. The weights are graphlets frequencies.\n :param uast: The UAST root node.\n :return: bag of graphlets.\n '
bag = defaultdict(int)
for node in self.uast2graphlets(uast):
bag[self.node2key(node)] += 1
return bag | def __call__(self, uast):
'\n Converts a UAST to a weighed bag of graphlets. The weights are graphlets frequencies.\n :param uast: The UAST root node.\n :return: bag of graphlets.\n '
bag = defaultdict(int)
for node in self.uast2graphlets(uast):
bag[self.node2key(node)] += 1
return bag<|docstring|>Converts a UAST to a weighed bag of graphlets. The weights are graphlets frequencies.
:param uast: The UAST root node.
:return: bag of graphlets.<|endoftext|> |
9e722c97904581dec05898eb08559e4f69a4a81befa564a2272d3e7ea067a995 | def scatterplot(self, func=None, xlabel='[Compound] (nM)', ylabel='Anisotropy', palette='viridis_r', baseline_correction=True, invert=False, *args, **kargs):
'Plot and Curve Fit data on a log[x] axis.'
if (self.df_main is None):
self._load_data()
self._prep_data_for_plotting()
if (self.df_plot_ready is None):
self._prep_data_for_plotting()
if (func is None):
func = self.method
count = 0
compounds = np.unique(self.df_plot_ready['COMPOUND'])
df_list = []
colors = sns.color_palette(palette, self.n_compounds)
'\n other nice color palettes I like...\n > rainbow\n > ocean_r\n > ocean\n > viridis_r\n '
for c in compounds:
df = self.df_plot_ready[((self.df_plot_ready['COMPOUND'] == c) & (~ np.isnan(self.df_plot_ready['value'])))].copy()
if baseline_correction:
baseline = df.loc[((df['CONCENTRATION'] == 0), 'value')].mean()
df['value_corrected'] = (df['value'] - baseline)
else:
df['value_corrected'] = df['value']
df = df[(df['CONCENTRATION'] != 0)]
if (self.top and baseline_correction):
df['value_normalized'] = ((df['value_corrected'] * 100) / (self.top - baseline))
elif self.top:
df['value_normalized'] = ((df['value_corrected'] * 100) / self.top)
else:
df['value_normalized'] = df['value_corrected']
if (invert and self.top):
df['value_normalized'] = (100 - df['value_normalized'])
elif invert:
max_value = df['value_normalized'].max()
df['value_normalized'] = (100 - ((df['value_normalized'] * 100) / max_value))
else:
pass
df_list.append(df)
(popt, popv) = curve_fit(func, *args, method='trf', xdata=df['CONCENTRATION'], ydata=df['value_normalized'], xtol=1e-12, ftol=1e-12, gtol=1e-12, **kargs)
degrees_of_freedom = max(0, (len(df) - len(popt)))
t_value = t.ppf(0.975, degrees_of_freedom)
l_ci = []
for (val, var) in zip(popt, np.diag(popv)):
sigma = (var ** 0.5)
ci = ((val - (sigma * t_value)), (val + (sigma * t_value)))
l_ci.append(ci)
self.fit_parameters[c] = [*popt, *l_ci]
xdata = np.linspace(start=df['CONCENTRATION'].min(), stop=df['CONCENTRATION'].max(), num=int(df['CONCENTRATION'].max()), endpoint=True)
plt.plot(func(xdata, *popt), ':', label=c, color=colors[count])
count += 1
df_concat = pd.concat(df_list, axis=0)
self.df_plot_ready = self.df_plot_ready.merge(df_concat, on=['COMPOUND', 'CONCENTRATION', 'variable', 'value'], how='left')
cols = list(getfullargspec(func))[0][1:]
columns = [*cols, *[f'{i}_CI' for i in cols]]
self.fit_parameters = pd.DataFrame.from_dict(self.fit_parameters, orient='index', columns=columns)
self.plot = sns.scatterplot(data=self.df_plot_ready, hue=self.df_plot_ready['COMPOUND'], x=self.df_plot_ready['CONCENTRATION'], y=self.df_plot_ready['value_normalized'], palette=colors)
self.plot.set(xscale='log', xlabel=xlabel, ylabel=ylabel)
return self.plot | Plot and Curve Fit data on a log[x] axis. | DoseResponse/dose_response_curve.py | scatterplot | Spill-Tea/Rio | 0 | python | def scatterplot(self, func=None, xlabel='[Compound] (nM)', ylabel='Anisotropy', palette='viridis_r', baseline_correction=True, invert=False, *args, **kargs):
if (self.df_main is None):
self._load_data()
self._prep_data_for_plotting()
if (self.df_plot_ready is None):
self._prep_data_for_plotting()
if (func is None):
func = self.method
count = 0
compounds = np.unique(self.df_plot_ready['COMPOUND'])
df_list = []
colors = sns.color_palette(palette, self.n_compounds)
'\n other nice color palettes I like...\n > rainbow\n > ocean_r\n > ocean\n > viridis_r\n '
for c in compounds:
df = self.df_plot_ready[((self.df_plot_ready['COMPOUND'] == c) & (~ np.isnan(self.df_plot_ready['value'])))].copy()
if baseline_correction:
baseline = df.loc[((df['CONCENTRATION'] == 0), 'value')].mean()
df['value_corrected'] = (df['value'] - baseline)
else:
df['value_corrected'] = df['value']
df = df[(df['CONCENTRATION'] != 0)]
if (self.top and baseline_correction):
df['value_normalized'] = ((df['value_corrected'] * 100) / (self.top - baseline))
elif self.top:
df['value_normalized'] = ((df['value_corrected'] * 100) / self.top)
else:
df['value_normalized'] = df['value_corrected']
if (invert and self.top):
df['value_normalized'] = (100 - df['value_normalized'])
elif invert:
max_value = df['value_normalized'].max()
df['value_normalized'] = (100 - ((df['value_normalized'] * 100) / max_value))
else:
pass
df_list.append(df)
(popt, popv) = curve_fit(func, *args, method='trf', xdata=df['CONCENTRATION'], ydata=df['value_normalized'], xtol=1e-12, ftol=1e-12, gtol=1e-12, **kargs)
degrees_of_freedom = max(0, (len(df) - len(popt)))
t_value = t.ppf(0.975, degrees_of_freedom)
l_ci = []
for (val, var) in zip(popt, np.diag(popv)):
sigma = (var ** 0.5)
ci = ((val - (sigma * t_value)), (val + (sigma * t_value)))
l_ci.append(ci)
self.fit_parameters[c] = [*popt, *l_ci]
xdata = np.linspace(start=df['CONCENTRATION'].min(), stop=df['CONCENTRATION'].max(), num=int(df['CONCENTRATION'].max()), endpoint=True)
plt.plot(func(xdata, *popt), ':', label=c, color=colors[count])
count += 1
df_concat = pd.concat(df_list, axis=0)
self.df_plot_ready = self.df_plot_ready.merge(df_concat, on=['COMPOUND', 'CONCENTRATION', 'variable', 'value'], how='left')
cols = list(getfullargspec(func))[0][1:]
columns = [*cols, *[f'{i}_CI' for i in cols]]
self.fit_parameters = pd.DataFrame.from_dict(self.fit_parameters, orient='index', columns=columns)
self.plot = sns.scatterplot(data=self.df_plot_ready, hue=self.df_plot_ready['COMPOUND'], x=self.df_plot_ready['CONCENTRATION'], y=self.df_plot_ready['value_normalized'], palette=colors)
self.plot.set(xscale='log', xlabel=xlabel, ylabel=ylabel)
return self.plot | def scatterplot(self, func=None, xlabel='[Compound] (nM)', ylabel='Anisotropy', palette='viridis_r', baseline_correction=True, invert=False, *args, **kargs):
if (self.df_main is None):
self._load_data()
self._prep_data_for_plotting()
if (self.df_plot_ready is None):
self._prep_data_for_plotting()
if (func is None):
func = self.method
count = 0
compounds = np.unique(self.df_plot_ready['COMPOUND'])
df_list = []
colors = sns.color_palette(palette, self.n_compounds)
'\n other nice color palettes I like...\n > rainbow\n > ocean_r\n > ocean\n > viridis_r\n '
for c in compounds:
df = self.df_plot_ready[((self.df_plot_ready['COMPOUND'] == c) & (~ np.isnan(self.df_plot_ready['value'])))].copy()
if baseline_correction:
baseline = df.loc[((df['CONCENTRATION'] == 0), 'value')].mean()
df['value_corrected'] = (df['value'] - baseline)
else:
df['value_corrected'] = df['value']
df = df[(df['CONCENTRATION'] != 0)]
if (self.top and baseline_correction):
df['value_normalized'] = ((df['value_corrected'] * 100) / (self.top - baseline))
elif self.top:
df['value_normalized'] = ((df['value_corrected'] * 100) / self.top)
else:
df['value_normalized'] = df['value_corrected']
if (invert and self.top):
df['value_normalized'] = (100 - df['value_normalized'])
elif invert:
max_value = df['value_normalized'].max()
df['value_normalized'] = (100 - ((df['value_normalized'] * 100) / max_value))
else:
pass
df_list.append(df)
(popt, popv) = curve_fit(func, *args, method='trf', xdata=df['CONCENTRATION'], ydata=df['value_normalized'], xtol=1e-12, ftol=1e-12, gtol=1e-12, **kargs)
degrees_of_freedom = max(0, (len(df) - len(popt)))
t_value = t.ppf(0.975, degrees_of_freedom)
l_ci = []
for (val, var) in zip(popt, np.diag(popv)):
sigma = (var ** 0.5)
ci = ((val - (sigma * t_value)), (val + (sigma * t_value)))
l_ci.append(ci)
self.fit_parameters[c] = [*popt, *l_ci]
xdata = np.linspace(start=df['CONCENTRATION'].min(), stop=df['CONCENTRATION'].max(), num=int(df['CONCENTRATION'].max()), endpoint=True)
plt.plot(func(xdata, *popt), ':', label=c, color=colors[count])
count += 1
df_concat = pd.concat(df_list, axis=0)
self.df_plot_ready = self.df_plot_ready.merge(df_concat, on=['COMPOUND', 'CONCENTRATION', 'variable', 'value'], how='left')
cols = list(getfullargspec(func))[0][1:]
columns = [*cols, *[f'{i}_CI' for i in cols]]
self.fit_parameters = pd.DataFrame.from_dict(self.fit_parameters, orient='index', columns=columns)
self.plot = sns.scatterplot(data=self.df_plot_ready, hue=self.df_plot_ready['COMPOUND'], x=self.df_plot_ready['CONCENTRATION'], y=self.df_plot_ready['value_normalized'], palette=colors)
self.plot.set(xscale='log', xlabel=xlabel, ylabel=ylabel)
return self.plot<|docstring|>Plot and Curve Fit data on a log[x] axis.<|endoftext|> |
f9d265a133c1b72a353e29a6107a23f9a53bedd8886744df5b8390e4954f2005 | def data_summary(self):
'This function summarizes the raw Data.'
if (self.df_main is None):
self._load_data()
self.df_summary = self.df_main.copy()
self.df_summary['N'] = self.df_main.count(axis=1)
self.df_summary['MEAN'] = self.df_main.mean(axis=1)
self.df_summary['SD'] = self.df_main.std(axis=1) | This function summarizes the raw Data. | DoseResponse/dose_response_curve.py | data_summary | Spill-Tea/Rio | 0 | python | def data_summary(self):
if (self.df_main is None):
self._load_data()
self.df_summary = self.df_main.copy()
self.df_summary['N'] = self.df_main.count(axis=1)
self.df_summary['MEAN'] = self.df_main.mean(axis=1)
self.df_summary['SD'] = self.df_main.std(axis=1) | def data_summary(self):
if (self.df_main is None):
self._load_data()
self.df_summary = self.df_main.copy()
self.df_summary['N'] = self.df_main.count(axis=1)
self.df_summary['MEAN'] = self.df_main.mean(axis=1)
self.df_summary['SD'] = self.df_main.std(axis=1)<|docstring|>This function summarizes the raw Data.<|endoftext|> |
0af176e0250f06bd7bead4a4b96d1c52ce9d38e6eceb6f794ccc6ba49bfb4c36 | def _load_data(self):
'Helper Function to Load data from a file.'
self.df_main = pd.read_csv(self.datafile, header=[0, 1], sep='\t').T
self.n_replicates = len(self.df_main.columns) | Helper Function to Load data from a file. | DoseResponse/dose_response_curve.py | _load_data | Spill-Tea/Rio | 0 | python | def _load_data(self):
self.df_main = pd.read_csv(self.datafile, header=[0, 1], sep='\t').T
self.n_replicates = len(self.df_main.columns) | def _load_data(self):
self.df_main = pd.read_csv(self.datafile, header=[0, 1], sep='\t').T
self.n_replicates = len(self.df_main.columns)<|docstring|>Helper Function to Load data from a file.<|endoftext|> |
194d353cd3dc9c218cf545ea0d95fb0f2ddeacc96c06232450c28b1a7c1de39d | def pad_image(img):
'\n Pad image with 0s to make it square\n :param image: HxWx3 numpy array\n :return: AxAx3 numpy array (square image)\n '
(height, width, _) = img.shape
if (width < height):
border_width = ((height - width) // 2)
padded = cv2.copyMakeBorder(img, 0, 0, border_width, border_width, cv2.BORDER_CONSTANT, value=0)
else:
border_width = ((width - height) // 2)
padded = cv2.copyMakeBorder(img, border_width, border_width, 0, 0, cv2.BORDER_CONSTANT, value=0)
return padded | Pad image with 0s to make it square
:param image: HxWx3 numpy array
:return: AxAx3 numpy array (square image) | bodypart_segmentation_predict.py | pad_image | akashsengupta1997/segmentation_models | 0 | python | def pad_image(img):
'\n Pad image with 0s to make it square\n :param image: HxWx3 numpy array\n :return: AxAx3 numpy array (square image)\n '
(height, width, _) = img.shape
if (width < height):
border_width = ((height - width) // 2)
padded = cv2.copyMakeBorder(img, 0, 0, border_width, border_width, cv2.BORDER_CONSTANT, value=0)
else:
border_width = ((width - height) // 2)
padded = cv2.copyMakeBorder(img, border_width, border_width, 0, 0, cv2.BORDER_CONSTANT, value=0)
return padded | def pad_image(img):
'\n Pad image with 0s to make it square\n :param image: HxWx3 numpy array\n :return: AxAx3 numpy array (square image)\n '
(height, width, _) = img.shape
if (width < height):
border_width = ((height - width) // 2)
padded = cv2.copyMakeBorder(img, 0, 0, border_width, border_width, cv2.BORDER_CONSTANT, value=0)
else:
border_width = ((width - height) // 2)
padded = cv2.copyMakeBorder(img, border_width, border_width, 0, 0, cv2.BORDER_CONSTANT, value=0)
return padded<|docstring|>Pad image with 0s to make it square
:param image: HxWx3 numpy array
:return: AxAx3 numpy array (square image)<|endoftext|> |
0e5db6dcf0e6636c5fc48adb6f15123020a73a91b5b9774ab21b1ed0f01fb8f4 | def readWrite(self):
'\n Step through the structure of a PWDINT file and read/write it.\n\n Logic to control which records will be present is here, which\n comes directly off the File specification.\n '
self._rwFileID()
self._rw1DRecord()
self._rw2DRecord() | Step through the structure of a PWDINT file and read/write it.
Logic to control which records will be present is here, which
comes directly off the File specification. | armi/nuclearDataIO/cccc/pwdint.py | readWrite | DennisYelizarov/armi | 162 | python | def readWrite(self):
'\n Step through the structure of a PWDINT file and read/write it.\n\n Logic to control which records will be present is here, which\n comes directly off the File specification.\n '
self._rwFileID()
self._rw1DRecord()
self._rw2DRecord() | def readWrite(self):
'\n Step through the structure of a PWDINT file and read/write it.\n\n Logic to control which records will be present is here, which\n comes directly off the File specification.\n '
self._rwFileID()
self._rw1DRecord()
self._rw2DRecord()<|docstring|>Step through the structure of a PWDINT file and read/write it.
Logic to control which records will be present is here, which
comes directly off the File specification.<|endoftext|> |
c4ecf571c30ff697e388b970e0af6cf0f20c739b280bfc9e542b23b0c2d589e6 | def _rw1DRecord(self):
'\n Read/write File specifications on 1D record.\n '
with self.createRecord() as record:
self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata)) | Read/write File specifications on 1D record. | armi/nuclearDataIO/cccc/pwdint.py | _rw1DRecord | DennisYelizarov/armi | 162 | python | def _rw1DRecord(self):
'\n \n '
with self.createRecord() as record:
self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata)) | def _rw1DRecord(self):
'\n \n '
with self.createRecord() as record:
self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata))<|docstring|>Read/write File specifications on 1D record.<|endoftext|> |
49e004df0a5ad58b4afe12172f7b351af6583783aaece9cd91ce094ad0702d9d | def _rw2DRecord(self):
'Read/write power density by mesh point.'
imax = self._metadata['NINTI']
jmax = self._metadata['NINTJ']
kmax = self._metadata['NINTK']
nblck = self._metadata['NBLOK']
if (self._data.powerDensity.size == 0):
self._data.powerDensity = numpy.zeros((imax, jmax, kmax), dtype=numpy.float32)
for ki in range(kmax):
for bi in range(nblck):
(jL, jU) = cccc.getBlockBandwidth((bi + 1), jmax, nblck)
with self.createRecord() as record:
self._data.powerDensity[(:, jL:(jU + 1), ki)] = record.rwMatrix(self._data.powerDensity[(:, jL:(jU + 1), ki)], ((jU - jL) + 1), imax) | Read/write power density by mesh point. | armi/nuclearDataIO/cccc/pwdint.py | _rw2DRecord | DennisYelizarov/armi | 162 | python | def _rw2DRecord(self):
imax = self._metadata['NINTI']
jmax = self._metadata['NINTJ']
kmax = self._metadata['NINTK']
nblck = self._metadata['NBLOK']
if (self._data.powerDensity.size == 0):
self._data.powerDensity = numpy.zeros((imax, jmax, kmax), dtype=numpy.float32)
for ki in range(kmax):
for bi in range(nblck):
(jL, jU) = cccc.getBlockBandwidth((bi + 1), jmax, nblck)
with self.createRecord() as record:
self._data.powerDensity[(:, jL:(jU + 1), ki)] = record.rwMatrix(self._data.powerDensity[(:, jL:(jU + 1), ki)], ((jU - jL) + 1), imax) | def _rw2DRecord(self):
imax = self._metadata['NINTI']
jmax = self._metadata['NINTJ']
kmax = self._metadata['NINTK']
nblck = self._metadata['NBLOK']
if (self._data.powerDensity.size == 0):
self._data.powerDensity = numpy.zeros((imax, jmax, kmax), dtype=numpy.float32)
for ki in range(kmax):
for bi in range(nblck):
(jL, jU) = cccc.getBlockBandwidth((bi + 1), jmax, nblck)
with self.createRecord() as record:
self._data.powerDensity[(:, jL:(jU + 1), ki)] = record.rwMatrix(self._data.powerDensity[(:, jL:(jU + 1), ki)], ((jU - jL) + 1), imax)<|docstring|>Read/write power density by mesh point.<|endoftext|> |
9421d2986aef6901d9007624fa79ff6376e81a930b60c448c4f9ac10262c9829 | def load_traces_optimally(roi_data_handle, roi_ns=None, frame_ns=None, rois_first=True):
'\n load_traces_optimally(roi_data_handle)\n\n Updates indices, possibly reordered, for optimal loading of ROI traces.\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n - rois_first (bool) : if True, ROIs are stored as \n ROIs x frames, else frames x ROIs\n default: True\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame)\n '
if ((roi_ns is None) and (frame_ns is None)):
roi_traces = roi_data_handle[()]
elif isinstance(roi_ns, int):
if rois_first:
roi_traces = roi_data_handle[roi_ns]
else:
roi_traces = roi_data_handle[(:, roi_ns)]
if (frame_ns is not None):
roi_traces = roi_traces[frame_ns]
elif isinstance(frame_ns, int):
if rois_first:
roi_traces = roi_data_handle[(:, frame_ns)]
else:
roi_traces = roi_data_handle[frame_ns]
if (roi_ns is not None):
roi_traces = roi_traces[roi_ns]
elif ((frame_ns is not None) and (len(np.unique(frame_ns)) == len(frame_ns))):
if (roi_ns is None):
roi_ns = slice(None, None, None)
frame_ns = np.asarray(frame_ns)
resort = None
if (np.sort(frame_ns) != frame_ns).any():
resort = np.argsort(np.argsort(frame_ns))
frame_ns = np.sort(frame_ns)
if rois_first:
roi_traces = roi_data_handle[(:, frame_ns)][roi_ns]
if (resort is not None):
roi_traces = roi_traces[(:, resort)]
else:
roi_traces = roi_data_handle[frame_ns][(..., roi_ns)]
if (resort is not None):
roi_traces = roi_traces[resort]
elif ((roi_ns is not None) and (len(np.unique(roi_ns)) == len(roi_ns))):
if (frame_ns is None):
frame_ns = slice(None, None, None)
roi_ns = np.asarray(roi_ns)
resort = None
if (np.sort(roi_ns) != roi_ns).any():
resort = np.argsort(np.argsort(roi_ns))
roi_ns = np.sort(roi_ns)
if rois_first:
roi_traces = roi_data_handle[roi_ns][(:, frame_ns)]
if (resort is not None):
roi_traces = roi_traces[resort]
else:
roi_traces = roi_data_handle[(:, roi_ns)][frame_ns]
if (resort is not None):
roi_traces = roi_traces[(:, resort)]
else:
if (roi_ns is None):
roi_ns = slice(None, None, None)
if (frame_ns is None):
frame_ns = slice(None, None, None)
if rois_first:
roi_traces = roi_data_handle[()][roi_ns][(:, frame_ns)]
else:
roi_traces = roi_data_handle[()][(:, roi_ns)][frame_ns]
if (not rois_first):
roi_traces = roi_traces.T
return roi_traces | load_traces_optimally(roi_data_handle)
Updates indices, possibly reordered, for optimal loading of ROI traces.
Optional args:
- roi_ns (int or array-like) : ROIs to load (None for all)
default: None
- frame_ns (int or array-like): frames to load (None for all)
default: None
- rois_first (bool) : if True, ROIs are stored as
ROIs x frames, else frames x ROIs
default: True
Returns:
- roi_traces (1 or 2D array): ROI traces (ROI x frame) | sess_util/sess_trace_util.py | load_traces_optimally | AllenInstitute/OpenScope_CA_Analysis | 0 | python | def load_traces_optimally(roi_data_handle, roi_ns=None, frame_ns=None, rois_first=True):
'\n load_traces_optimally(roi_data_handle)\n\n Updates indices, possibly reordered, for optimal loading of ROI traces.\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n - rois_first (bool) : if True, ROIs are stored as \n ROIs x frames, else frames x ROIs\n default: True\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame)\n '
if ((roi_ns is None) and (frame_ns is None)):
roi_traces = roi_data_handle[()]
elif isinstance(roi_ns, int):
if rois_first:
roi_traces = roi_data_handle[roi_ns]
else:
roi_traces = roi_data_handle[(:, roi_ns)]
if (frame_ns is not None):
roi_traces = roi_traces[frame_ns]
elif isinstance(frame_ns, int):
if rois_first:
roi_traces = roi_data_handle[(:, frame_ns)]
else:
roi_traces = roi_data_handle[frame_ns]
if (roi_ns is not None):
roi_traces = roi_traces[roi_ns]
elif ((frame_ns is not None) and (len(np.unique(frame_ns)) == len(frame_ns))):
if (roi_ns is None):
roi_ns = slice(None, None, None)
frame_ns = np.asarray(frame_ns)
resort = None
if (np.sort(frame_ns) != frame_ns).any():
resort = np.argsort(np.argsort(frame_ns))
frame_ns = np.sort(frame_ns)
if rois_first:
roi_traces = roi_data_handle[(:, frame_ns)][roi_ns]
if (resort is not None):
roi_traces = roi_traces[(:, resort)]
else:
roi_traces = roi_data_handle[frame_ns][(..., roi_ns)]
if (resort is not None):
roi_traces = roi_traces[resort]
elif ((roi_ns is not None) and (len(np.unique(roi_ns)) == len(roi_ns))):
if (frame_ns is None):
frame_ns = slice(None, None, None)
roi_ns = np.asarray(roi_ns)
resort = None
if (np.sort(roi_ns) != roi_ns).any():
resort = np.argsort(np.argsort(roi_ns))
roi_ns = np.sort(roi_ns)
if rois_first:
roi_traces = roi_data_handle[roi_ns][(:, frame_ns)]
if (resort is not None):
roi_traces = roi_traces[resort]
else:
roi_traces = roi_data_handle[(:, roi_ns)][frame_ns]
if (resort is not None):
roi_traces = roi_traces[(:, resort)]
else:
if (roi_ns is None):
roi_ns = slice(None, None, None)
if (frame_ns is None):
frame_ns = slice(None, None, None)
if rois_first:
roi_traces = roi_data_handle[()][roi_ns][(:, frame_ns)]
else:
roi_traces = roi_data_handle[()][(:, roi_ns)][frame_ns]
if (not rois_first):
roi_traces = roi_traces.T
return roi_traces | def load_traces_optimally(roi_data_handle, roi_ns=None, frame_ns=None, rois_first=True):
'\n load_traces_optimally(roi_data_handle)\n\n Updates indices, possibly reordered, for optimal loading of ROI traces.\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n - rois_first (bool) : if True, ROIs are stored as \n ROIs x frames, else frames x ROIs\n default: True\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame)\n '
if ((roi_ns is None) and (frame_ns is None)):
roi_traces = roi_data_handle[()]
elif isinstance(roi_ns, int):
if rois_first:
roi_traces = roi_data_handle[roi_ns]
else:
roi_traces = roi_data_handle[(:, roi_ns)]
if (frame_ns is not None):
roi_traces = roi_traces[frame_ns]
elif isinstance(frame_ns, int):
if rois_first:
roi_traces = roi_data_handle[(:, frame_ns)]
else:
roi_traces = roi_data_handle[frame_ns]
if (roi_ns is not None):
roi_traces = roi_traces[roi_ns]
elif ((frame_ns is not None) and (len(np.unique(frame_ns)) == len(frame_ns))):
if (roi_ns is None):
roi_ns = slice(None, None, None)
frame_ns = np.asarray(frame_ns)
resort = None
if (np.sort(frame_ns) != frame_ns).any():
resort = np.argsort(np.argsort(frame_ns))
frame_ns = np.sort(frame_ns)
if rois_first:
roi_traces = roi_data_handle[(:, frame_ns)][roi_ns]
if (resort is not None):
roi_traces = roi_traces[(:, resort)]
else:
roi_traces = roi_data_handle[frame_ns][(..., roi_ns)]
if (resort is not None):
roi_traces = roi_traces[resort]
elif ((roi_ns is not None) and (len(np.unique(roi_ns)) == len(roi_ns))):
if (frame_ns is None):
frame_ns = slice(None, None, None)
roi_ns = np.asarray(roi_ns)
resort = None
if (np.sort(roi_ns) != roi_ns).any():
resort = np.argsort(np.argsort(roi_ns))
roi_ns = np.sort(roi_ns)
if rois_first:
roi_traces = roi_data_handle[roi_ns][(:, frame_ns)]
if (resort is not None):
roi_traces = roi_traces[resort]
else:
roi_traces = roi_data_handle[(:, roi_ns)][frame_ns]
if (resort is not None):
roi_traces = roi_traces[(:, resort)]
else:
if (roi_ns is None):
roi_ns = slice(None, None, None)
if (frame_ns is None):
frame_ns = slice(None, None, None)
if rois_first:
roi_traces = roi_data_handle[()][roi_ns][(:, frame_ns)]
else:
roi_traces = roi_data_handle[()][(:, roi_ns)][frame_ns]
if (not rois_first):
roi_traces = roi_traces.T
return roi_traces<|docstring|>load_traces_optimally(roi_data_handle)
Updates indices, possibly reordered, for optimal loading of ROI traces.
Optional args:
- roi_ns (int or array-like) : ROIs to load (None for all)
default: None
- frame_ns (int or array-like): frames to load (None for all)
default: None
- rois_first (bool) : if True, ROIs are stored as
ROIs x frames, else frames x ROIs
default: True
Returns:
- roi_traces (1 or 2D array): ROI traces (ROI x frame)<|endoftext|> |
cd68a37138f9a2cb8d98a74b9ebad72d6a81070036368d4d51ef19894200f86a | def load_roi_traces_nwb(sess_files, roi_ns=None, frame_ns=None):
'\n load_roi_traces_nwb(sess_files)\n\n Returns ROI traces from NWB files (stored as frames x ROIs). \n\n Required args:\n - sess_files (list): full path names of the session files\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame)\n '
ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)
with pynwb.NWBHDF5IO(str(ophys_file), 'r') as f:
nwbfile_in = f.read()
ophys_module = nwbfile_in.get_processing_module('ophys')
main_field = 'DfOverF'
data_field = 'RoiResponseSeries'
try:
roi_resp_series = ophys_module.get_data_interface(main_field).get_roi_response_series(data_field)
except KeyError as err:
raise KeyError(f'Could not find ROI response series data in image segmentation for {ophys_file} due to: {err}')
roi_data_handle = roi_resp_series.data
roi_traces = load_traces_optimally(roi_data_handle, roi_ns=roi_ns, frame_ns=frame_ns, rois_first=False)
return roi_traces | load_roi_traces_nwb(sess_files)
Returns ROI traces from NWB files (stored as frames x ROIs).
Required args:
- sess_files (list): full path names of the session files
Optional args:
- roi_ns (int or array-like) : ROIs to load (None for all)
default: None
- frame_ns (int or array-like): frames to load (None for all)
default: None
Returns:
- roi_traces (1 or 2D array): ROI traces (ROI x frame) | sess_util/sess_trace_util.py | load_roi_traces_nwb | AllenInstitute/OpenScope_CA_Analysis | 0 | python | def load_roi_traces_nwb(sess_files, roi_ns=None, frame_ns=None):
'\n load_roi_traces_nwb(sess_files)\n\n Returns ROI traces from NWB files (stored as frames x ROIs). \n\n Required args:\n - sess_files (list): full path names of the session files\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame)\n '
ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)
with pynwb.NWBHDF5IO(str(ophys_file), 'r') as f:
nwbfile_in = f.read()
ophys_module = nwbfile_in.get_processing_module('ophys')
main_field = 'DfOverF'
data_field = 'RoiResponseSeries'
try:
roi_resp_series = ophys_module.get_data_interface(main_field).get_roi_response_series(data_field)
except KeyError as err:
raise KeyError(f'Could not find ROI response series data in image segmentation for {ophys_file} due to: {err}')
roi_data_handle = roi_resp_series.data
roi_traces = load_traces_optimally(roi_data_handle, roi_ns=roi_ns, frame_ns=frame_ns, rois_first=False)
return roi_traces | def load_roi_traces_nwb(sess_files, roi_ns=None, frame_ns=None):
'\n load_roi_traces_nwb(sess_files)\n\n Returns ROI traces from NWB files (stored as frames x ROIs). \n\n Required args:\n - sess_files (list): full path names of the session files\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame)\n '
ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)
with pynwb.NWBHDF5IO(str(ophys_file), 'r') as f:
nwbfile_in = f.read()
ophys_module = nwbfile_in.get_processing_module('ophys')
main_field = 'DfOverF'
data_field = 'RoiResponseSeries'
try:
roi_resp_series = ophys_module.get_data_interface(main_field).get_roi_response_series(data_field)
except KeyError as err:
raise KeyError(f'Could not find ROI response series data in image segmentation for {ophys_file} due to: {err}')
roi_data_handle = roi_resp_series.data
roi_traces = load_traces_optimally(roi_data_handle, roi_ns=roi_ns, frame_ns=frame_ns, rois_first=False)
return roi_traces<|docstring|>load_roi_traces_nwb(sess_files)
Returns ROI traces from NWB files (stored as frames x ROIs).
Required args:
- sess_files (list): full path names of the session files
Optional args:
- roi_ns (int or array-like) : ROIs to load (None for all)
default: None
- frame_ns (int or array-like): frames to load (None for all)
default: None
Returns:
- roi_traces (1 or 2D array): ROI traces (ROI x frame)<|endoftext|> |
a87cc4c9b62237682cc5b81b0c5416801306e6bb9c074cc72e36d73068601643 | def load_roi_traces(roi_trace_path, roi_ns=None, frame_ns=None):
'\n load_roi_traces(roi_trace_path)\n\n Returns ROI traces from ROI data file (stored as ROI x frames). \n\n Required args:\n - roi_trace_path (Path): full path name of the ROI data file\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame) \n '
with h5py.File(roi_trace_path, 'r') as f:
dataset_name = ('data' if ('data' in f.keys()) else 'FC')
roi_data_handle = f[dataset_name]
roi_traces = load_traces_optimally(roi_data_handle, roi_ns=roi_ns, frame_ns=frame_ns, rois_first=True)
return roi_traces | load_roi_traces(roi_trace_path)
Returns ROI traces from ROI data file (stored as ROI x frames).
Required args:
- roi_trace_path (Path): full path name of the ROI data file
Optional args:
- roi_ns (int or array-like) : ROIs to load (None for all)
default: None
- frame_ns (int or array-like): frames to load (None for all)
default: None
Returns:
- roi_traces (1 or 2D array): ROI traces (ROI x frame) | sess_util/sess_trace_util.py | load_roi_traces | AllenInstitute/OpenScope_CA_Analysis | 0 | python | def load_roi_traces(roi_trace_path, roi_ns=None, frame_ns=None):
'\n load_roi_traces(roi_trace_path)\n\n Returns ROI traces from ROI data file (stored as ROI x frames). \n\n Required args:\n - roi_trace_path (Path): full path name of the ROI data file\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame) \n '
with h5py.File(roi_trace_path, 'r') as f:
dataset_name = ('data' if ('data' in f.keys()) else 'FC')
roi_data_handle = f[dataset_name]
roi_traces = load_traces_optimally(roi_data_handle, roi_ns=roi_ns, frame_ns=frame_ns, rois_first=True)
return roi_traces | def load_roi_traces(roi_trace_path, roi_ns=None, frame_ns=None):
'\n load_roi_traces(roi_trace_path)\n\n Returns ROI traces from ROI data file (stored as ROI x frames). \n\n Required args:\n - roi_trace_path (Path): full path name of the ROI data file\n\n Optional args:\n - roi_ns (int or array-like) : ROIs to load (None for all)\n default: None\n - frame_ns (int or array-like): frames to load (None for all) \n default: None\n\n Returns:\n - roi_traces (1 or 2D array): ROI traces (ROI x frame) \n '
with h5py.File(roi_trace_path, 'r') as f:
dataset_name = ('data' if ('data' in f.keys()) else 'FC')
roi_data_handle = f[dataset_name]
roi_traces = load_traces_optimally(roi_data_handle, roi_ns=roi_ns, frame_ns=frame_ns, rois_first=True)
return roi_traces<|docstring|>load_roi_traces(roi_trace_path)
Returns ROI traces from ROI data file (stored as ROI x frames).
Required args:
- roi_trace_path (Path): full path name of the ROI data file
Optional args:
- roi_ns (int or array-like) : ROIs to load (None for all)
default: None
- frame_ns (int or array-like): frames to load (None for all)
default: None
Returns:
- roi_traces (1 or 2D array): ROI traces (ROI x frame)<|endoftext|> |
34927ce5b493a10198563cedf28caf0cb9d53db075bf401e3e3f6c8c20ffc932 | def load_roi_data_nwb(sess_files):
'\n load_roi_data_nwb(sess_files)\n\n Returns ROI data from NWB files. \n\n Required args:\n - sess_files (Path): full path names of the session files\n\n Returns:\n - roi_ids (list) : ROI IDs\n - nrois (int) : total number of ROIs\n - tot_twop_fr (int): total number of two-photon frames recorded\n '
ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)
with pynwb.NWBHDF5IO(str(ophys_file), 'r') as f:
nwbfile_in = f.read()
ophys_module = nwbfile_in.get_processing_module('ophys')
main_field = 'ImageSegmentation'
data_field = 'PlaneSegmentation'
try:
plane_seg = ophys_module.get_data_interface(main_field).get_plane_segmentation(data_field)
except KeyError as err:
raise KeyError(f'Could not find plane segmentation data in image segmentation for {ophys_file} due to: {err}')
roi_ids = list(plane_seg['id'].data)
main_field = 'DfOverF'
data_field = 'RoiResponseSeries'
try:
roi_resp_series = ophys_module.get_data_interface(main_field).get_roi_response_series(data_field)
except KeyError as err:
raise KeyError(f'Could not find ROI response series data in image segmentation for {ophys_file} due to: {err}')
(tot_twop_fr, nrois) = roi_resp_series.data.shape
return (roi_ids, nrois, tot_twop_fr) | load_roi_data_nwb(sess_files)
Returns ROI data from NWB files.
Required args:
- sess_files (Path): full path names of the session files
Returns:
- roi_ids (list) : ROI IDs
- nrois (int) : total number of ROIs
- tot_twop_fr (int): total number of two-photon frames recorded | sess_util/sess_trace_util.py | load_roi_data_nwb | AllenInstitute/OpenScope_CA_Analysis | 0 | python | def load_roi_data_nwb(sess_files):
'\n load_roi_data_nwb(sess_files)\n\n Returns ROI data from NWB files. \n\n Required args:\n - sess_files (Path): full path names of the session files\n\n Returns:\n - roi_ids (list) : ROI IDs\n - nrois (int) : total number of ROIs\n - tot_twop_fr (int): total number of two-photon frames recorded\n '
ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)
with pynwb.NWBHDF5IO(str(ophys_file), 'r') as f:
nwbfile_in = f.read()
ophys_module = nwbfile_in.get_processing_module('ophys')
main_field = 'ImageSegmentation'
data_field = 'PlaneSegmentation'
try:
plane_seg = ophys_module.get_data_interface(main_field).get_plane_segmentation(data_field)
except KeyError as err:
raise KeyError(f'Could not find plane segmentation data in image segmentation for {ophys_file} due to: {err}')
roi_ids = list(plane_seg['id'].data)
main_field = 'DfOverF'
data_field = 'RoiResponseSeries'
try:
roi_resp_series = ophys_module.get_data_interface(main_field).get_roi_response_series(data_field)
except KeyError as err:
raise KeyError(f'Could not find ROI response series data in image segmentation for {ophys_file} due to: {err}')
(tot_twop_fr, nrois) = roi_resp_series.data.shape
return (roi_ids, nrois, tot_twop_fr) | def load_roi_data_nwb(sess_files):
'\n load_roi_data_nwb(sess_files)\n\n Returns ROI data from NWB files. \n\n Required args:\n - sess_files (Path): full path names of the session files\n\n Returns:\n - roi_ids (list) : ROI IDs\n - nrois (int) : total number of ROIs\n - tot_twop_fr (int): total number of two-photon frames recorded\n '
ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)
with pynwb.NWBHDF5IO(str(ophys_file), 'r') as f:
nwbfile_in = f.read()
ophys_module = nwbfile_in.get_processing_module('ophys')
main_field = 'ImageSegmentation'
data_field = 'PlaneSegmentation'
try:
plane_seg = ophys_module.get_data_interface(main_field).get_plane_segmentation(data_field)
except KeyError as err:
raise KeyError(f'Could not find plane segmentation data in image segmentation for {ophys_file} due to: {err}')
roi_ids = list(plane_seg['id'].data)
main_field = 'DfOverF'
data_field = 'RoiResponseSeries'
try:
roi_resp_series = ophys_module.get_data_interface(main_field).get_roi_response_series(data_field)
except KeyError as err:
raise KeyError(f'Could not find ROI response series data in image segmentation for {ophys_file} due to: {err}')
(tot_twop_fr, nrois) = roi_resp_series.data.shape
return (roi_ids, nrois, tot_twop_fr)<|docstring|>load_roi_data_nwb(sess_files)
Returns ROI data from NWB files.
Required args:
- sess_files (Path): full path names of the session files
Returns:
- roi_ids (list) : ROI IDs
- nrois (int) : total number of ROIs
- tot_twop_fr (int): total number of two-photon frames recorded<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.