body_hash (string, len 64) | body (string, 23-109k) | docstring (string, 1-57k) | path (string, 4-198) | name (string, 1-115) | repository_name (string, 7-111) | repository_stars (float64, 0-191k) | lang (string, 1 class) | body_without_docstring (string, 14-108k) | unified (string, 45-133k) |
---|---|---|---|---|---|---|---|---|---|
d2eaf851d77ba3ff512ba37c09916e63a2034a53b333172647f38f8af95dc4e4 | def addFilesToCombineArchive(archive_path, file_names, entry_locations, file_formats, master_attributes, out_archive_path):
' Add multiple files to an existing COMBINE archive on disk and save the result as a new archive.\n\n :param archive_path: The path to the archive.\n :param file_names: List of extra files to add.\n :param entry_locations: List of destination locations for the files in the output archive.\n :param file_format: List of formats for the resp. files.\n :param master_attributes: List of true/false values for the resp. master attributes of the files.\n :param out_archive_path: The path to the output archive.\n '
import tecombine, tempfile
input_archive = tecombine.CombineArchive()
if (not input_archive.initializeFromArchive(archive_path)):
raise RuntimeError('Failed to initialize archive')
tempfiles = []
output_archive = tecombine.CombineArchive()
description = input_archive.getMetadataForLocation('.')
if description:
output_archive.addMetadata('.', description)
for entry in (input_archive.getEntry(k) for k in range(input_archive.getNumEntries())):
(fhandle, fname) = tempfile.mkstemp()
tempfiles.append(fname)
input_archive.extractEntry(entry.getLocation(), fname)
if (not (entry.getLocation() in entry_locations)):
output_archive.addFile(fname, entry.getLocation(), entry.getFormat(), entry.getMaster())
for (file_name, entry_location, file_format, master) in zip(file_names, entry_locations, file_formats, master_attributes):
output_archive.addFile(file_name, entry_location, file_format, master)
if os.path.exists(out_archive_path):
if os.path.isfile(out_archive_path):
os.remove(out_archive_path)
elif os.path.isdir(out_archive_path):
raise RuntimeError('Tried to write archive to {}, which is a directory.'.format(out_archive_path))
else:
raise RuntimeError('Could not write archive to {}.'.format(out_archive_path))
output_archive.writeToFile(out_archive_path)
for t in tempfiles:
os.remove(t) | Add multiple files to an existing COMBINE archive on disk and save the result as a new archive.
:param archive_path: The path to the archive.
:param file_names: List of extra files to add.
:param entry_locations: List of destination locations for the files in the output archive.
:param file_format: List of formats for the resp. files.
:param master_attributes: List of true/false values for the resp. master attributes of the files.
:param out_archive_path: The path to the output archive. | tellurium/bombBeetle.py | addFilesToCombineArchive | madfain/BombBeetle | 1 | python | def addFilesToCombineArchive(archive_path, file_names, entry_locations, file_formats, master_attributes, out_archive_path):
' Add multiple files to an existing COMBINE archive on disk and save the result as a new archive.\n\n :param archive_path: The path to the archive.\n :param file_names: List of extra files to add.\n :param entry_locations: List of destination locations for the files in the output archive.\n :param file_format: List of formats for the resp. files.\n :param master_attributes: List of true/false values for the resp. master attributes of the files.\n :param out_archive_path: The path to the output archive.\n '
import tecombine, tempfile
input_archive = tecombine.CombineArchive()
if (not input_archive.initializeFromArchive(archive_path)):
raise RuntimeError('Failed to initialize archive')
tempfiles = []
output_archive = tecombine.CombineArchive()
description = input_archive.getMetadataForLocation('.')
if description:
output_archive.addMetadata('.', description)
for entry in (input_archive.getEntry(k) for k in range(input_archive.getNumEntries())):
(fhandle, fname) = tempfile.mkstemp()
tempfiles.append(fname)
input_archive.extractEntry(entry.getLocation(), fname)
if (not (entry.getLocation() in entry_locations)):
output_archive.addFile(fname, entry.getLocation(), entry.getFormat(), entry.getMaster())
for (file_name, entry_location, file_format, master) in zip(file_names, entry_locations, file_formats, master_attributes):
output_archive.addFile(file_name, entry_location, file_format, master)
if os.path.exists(out_archive_path):
if os.path.isfile(out_archive_path):
os.remove(out_archive_path)
elif os.path.isdir(out_archive_path):
raise RuntimeError('Tried to write archive to {}, which is a directory.'.format(out_archive_path))
else:
raise RuntimeError('Could not write archive to {}.'.format(out_archive_path))
output_archive.writeToFile(out_archive_path)
for t in tempfiles:
os.remove(t) | def addFilesToCombineArchive(archive_path, file_names, entry_locations, file_formats, master_attributes, out_archive_path):
' Add multiple files to an existing COMBINE archive on disk and save the result as a new archive.\n\n :param archive_path: The path to the archive.\n :param file_names: List of extra files to add.\n :param entry_locations: List of destination locations for the files in the output archive.\n :param file_format: List of formats for the resp. files.\n :param master_attributes: List of true/false values for the resp. master attributes of the files.\n :param out_archive_path: The path to the output archive.\n '
import tecombine, tempfile
input_archive = tecombine.CombineArchive()
if (not input_archive.initializeFromArchive(archive_path)):
raise RuntimeError('Failed to initialize archive')
tempfiles = []
output_archive = tecombine.CombineArchive()
description = input_archive.getMetadataForLocation('.')
if description:
output_archive.addMetadata('.', description)
for entry in (input_archive.getEntry(k) for k in range(input_archive.getNumEntries())):
(fhandle, fname) = tempfile.mkstemp()
tempfiles.append(fname)
input_archive.extractEntry(entry.getLocation(), fname)
if (not (entry.getLocation() in entry_locations)):
output_archive.addFile(fname, entry.getLocation(), entry.getFormat(), entry.getMaster())
for (file_name, entry_location, file_format, master) in zip(file_names, entry_locations, file_formats, master_attributes):
output_archive.addFile(file_name, entry_location, file_format, master)
if os.path.exists(out_archive_path):
if os.path.isfile(out_archive_path):
os.remove(out_archive_path)
elif os.path.isdir(out_archive_path):
raise RuntimeError('Tried to write archive to {}, which is a directory.'.format(out_archive_path))
else:
raise RuntimeError('Could not write archive to {}.'.format(out_archive_path))
output_archive.writeToFile(out_archive_path)
for t in tempfiles:
os.remove(t)<|docstring|>Add multiple files to an existing COMBINE archive on disk and save the result as a new archive.
:param archive_path: The path to the archive.
:param file_names: List of extra files to add.
:param entry_locations: List of destination locations for the files in the output archive.
:param file_format: List of formats for the resp. files.
:param master_attributes: List of true/false values for the resp. master attributes of the files.
:param out_archive_path: The path to the output archive.<|endoftext|> |
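
A minimal usage sketch for `addFilesToCombineArchive` as defined above. The archive paths, the added SED-ML file name, and the format URI are placeholders, and an existing COMBINE archive is assumed to be present at the input path.

```python
# Hypothetical call; paths and the format URI are placeholders.
addFilesToCombineArchive(
    archive_path='model.omex',                 # existing archive (assumed)
    file_names=['extra_experiment.sedml'],     # local file to add
    entry_locations=['experiment.sedml'],      # destination inside the archive
    file_formats=['http://identifiers.org/combine.specifications/sed-ml'],
    master_attributes=[True],
    out_archive_path='model_updated.omex')
```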
a8353acf36cc3cb3cf31f6ba251a8fa9c00d7993050b4ee94f8f6e46f72e9c9e | def createCombineArchive(archive_path, file_names, entry_locations, file_formats, master_attributes, description=None):
' Create a new COMBINE archive containing the provided entries and locations.\n\n :param archive_path: The path to the archive.\n :param file_names: List of extra files to add.\n :param entry_locations: List of destination locations for the files in the output archive.\n :param file_format: List of formats for the resp. files.\n :param master_attributes: List of true/false values for the resp. master attributes of the files.\n :param out_archive_path: The path to the output archive.\n :param description: A libcombine description structure to be assigned to the combine archive, if desired.\n '
import tecombine
output_archive = tecombine.CombineArchive()
if (description is not None):
output_archive.addMetadata('.', description)
for (file_name, entry_location, file_format, master) in zip(file_names, entry_locations, file_formats, master_attributes):
output_archive.addFile(file_name, entry_location, file_format, master)
if os.path.exists(archive_path):
if os.path.isfile(archive_path):
os.remove(archive_path)
elif os.path.isdir(archive_path):
raise RuntimeError('Tried to write archive to {}, which is a directory.'.format(archive_path))
else:
raise RuntimeError('Could not write archive to {}.'.format(archive_path))
output_archive.writeToFile(archive_path) | Create a new COMBINE archive containing the provided entries and locations.
:param archive_path: The path to the archive.
:param file_names: List of extra files to add.
:param entry_locations: List of destination locations for the files in the output archive.
:param file_format: List of formats for the resp. files.
:param master_attributes: List of true/false values for the resp. master attributes of the files.
:param out_archive_path: The path to the output archive.
:param description: A libcombine description structure to be assigned to the combine archive, if desired. | tellurium/bombBeetle.py | createCombineArchive | madfain/BombBeetle | 1 | python | def createCombineArchive(archive_path, file_names, entry_locations, file_formats, master_attributes, description=None):
' Create a new COMBINE archive containing the provided entries and locations.\n\n :param archive_path: The path to the archive.\n :param file_names: List of extra files to add.\n :param entry_locations: List of destination locations for the files in the output archive.\n :param file_format: List of formats for the resp. files.\n :param master_attributes: List of true/false values for the resp. master attributes of the files.\n :param out_archive_path: The path to the output archive.\n :param description: A libcombine description structure to be assigned to the combine archive, if desired.\n '
import tecombine
output_archive = tecombine.CombineArchive()
if (description is not None):
output_archive.addMetadata('.', description)
for (file_name, entry_location, file_format, master) in zip(file_names, entry_locations, file_formats, master_attributes):
output_archive.addFile(file_name, entry_location, file_format, master)
if os.path.exists(archive_path):
if os.path.isfile(archive_path):
os.remove(archive_path)
elif os.path.isdir(archive_path):
raise RuntimeError('Tried to write archive to {}, which is a directory.'.format(archive_path))
else:
raise RuntimeError('Could not write archive to {}.'.format(archive_path))
output_archive.writeToFile(archive_path) | def createCombineArchive(archive_path, file_names, entry_locations, file_formats, master_attributes, description=None):
' Create a new COMBINE archive containing the provided entries and locations.\n\n :param archive_path: The path to the archive.\n :param file_names: List of extra files to add.\n :param entry_locations: List of destination locations for the files in the output archive.\n :param file_format: List of formats for the resp. files.\n :param master_attributes: List of true/false values for the resp. master attributes of the files.\n :param out_archive_path: The path to the output archive.\n :param description: A libcombine description structure to be assigned to the combine archive, if desired.\n '
import tecombine
output_archive = tecombine.CombineArchive()
if (description is not None):
output_archive.addMetadata('.', description)
for (file_name, entry_location, file_format, master) in zip(file_names, entry_locations, file_formats, master_attributes):
output_archive.addFile(file_name, entry_location, file_format, master)
if os.path.exists(archive_path):
if os.path.isfile(archive_path):
os.remove(archive_path)
elif os.path.isdir(archive_path):
raise RuntimeError('Tried to write archive to {}, which is a directory.'.format(archive_path))
else:
raise RuntimeError('Could not write archive to {}.'.format(archive_path))
output_archive.writeToFile(archive_path)<|docstring|>Create a new COMBINE archive containing the provided entries and locations.
:param archive_path: The path to the archive.
:param file_names: List of extra files to add.
:param entry_locations: List of destination locations for the files in the output archive.
:param file_format: List of formats for the resp. files.
:param master_attributes: List of true/false values for the resp. master attributes of the files.
:param out_archive_path: The path to the output archive.
:param description: A libcombine description structure to be assigned to the combine archive, if desired.<|endoftext|> |
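
A hedged usage sketch for `createCombineArchive`; the file names, entry locations, and format URIs below are assumptions for illustration only.

```python
# Hypothetical call; file names and format URIs are placeholders.
createCombineArchive(
    archive_path='new_archive.omex',
    file_names=['model.xml', 'experiment.sedml'],
    entry_locations=['model.xml', 'experiment.sedml'],
    file_formats=['http://identifiers.org/combine.specifications/sbml',
                  'http://identifiers.org/combine.specifications/sed-ml'],
    master_attributes=[False, True])
```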
7fcd714539c5e3be8e23bd3c72d72bba2f71a8e2c0eee8de9352f785ed155f7f | def getEigenvalues(m):
' Eigenvalues of matrix.\n\n Convenience method for computing the eigenvalues of a matrix m\n Uses numpy eig to compute the eigenvalues.\n\n :param m: numpy array\n :returns: numpy array containing eigenvalues\n '
from numpy import linalg
(w, v) = linalg.eig(m)
return w | Eigenvalues of matrix.
Convenience method for computing the eigenvalues of a matrix m
Uses numpy eig to compute the eigenvalues.
:param m: numpy array
:returns: numpy array containing eigenvalues | tellurium/bombBeetle.py | getEigenvalues | madfain/BombBeetle | 1 | python | def getEigenvalues(m):
' Eigenvalues of matrix.\n\n Convenience method for computing the eigenvalues of a matrix m\n Uses numpy eig to compute the eigenvalues.\n\n :param m: numpy array\n :returns: numpy array containing eigenvalues\n '
from numpy import linalg
(w, v) = linalg.eig(m)
return w | def getEigenvalues(m):
' Eigenvalues of matrix.\n\n Convenience method for computing the eigenvalues of a matrix m\n Uses numpy eig to compute the eigenvalues.\n\n :param m: numpy array\n :returns: numpy array containing eigenvalues\n '
from numpy import linalg
(w, v) = linalg.eig(m)
return w<|docstring|>Eigenvalues of matrix.
Convenience method for computing the eigenvalues of a matrix m
Uses numpy eig to compute the eigenvalues.
:param m: numpy array
:returns: numpy array containing eigenvalues<|endoftext|> |
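
A small worked example for `getEigenvalues`: for a diagonal matrix the eigenvalues are simply the diagonal entries.

```python
import numpy as np

m = np.array([[2.0, 0.0],
              [0.0, 3.0]])
print(getEigenvalues(m))  # -> [2. 3.]
```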
b058dffb0d0d3f6dc3983b473775155af253e12bbc519497727c3ab00b88d818 | def plotArray(result, loc='upper right', show=True, resetColorCycle=True, xlabel=None, ylabel=None, title=None, xlim=None, ylim=None, xscale='linear', yscale='linear', grid=False, labels=None, **kwargs):
' Plot an array.\n\n :param result: Array to plot, first column of the array must be the x-axis and remaining columns the y-axis\n :param loc: Location of legend box. Valid strings \'best\' | upper right\' | \'upper left\' | \'lower left\' | \'lower right\' | \'right\' | \'center left\' | \'center right\' | \'lower center\' | \'upper center\' | \'center\' |\n :type loc: str\n :param color: \'red\', \'blue\', etc. to use the same color for every curve\n :type color: str\n :param labels: A list of labels for the legend, include as many labels as there are curves to plot\n :param xlabel: x-axis label\n :type xlabel: str\n :param ylabel: y-axis label\n :type ylabel: str\n :param title: Add plot title\n :type title: str\n :param xlim: Limits on x-axis (tuple [start, end])\n :param ylim: Limits on y-axis\n :param xscale: \'linear\' or \'log\' scale for x-axis\n :param yscale: \'linear\' or \'log\' scale for y-axis\n :param grid: Show grid\n :type grid: bool\n :param show: show=True (default) shows the plot, use show=False to plot multiple simulations in one plot\n :param resetColorCycle: If true, resets color cycle on given figure (works with show=False to plot multiple simulations on a single plot) \n :type resetColorCycle: bool\n :param kwargs: Additional matplotlib keywords like linewidth, linestyle...\n\n ::\n\n import numpy as np, tellurium as te\n result = np.array([[1,2,3], [7.2,6.5,8.8], [9.8, 6.5, 4.3]])\n te.plotArray(result, title="My graph", xlim=((1, 5)), labels=["Label 1", "Label 2"],\n yscale=\'log\', linestyle=\'dashed\')\n '
warnings.warn('plotArray is deprecated, use plot instead', DeprecationWarning)
if resetColorCycle:
plt.gca().set_prop_cycle(None)
if ('linewidth' not in kwargs):
kwargs['linewidth'] = 2.0
Ncol = result.shape[1]
if (labels is None):
labels = result.dtype.names
for k in range(1, Ncol):
if ((loc is None) or (labels is None)):
p = plt.plot(result[:, 0], result[:, k], **kwargs)
else:
p = plt.plot(result[:, 0], result[:, k], label=labels[(k - 1)], **kwargs)
if (xlabel is not None):
plt.xlabel(xlabel)
if (ylabel is not None):
plt.ylabel(ylabel)
if (title is not None):
plt.title(title)
if (xlim is not None):
plt.xlim(xlim)
if (ylim is not None):
plt.ylim(ylim)
plt.xscale(xscale)
plt.yscale(yscale)
plt.grid(grid)
if ((loc is not None) and (labels is not None)):
plt.legend(loc=loc)
if show:
plt.show()
return p | Plot an array.
:param result: Array to plot, first column of the array must be the x-axis and remaining columns the y-axis
:param loc: Location of legend box. Valid strings 'best' | upper right' | 'upper left' | 'lower left' | 'lower right' | 'right' | 'center left' | 'center right' | 'lower center' | 'upper center' | 'center' |
:type loc: str
:param color: 'red', 'blue', etc. to use the same color for every curve
:type color: str
:param labels: A list of labels for the legend, include as many labels as there are curves to plot
:param xlabel: x-axis label
:type xlabel: str
:param ylabel: y-axis label
:type ylabel: str
:param title: Add plot title
:type title: str
:param xlim: Limits on x-axis (tuple [start, end])
:param ylim: Limits on y-axis
:param xscale: 'linear' or 'log' scale for x-axis
:param yscale: 'linear' or 'log' scale for y-axis
:param grid: Show grid
:type grid: bool
:param show: show=True (default) shows the plot, use show=False to plot multiple simulations in one plot
:param resetColorCycle: If true, resets color cycle on given figure (works with show=False to plot multiple simulations on a single plot)
:type resetColorCycle: bool
:param kwargs: Additional matplotlib keywords like linewidth, linestyle...
::
import numpy as np, tellurium as te
result = np.array([[1,2,3], [7.2,6.5,8.8], [9.8, 6.5, 4.3]])
te.plotArray(result, title="My graph", xlim=((1, 5)), labels=["Label 1", "Label 2"],
yscale='log', linestyle='dashed') | tellurium/bombBeetle.py | plotArray | madfain/BombBeetle | 1 | python | def plotArray(result, loc='upper right', show=True, resetColorCycle=True, xlabel=None, ylabel=None, title=None, xlim=None, ylim=None, xscale='linear', yscale='linear', grid=False, labels=None, **kwargs):
' Plot an array.\n\n :param result: Array to plot, first column of the array must be the x-axis and remaining columns the y-axis\n :param loc: Location of legend box. Valid strings \'best\' | upper right\' | \'upper left\' | \'lower left\' | \'lower right\' | \'right\' | \'center left\' | \'center right\' | \'lower center\' | \'upper center\' | \'center\' |\n :type loc: str\n :param color: \'red\', \'blue\', etc. to use the same color for every curve\n :type color: str\n :param labels: A list of labels for the legend, include as many labels as there are curves to plot\n :param xlabel: x-axis label\n :type xlabel: str\n :param ylabel: y-axis label\n :type ylabel: str\n :param title: Add plot title\n :type title: str\n :param xlim: Limits on x-axis (tuple [start, end])\n :param ylim: Limits on y-axis\n :param xscale: \'linear\' or \'log\' scale for x-axis\n :param yscale: \'linear\' or \'log\' scale for y-axis\n :param grid: Show grid\n :type grid: bool\n :param show: show=True (default) shows the plot, use show=False to plot multiple simulations in one plot\n :param resetColorCycle: If true, resets color cycle on given figure (works with show=False to plot multiple simulations on a single plot) \n :type resetColorCycle: bool\n :param kwargs: Additional matplotlib keywords like linewidth, linestyle...\n\n ::\n\n import numpy as np, tellurium as te\n result = np.array([[1,2,3], [7.2,6.5,8.8], [9.8, 6.5, 4.3]])\n te.plotArray(result, title="My graph", xlim=((1, 5)), labels=["Label 1", "Label 2"],\n yscale=\'log\', linestyle=\'dashed\')\n '
warnings.warn('plotArray is deprecated, use plot instead', DeprecationWarning)
if resetColorCycle:
plt.gca().set_prop_cycle(None)
if ('linewidth' not in kwargs):
kwargs['linewidth'] = 2.0
Ncol = result.shape[1]
if (labels is None):
labels = result.dtype.names
for k in range(1, Ncol):
if ((loc is None) or (labels is None)):
p = plt.plot(result[:, 0], result[:, k], **kwargs)
else:
p = plt.plot(result[:, 0], result[:, k], label=labels[(k - 1)], **kwargs)
if (xlabel is not None):
plt.xlabel(xlabel)
if (ylabel is not None):
plt.ylabel(ylabel)
if (title is not None):
plt.title(title)
if (xlim is not None):
plt.xlim(xlim)
if (ylim is not None):
plt.ylim(ylim)
plt.xscale(xscale)
plt.yscale(yscale)
plt.grid(grid)
if ((loc is not None) and (labels is not None)):
plt.legend(loc=loc)
if show:
plt.show()
return p | def plotArray(result, loc='upper right', show=True, resetColorCycle=True, xlabel=None, ylabel=None, title=None, xlim=None, ylim=None, xscale='linear', yscale='linear', grid=False, labels=None, **kwargs):
' Plot an array.\n\n :param result: Array to plot, first column of the array must be the x-axis and remaining columns the y-axis\n :param loc: Location of legend box. Valid strings \'best\' | upper right\' | \'upper left\' | \'lower left\' | \'lower right\' | \'right\' | \'center left\' | \'center right\' | \'lower center\' | \'upper center\' | \'center\' |\n :type loc: str\n :param color: \'red\', \'blue\', etc. to use the same color for every curve\n :type color: str\n :param labels: A list of labels for the legend, include as many labels as there are curves to plot\n :param xlabel: x-axis label\n :type xlabel: str\n :param ylabel: y-axis label\n :type ylabel: str\n :param title: Add plot title\n :type title: str\n :param xlim: Limits on x-axis (tuple [start, end])\n :param ylim: Limits on y-axis\n :param xscale: \'linear\' or \'log\' scale for x-axis\n :param yscale: \'linear\' or \'log\' scale for y-axis\n :param grid: Show grid\n :type grid: bool\n :param show: show=True (default) shows the plot, use show=False to plot multiple simulations in one plot\n :param resetColorCycle: If true, resets color cycle on given figure (works with show=False to plot multiple simulations on a single plot) \n :type resetColorCycle: bool\n :param kwargs: Additional matplotlib keywords like linewidth, linestyle...\n\n ::\n\n import numpy as np, tellurium as te\n result = np.array([[1,2,3], [7.2,6.5,8.8], [9.8, 6.5, 4.3]])\n te.plotArray(result, title="My graph", xlim=((1, 5)), labels=["Label 1", "Label 2"],\n yscale=\'log\', linestyle=\'dashed\')\n '
warnings.warn('plotArray is deprecated, use plot instead', DeprecationWarning)
if resetColorCycle:
plt.gca().set_prop_cycle(None)
if ('linewidth' not in kwargs):
kwargs['linewidth'] = 2.0
Ncol = result.shape[1]
if (labels is None):
labels = result.dtype.names
for k in range(1, Ncol):
if ((loc is None) or (labels is None)):
p = plt.plot(result[:, 0], result[:, k], **kwargs)
else:
p = plt.plot(result[:, 0], result[:, k], label=labels[(k - 1)], **kwargs)
if (xlabel is not None):
plt.xlabel(xlabel)
if (ylabel is not None):
plt.ylabel(ylabel)
if (title is not None):
plt.title(title)
if (xlim is not None):
plt.xlim(xlim)
if (ylim is not None):
plt.ylim(ylim)
plt.xscale(xscale)
plt.yscale(yscale)
plt.grid(grid)
if ((loc is not None) and (labels is not None)):
plt.legend(loc=loc)
if show:
plt.show()
return p<|docstring|>Plot an array.
:param result: Array to plot, first column of the array must be the x-axis and remaining columns the y-axis
:param loc: Location of legend box. Valid strings 'best' | upper right' | 'upper left' | 'lower left' | 'lower right' | 'right' | 'center left' | 'center right' | 'lower center' | 'upper center' | 'center' |
:type loc: str
:param color: 'red', 'blue', etc. to use the same color for every curve
:type color: str
:param labels: A list of labels for the legend, include as many labels as there are curves to plot
:param xlabel: x-axis label
:type xlabel: str
:param ylabel: y-axis label
:type ylabel: str
:param title: Add plot title
:type title: str
:param xlim: Limits on x-axis (tuple [start, end])
:param ylim: Limits on y-axis
:param xscale: 'linear' or 'log' scale for x-axis
:param yscale: 'linear' or 'log' scale for y-axis
:param grid: Show grid
:type grid: bool
:param show: show=True (default) shows the plot, use show=False to plot multiple simulations in one plot
:param resetColorCycle: If true, resets color cycle on given figure (works with show=False to plot multiple simulations on a single plot)
:type resetColorCycle: bool
:param kwargs: Additional matplotlib keywords like linewidth, linestyle...
::
import numpy as np, tellurium as te
result = np.array([[1,2,3], [7.2,6.5,8.8], [9.8, 6.5, 4.3]])
te.plotArray(result, title="My graph", xlim=((1, 5)), labels=["Label 1", "Label 2"],
yscale='log', linestyle='dashed')<|endoftext|> |
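
The docstring above shows a single call; the sketch below illustrates the `show=False` / `resetColorCycle=False` combination it mentions, overlaying two result arrays on one figure (synthetic data).

```python
import numpy as np

t = np.linspace(0, 10, 50)
run1 = np.column_stack([t, np.sin(t)])
run2 = np.column_stack([t, np.cos(t)])
plotArray(run1, show=False, labels=['sin'])           # first curve, keep the figure open
plotArray(run2, resetColorCycle=False, labels=['cos'],
          title='Two overlaid runs')                  # second curve, then show
```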
272d7f678c44b3ba635e2719ecf5c5a4bfaebca4bb86ff55d98badb0ac00f4d0 | def plotWithLegend(r, result=None, loc='upper right', show=True):
'\n Plot an array and include a legend. The first argument must be a roadrunner variable.\n The second argument must be an array containing data to plot. The first column of the array will\n be the x-axis and remaining columns the y-axis. Returns\n a handle to the plotting object.\n\n plotWithLegend (r)\n '
if (not isinstance(r, roadrunner.RoadRunner)):
raise Exception('First argument must be a roadrunner variable')
if (result is None):
result = r.getSimulationData()
if (result is None):
raise Exception('No simulation result available')
if (result.dtype.names is None):
columns = result.shape[1]
legendItems = r.selections[1:]
if ((columns - 1) != len(legendItems)):
raise Exception('Legend list must match result array')
elif (len(result.dtype.names) < 1):
raise Exception('No columns available to plot')
return plotArray(result, loc=loc, labels=legendItems, show=show) | Plot an array and include a legend. The first argument must be a roadrunner variable.
The second argument must be an array containing data to plot. The first column of the array will
be the x-axis and remaining columns the y-axis. Returns
a handle to the plotting object.
plotWithLegend (r) | tellurium/bombBeetle.py | plotWithLegend | madfain/BombBeetle | 1 | python | def plotWithLegend(r, result=None, loc='upper right', show=True):
'\n Plot an array and include a legend. The first argument must be a roadrunner variable.\n The second argument must be an array containing data to plot. The first column of the array will\n be the x-axis and remaining columns the y-axis. Returns\n a handle to the plotting object.\n\n plotWithLegend (r)\n '
if (not isinstance(r, roadrunner.RoadRunner)):
raise Exception('First argument must be a roadrunner variable')
if (result is None):
result = r.getSimulationData()
if (result is None):
raise Exception('No simulation result available')
if (result.dtype.names is None):
columns = result.shape[1]
legendItems = r.selections[1:]
if ((columns - 1) != len(legendItems)):
raise Exception('Legend list must match result array')
elif (len(result.dtype.names) < 1):
raise Exception('No columns available to plot')
return plotArray(result, loc=loc, labels=legendItems, show=show) | def plotWithLegend(r, result=None, loc='upper right', show=True):
'\n Plot an array and include a legend. The first argument must be a roadrunner variable.\n The second argument must be an array containing data to plot. The first column of the array will\n be the x-axis and remaining columns the y-axis. Returns\n a handle to the plotting object.\n\n plotWithLegend (r)\n '
if (not isinstance(r, roadrunner.RoadRunner)):
raise Exception('First argument must be a roadrunner variable')
if (result is None):
result = r.getSimulationData()
if (result is None):
raise Exception('No simulation result available')
if (result.dtype.names is None):
columns = result.shape[1]
legendItems = r.selections[1:]
if ((columns - 1) != len(legendItems)):
raise Exception('Legend list must match result array')
elif (len(result.dtype.names) < 1):
raise Exception('No columns available to plot')
return plotArray(result, loc=loc, labels=legendItems, show=show)<|docstring|>Plot an array and include a legend. The first argument must be a roadrunner variable.
The second argument must be an array containing data to plot. The first column of the array will
be the x-axis and remaining columns the y-axis. Returns
a handle to the plotting object.
plotWithLegend (r)<|endoftext|> |
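
A hedged usage sketch for `plotWithLegend`: it assumes the standard tellurium entry point `te.loada` is available alongside this module and uses a toy Antimony model.

```python
import tellurium as te

# Toy model; te.loada returning a roadrunner.RoadRunner instance is assumed.
r = te.loada('S1 -> S2; k1*S1; k1 = 0.1; S1 = 10; S2 = 0')
r.simulate(0, 50, 100)
plotWithLegend(r)   # legend entries come from r.selections[1:]
```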
00b0422a89f75c3c52b5946dc95d4aaeddac1e34b40f0f49bb4bc04878c65b4f | def loadTestModel(string):
"Loads particular test model into roadrunner.\n ::\n\n rr = te.loadTestModel('feedback.xml')\n\n :returns: RoadRunner instance with test model loaded\n "
import roadrunner.testing
return roadrunner.testing.getRoadRunner(string) | Loads particular test model into roadrunner.
::
rr = te.loadTestModel('feedback.xml')
:returns: RoadRunner instance with test model loaded | tellurium/bombBeetle.py | loadTestModel | madfain/BombBeetle | 1 | python | def loadTestModel(string):
"Loads particular test model into roadrunner.\n ::\n\n rr = te.loadTestModel('feedback.xml')\n\n :returns: RoadRunner instance with test model loaded\n "
import roadrunner.testing
return roadrunner.testing.getRoadRunner(string) | def loadTestModel(string):
"Loads particular test model into roadrunner.\n ::\n\n rr = te.loadTestModel('feedback.xml')\n\n :returns: RoadRunner instance with test model loaded\n "
import roadrunner.testing
return roadrunner.testing.getRoadRunner(string)<|docstring|>Loads particular test model into roadrunner.
::
rr = te.loadTestModel('feedback.xml')
:returns: RoadRunner instance with test model loaded<|endoftext|> |
bc602df0528bc404c02ddd07a3120ce2d3ac305e4daf0001ab5c00760d66a55c | def getTestModel(string):
"SBML of given test model as a string.\n ::\n\n # load test model as SBML\n sbml = te.getTestModel('feedback.xml')\n r = te.loadSBMLModel(sbml)\n # simulate\n r.simulate(0, 100, 20)\n\n :returns: SBML string of test model\n "
import roadrunner.testing
return roadrunner.testing.getData(string) | SBML of given test model as a string.
::
# load test model as SBML
sbml = te.getTestModel('feedback.xml')
r = te.loadSBMLModel(sbml)
# simulate
r.simulate(0, 100, 20)
:returns: SBML string of test model | tellurium/bombBeetle.py | getTestModel | madfain/BombBeetle | 1 | python | def getTestModel(string):
"SBML of given test model as a string.\n ::\n\n # load test model as SBML\n sbml = te.getTestModel('feedback.xml')\n r = te.loadSBMLModel(sbml)\n # simulate\n r.simulate(0, 100, 20)\n\n :returns: SBML string of test model\n "
import roadrunner.testing
return roadrunner.testing.getData(string) | def getTestModel(string):
"SBML of given test model as a string.\n ::\n\n # load test model as SBML\n sbml = te.getTestModel('feedback.xml')\n r = te.loadSBMLModel(sbml)\n # simulate\n r.simulate(0, 100, 20)\n\n :returns: SBML string of test model\n "
import roadrunner.testing
return roadrunner.testing.getData(string)<|docstring|>SBML of given test model as a string.
::
# load test model as SBML
sbml = te.getTestModel('feedback.xml')
r = te.loadSBMLModel(sbml)
# simulate
r.simulate(0, 100, 20)
:returns: SBML string of test model<|endoftext|> |
8bc338e74a4a62b14d01cc4c7cc1bdca799f6b57c9194151258aaea5dadc289f | def listTestModels():
' List roadrunner SBML test models.\n ::\n\n print(te.listTestModels())\n\n :returns: list of test model paths\n '
import roadrunner.testing
modelList = []
fileList = roadrunner.testing.dir('*.xml')
for pathName in fileList:
modelList.append(os.path.basename(pathName))
return modelList | List roadrunner SBML test models.
::
print(te.listTestModels())
:returns: list of test model paths | tellurium/bombBeetle.py | listTestModels | madfain/BombBeetle | 1 | python | def listTestModels():
' List roadrunner SBML test models.\n ::\n\n print(te.listTestModels())\n\n :returns: list of test model paths\n '
import roadrunner.testing
modelList = []
fileList = roadrunner.testing.dir('*.xml')
for pathName in fileList:
modelList.append(os.path.basename(pathName))
return modelList | def listTestModels():
' List roadrunner SBML test models.\n ::\n\n print(te.listTestModels())\n\n :returns: list of test model paths\n '
import roadrunner.testing
modelList = []
fileList = roadrunner.testing.dir('*.xml')
for pathName in fileList:
modelList.append(os.path.basename(pathName))
return modelList<|docstring|>List roadrunner SBML test models.
::
print(te.listTestModels())
:returns: list of test model paths<|endoftext|> |
46ed15f0dbba070e21e23cfc1448444a728efa37ef5c88be6f42c8f203eaf7be | def _model_function_factory(key):
' Dynamic creation of model functions.\n\n :param key: function key, i.e. the name of the function\n :type key: str\n :return: function object\n :rtype: function\n '
def f(self):
return getattr(self.model, key).__call__()
f.__name__ = key
f.__doc__ = getattr(roadrunner.ExecutableModel, key).__doc__
return f | Dynamic creation of model functions.
:param key: function key, i.e. the name of the function
:type key: str
:return: function object
:rtype: function | tellurium/bombBeetle.py | _model_function_factory | madfain/BombBeetle | 1 | python | def _model_function_factory(key):
' Dynamic creation of model functions.\n\n :param key: function key, i.e. the name of the function\n :type key: str\n :return: function object\n :rtype: function\n '
def f(self):
return getattr(self.model, key).__call__()
f.__name__ = key
f.__doc__ = getattr(roadrunner.ExecutableModel, key).__doc__
return f | def _model_function_factory(key):
' Dynamic creation of model functions.\n\n :param key: function key, i.e. the name of the function\n :type key: str\n :return: function object\n :rtype: function\n '
def f(self):
return getattr(self.model, key).__call__()
f.__name__ = key
f.__doc__ = getattr(roadrunner.ExecutableModel, key).__doc__
return f<|docstring|>Dynamic creation of model functions.
:param key: function key, i.e. the name of the function
:type key: str
:return: function object
:rtype: function<|endoftext|> |
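
A sketch of how such a factory is typically applied: attaching thin delegating methods to a wrapper class. The wrapper class and the chosen method names are assumptions for illustration, not this module's actual wiring.

```python
# Hypothetical wrapper; the wrapped keys are assumptions for illustration.
class ExtendedRoadRunner(roadrunner.RoadRunner):
    pass

for _key in ('getFloatingSpeciesIds', 'getBoundarySpeciesIds'):
    setattr(ExtendedRoadRunner, _key, _model_function_factory(_key))

# Instances now expose e.g. r.getFloatingSpeciesIds(), delegating to r.model.
```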
444383f11511e76164aa551db6acd12e0f1d35830be98a952e45c6445a3f3316 | def VersionDict():
'Return dict of version strings.'
import tesbml, tesedml, tecombine
return {'tellurium': getTelluriumVersion(), 'roadrunner': roadrunner.getVersionStr(roadrunner.VERSIONSTR_BASIC), 'antimony': antimony.__version__, 'phrasedml': phrasedml.__version__, 'tesbml': libsbml.getLibSBMLDottedVersion(), 'tesedml': tesedml.__version__, 'tecombine': tecombine.__version__} | Return dict of version strings. | tellurium/bombBeetle.py | VersionDict | madfain/BombBeetle | 1 | python | def VersionDict():
import tesbml, tesedml, tecombine
return {'tellurium': getTelluriumVersion(), 'roadrunner': roadrunner.getVersionStr(roadrunner.VERSIONSTR_BASIC), 'antimony': antimony.__version__, 'phrasedml': phrasedml.__version__, 'tesbml': libsbml.getLibSBMLDottedVersion(), 'tesedml': tesedml.__version__, 'tecombine': tecombine.__version__} | def VersionDict():
import tesbml, tesedml, tecombine
return {'tellurium': getTelluriumVersion(), 'roadrunner': roadrunner.getVersionStr(roadrunner.VERSIONSTR_BASIC), 'antimony': antimony.__version__, 'phrasedml': phrasedml.__version__, 'tesbml': libsbml.getLibSBMLDottedVersion(), 'tesedml': tesedml.__version__, 'tecombine': tecombine.__version__}<|docstring|>Return dict of version strings.<|endoftext|> |
d05b1e641925aec3ee0d0093991b489771654513cdcbacc10817ed4f92a6526f | def DumpJSONInfo():
'Tellurium dist info. Goes into COMBINE archive.'
return json.dumps({'authoring_tool': 'tellurium', 'info': 'Created with Tellurium (tellurium.analogmachine.org).', 'version_info': VersionDict()}) | Tellurium dist info. Goes into COMBINE archive. | tellurium/bombBeetle.py | DumpJSONInfo | madfain/BombBeetle | 1 | python | def DumpJSONInfo():
return json.dumps({'authoring_tool': 'tellurium', 'info': 'Created with Tellurium (tellurium.analogmachine.org).', 'version_info': VersionDict()}) | def DumpJSONInfo():
return json.dumps({'authoring_tool': 'tellurium', 'info': 'Created with Tellurium (tellurium.analogmachine.org).', 'version_info': VersionDict()})<|docstring|>Tellurium dist info. Goes into COMBINE archive.<|endoftext|> |
d75aa109fab436cb3ba19ad3ea3d3d35ed119b940bf27bb56ff6e3037fd9603b | def setLastReport(report):
'Used by SED-ML to save the last report created (for validation).'
global _last_report
_last_report = report | Used by SED-ML to save the last report created (for validation). | tellurium/bombBeetle.py | setLastReport | madfain/BombBeetle | 1 | python | def setLastReport(report):
global _last_report
_last_report = report | def setLastReport(report):
global _last_report
_last_report = report<|docstring|>Used by SED-ML to save the last report created (for validation).<|endoftext|> |
afee1cb5d42c9af24bd144e73c2ac8aff03f104a5e77934c10f1692459e4f48d | def getLastReport():
'Get the last report generated by SED-ML.'
global _last_report
return _last_report | Get the last report generated by SED-ML. | tellurium/bombBeetle.py | getLastReport | madfain/BombBeetle | 1 | python | def getLastReport():
global _last_report
return _last_report | def getLastReport():
global _last_report
return _last_report<|docstring|>Get the last report generated by SED-ML.<|endoftext|> |
c278e53f72a1fcf501f86f011ebed233b2705b0e13ce34dcbf9ccbb988f11488 | @classmethod
def all(cls, preds, index=0, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Calculates NMS predictions for all object classes\n\n Returns:\n 3 item tuple (\n bbs: 2d torch.Tensor\n scores: 1d torch.Tensor\n cls ids: 1d torch.Tensor\n )\n '
bbs = torch.empty(0, dtype=torch.float).to(device)
scores = torch.empty(0, dtype=torch.float).to(device)
cls_ids = torch.empty(0, dtype=torch.long).to(device)
for c in range((NUM_CLASSES - 1)):
single_preds = cls.single(c, preds, index, conf_thresh)
if single_preds:
(single_bbs, single_scores, single_cls_ids) = single_preds
bbs = torch.cat((bbs, single_bbs))
scores = torch.cat((scores, single_scores))
cls_ids = torch.cat((cls_ids, single_cls_ids))
(sorted_scores, sorted_ids) = scores.sort(0, descending=True)
return (bbs[sorted_ids], sorted_scores, cls_ids[sorted_ids]) | Calculates NMS predictions for all object classes
Returns:
3 item tuple (
bbs: 2d torch.Tensor
scores: 1d torch.Tensor
cls ids: 1d torch.Tensor
) | ssdmultibox/predict.py | all | aaronlelevier/ssd-pytorch | 0 | python | @classmethod
def all(cls, preds, index=0, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Calculates NMS predictions for all object classes\n\n Returns:\n 3 item tuple (\n bbs: 2d torch.Tensor\n scores: 1d torch.Tensor\n cls ids: 1d torch.Tensor\n )\n '
bbs = torch.empty(0, dtype=torch.float).to(device)
scores = torch.empty(0, dtype=torch.float).to(device)
cls_ids = torch.empty(0, dtype=torch.long).to(device)
for c in range((NUM_CLASSES - 1)):
single_preds = cls.single(c, preds, index, conf_thresh)
if single_preds:
(single_bbs, single_scores, single_cls_ids) = single_preds
bbs = torch.cat((bbs, single_bbs))
scores = torch.cat((scores, single_scores))
cls_ids = torch.cat((cls_ids, single_cls_ids))
(sorted_scores, sorted_ids) = scores.sort(0, descending=True)
return (bbs[sorted_ids], sorted_scores, cls_ids[sorted_ids]) | @classmethod
def all(cls, preds, index=0, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Calculates NMS predictions for all object classes\n\n Returns:\n 3 item tuple (\n bbs: 2d torch.Tensor\n scores: 1d torch.Tensor\n cls ids: 1d torch.Tensor\n )\n '
bbs = torch.empty(0, dtype=torch.float).to(device)
scores = torch.empty(0, dtype=torch.float).to(device)
cls_ids = torch.empty(0, dtype=torch.long).to(device)
for c in range((NUM_CLASSES - 1)):
single_preds = cls.single(c, preds, index, conf_thresh)
if single_preds:
(single_bbs, single_scores, single_cls_ids) = single_preds
bbs = torch.cat((bbs, single_bbs))
scores = torch.cat((scores, single_scores))
cls_ids = torch.cat((cls_ids, single_cls_ids))
(sorted_scores, sorted_ids) = scores.sort(0, descending=True)
return (bbs[sorted_ids], sorted_scores, cls_ids[sorted_ids])<|docstring|>Calculates NMS predictions for all object classes
Returns:
3 item tuple (
bbs: 2d torch.Tensor
scores: 1d torch.Tensor
cls ids: 1d torch.Tensor
)<|endoftext|> |
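
A hedged sketch of calling the class method above on one batch element with synthetic tensors. The enclosing class is not shown in this row, so it is written as `Predict` purely as a placeholder, and the 21 category columns (20 classes plus a trailing background column that `single` strips off) are an assumption based on the Pascal VOC setup.

```python
import torch

# Synthetic predictions shaped like the model output this module expects:
# bbs [batch, priors, 4] and cats [batch, priors, 21].
bbs = torch.tensor([[[0.1, 0.1, 0.4, 0.4],
                     [0.5, 0.5, 0.9, 0.9],
                     [0.2, 0.2, 0.6, 0.6]]]).to(device)
cats = torch.full((1, 3, 21), 0.01).to(device)
cats[0, 0, 3] = 0.95       # confident detection of class 3 in box 0
cats[0, 1, 7] = 0.90       # confident detection of class 7 in box 1
boxes, scores, cls_ids = Predict.all((bbs, cats), index=0, conf_thresh=0.5)
# Two detections come back, sorted by score: class 3 (0.95), then class 7 (0.90).
```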
fc5efc8bb30a8da2e93ec082ca9b789616f62be434c9f36fc80c775ab01ffab1 | @classmethod
def single(cls, cls_id, preds, index=0, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Full predictions for a single class\n\n Args:\n cls_id (int): category_id\n preds: mini-batch preds from model\n index (int): index of batch item to choose\n conf_thresh (float):\n percent confidence threshold to filter detections by\n Returns:\n tuple(bbs, scores, cls_ids) or None\n '
(bbs_preds, cats_preds) = preds
(item_bbs, item_cats) = (bbs_preds[index], cats_preds[index][:, :-1])
return cls.single_nms(cls_id, item_bbs, item_cats, conf_thresh) | Full predictions for a single class
Args:
cls_id (int): category_id
preds: mini-batch preds from model
index (int): index of batch item to choose
conf_thresh (float):
percent confidence threshold to filter detections by
Returns:
tuple(bbs, scores, cls_ids) or None | ssdmultibox/predict.py | single | aaronlelevier/ssd-pytorch | 0 | python | @classmethod
def single(cls, cls_id, preds, index=0, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Full predictions for a single class\n\n Args:\n cls_id (int): category_id\n preds: mini-batch preds from model\n index (int): index of batch item to choose\n conf_thresh (float):\n percent confidence threshold to filter detections by\n Returns:\n tuple(bbs, scores, cls_ids) or None\n '
(bbs_preds, cats_preds) = preds
(item_bbs, item_cats) = (bbs_preds[index], cats_preds[index][:, :-1])
return cls.single_nms(cls_id, item_bbs, item_cats, conf_thresh) | @classmethod
def single(cls, cls_id, preds, index=0, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Full predictions for a single class\n\n Args:\n cls_id (int): category_id\n preds: mini-batch preds from model\n index (int): index of batch item to choose\n conf_thresh (float):\n percent confidence threshold to filter detections by\n Returns:\n tuple(bbs, scores, cls_ids) or None\n '
(bbs_preds, cats_preds) = preds
(item_bbs, item_cats) = (bbs_preds[index], cats_preds[index][:, :-1])
return cls.single_nms(cls_id, item_bbs, item_cats, conf_thresh)<|docstring|>Full predictions for a single class
Args:
cls_id (int): category_id
preds: mini-batch preds from model
index (int): index of batch item to choose
conf_thresh (float):
percent confidence threshold to filter detections by
Returns:
tuple(bbs, scores, cls_ids) or None<|endoftext|> |
ec87e67a9ff57f2357ffd068664b7968e13e6225319e72e89fda9e584d9842cb | @classmethod
def single_nms(cls, cls_id, item_bbs, item_cats, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Returns the NMS detections for a single image\n\n Args:\n cls_id (int): category id of object to detect\n item_bbs (2d array): [feature_maps, 4] bbs preds\n item_cats (2d array):[feature_maps, 20] one-hot cats preds\n conf_thresh (float):\n percent confidence threshold to filter detections by\n Returns:\n tuple ([nms_bbs, 4], [scores], cls_ids) or None if no matches\n '
item_bbs = item_bbs.detach()
item_cats = item_cats.detach()
(cls_conf, cls_ids) = item_cats.max(1)
cls_conf_thresh_mask = cls_conf.gt(conf_thresh)
cls_ids_gt_conf_thresh = cls_ids[cls_conf_thresh_mask]
cls_conf_gt_conf_thresh = cls_conf[cls_conf_thresh_mask]
bbs_gt_conf_thresh = item_bbs[cls_conf_thresh_mask]
gt_conf_thresh_mask = cls_ids_gt_conf_thresh.eq(cls_id)
boxes = bbs_gt_conf_thresh[gt_conf_thresh_mask]
scores = cls_conf_gt_conf_thresh[gt_conf_thresh_mask]
if (not scores.sum().item()):
return
(nms_ids, nms_count) = cls.nms(boxes, scores)
detected_ids = nms_ids[:nms_count]
detected_cls_ids = torch.tensor([cls_id]).repeat(nms_count).to(device)
return (boxes[detected_ids], scores[detected_ids], detected_cls_ids) | Returns the NMS detections for a single image
Args:
cls_id (int): category id of object to detect
item_bbs (2d array): [feature_maps, 4] bbs preds
item_cats (2d array):[feature_maps, 20] one-hot cats preds
conf_thresh (float):
percent confidence threshold to filter detections by
Returns:
tuple ([nms_bbs, 4], [scores], cls_ids) or None if no matches | ssdmultibox/predict.py | single_nms | aaronlelevier/ssd-pytorch | 0 | python | @classmethod
def single_nms(cls, cls_id, item_bbs, item_cats, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Returns the NMS detections for a single image\n\n Args:\n cls_id (int): category id of object to detect\n item_bbs (2d array): [feature_maps, 4] bbs preds\n item_cats (2d array):[feature_maps, 20] one-hot cats preds\n conf_thresh (float):\n percent confidence threshold to filter detections by\n Returns:\n tuple ([nms_bbs, 4], [scores], cls_ids) or None if no matches\n '
item_bbs = item_bbs.detach()
item_cats = item_cats.detach()
(cls_conf, cls_ids) = item_cats.max(1)
cls_conf_thresh_mask = cls_conf.gt(conf_thresh)
cls_ids_gt_conf_thresh = cls_ids[cls_conf_thresh_mask]
cls_conf_gt_conf_thresh = cls_conf[cls_conf_thresh_mask]
bbs_gt_conf_thresh = item_bbs[cls_conf_thresh_mask]
gt_conf_thresh_mask = cls_ids_gt_conf_thresh.eq(cls_id)
boxes = bbs_gt_conf_thresh[gt_conf_thresh_mask]
scores = cls_conf_gt_conf_thresh[gt_conf_thresh_mask]
if (not scores.sum().item()):
return
(nms_ids, nms_count) = cls.nms(boxes, scores)
detected_ids = nms_ids[:nms_count]
detected_cls_ids = torch.tensor([cls_id]).repeat(nms_count).to(device)
return (boxes[detected_ids], scores[detected_ids], detected_cls_ids) | @classmethod
def single_nms(cls, cls_id, item_bbs, item_cats, conf_thresh=cfg.NMS_CONF_THRESH):
'\n Returns the NMS detections for a single image\n\n Args:\n cls_id (int): category id of object to detect\n item_bbs (2d array): [feature_maps, 4] bbs preds\n item_cats (2d array):[feature_maps, 20] one-hot cats preds\n conf_thresh (float):\n percent confidence threshold to filter detections by\n Returns:\n tuple ([nms_bbs, 4], [scores], cls_ids) or None if no matches\n '
item_bbs = item_bbs.detach()
item_cats = item_cats.detach()
(cls_conf, cls_ids) = item_cats.max(1)
cls_conf_thresh_mask = cls_conf.gt(conf_thresh)
cls_ids_gt_conf_thresh = cls_ids[cls_conf_thresh_mask]
cls_conf_gt_conf_thresh = cls_conf[cls_conf_thresh_mask]
bbs_gt_conf_thresh = item_bbs[cls_conf_thresh_mask]
gt_conf_thresh_mask = cls_ids_gt_conf_thresh.eq(cls_id)
boxes = bbs_gt_conf_thresh[gt_conf_thresh_mask]
scores = cls_conf_gt_conf_thresh[gt_conf_thresh_mask]
if (not scores.sum().item()):
return
(nms_ids, nms_count) = cls.nms(boxes, scores)
detected_ids = nms_ids[:nms_count]
detected_cls_ids = torch.tensor([cls_id]).repeat(nms_count).to(device)
return (boxes[detected_ids], scores[detected_ids], detected_cls_ids)<|docstring|>Returns the NMS detections for a single image
Args:
cls_id (int): category id of object to detect
item_bbs (2d array): [feature_maps, 4] bbs preds
item_cats (2d array):[feature_maps, 20] one-hot cats preds
conf_thresh (float):
percent confidence threshold to filter detections by
Returns:
tuple ([nms_bbs, 4], [scores], cls_ids) or None if no matches<|endoftext|> |
7e73047e4937f8d8818550333e850dd1c2b058d9c1ab73e21fd155b5365bc580 | @staticmethod
def nms(boxes, scores, overlap=0.5, top_k=200):
'Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The indices of the kept boxes with respect to num_priors.\n '
keep = scores.new(scores.size(0)).zero_().long()
if (boxes.numel() == 0):
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul((x2 - x1), (y2 - y1))
(v, idx) = scores.sort(0)
idx = idx[(- top_k):]
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
count = 0
while (idx.numel() > 0):
i = idx[(- 1)]
keep[count] = i
count += 1
if (idx.size(0) == 1):
break
idx = idx[:(- 1)]
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = (xx2 - xx1)
h = (yy2 - yy1)
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = (w * h)
rem_areas = torch.index_select(area, 0, idx)
union = ((rem_areas - inter) + area[i])
IoU = (inter / union)
idx = idx[IoU.le(overlap)]
return (keep, count) | Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
scores: (tensor) The class pred scores for the img, Shape: [num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors. | ssdmultibox/predict.py | nms | aaronlelevier/ssd-pytorch | 0 | python | @staticmethod
def nms(boxes, scores, overlap=0.5, top_k=200):
'Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The indices of the kept boxes with respect to num_priors.\n '
keep = scores.new(scores.size(0)).zero_().long()
if (boxes.numel() == 0):
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul((x2 - x1), (y2 - y1))
(v, idx) = scores.sort(0)
idx = idx[(- top_k):]
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
count = 0
while (idx.numel() > 0):
i = idx[(- 1)]
keep[count] = i
count += 1
if (idx.size(0) == 1):
break
idx = idx[:(- 1)]
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = (xx2 - xx1)
h = (yy2 - yy1)
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = (w * h)
rem_areas = torch.index_select(area, 0, idx)
union = ((rem_areas - inter) + area[i])
IoU = (inter / union)
idx = idx[IoU.le(overlap)]
return (keep, count) | @staticmethod
def nms(boxes, scores, overlap=0.5, top_k=200):
'Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The indices of the kept boxes with respect to num_priors.\n '
keep = scores.new(scores.size(0)).zero_().long()
if (boxes.numel() == 0):
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul((x2 - x1), (y2 - y1))
(v, idx) = scores.sort(0)
idx = idx[(- top_k):]
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
count = 0
while (idx.numel() > 0):
i = idx[(- 1)]
keep[count] = i
count += 1
if (idx.size(0) == 1):
break
idx = idx[:(- 1)]
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = (xx2 - xx1)
h = (yy2 - yy1)
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = (w * h)
rem_areas = torch.index_select(area, 0, idx)
union = ((rem_areas - inter) + area[i])
IoU = (inter / union)
idx = idx[IoU.le(overlap)]
return (keep, count)<|docstring|>Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
scores: (tensor) The class pred scores for the img, Shape: [num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.<|endoftext|> |
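
A toy check of the suppression routine itself, called here through the same `Predict` placeholder used above: two heavily overlapping boxes plus one separate box, so the lower-scoring overlap is dropped.

```python
import torch

boxes = torch.tensor([[0.10, 0.10, 0.50, 0.50],
                      [0.12, 0.11, 0.52, 0.49],   # overlaps box 0, lower score
                      [0.70, 0.70, 0.95, 0.95]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep, count = Predict.nms(boxes, scores, overlap=0.5)
print(keep[:count])   # tensor([0, 2]); the overlapping box 1 is suppressed
```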
bb7afc8f55b6153b449a1d9ed6a75316f9fff51b7cf890ee251bd268a648a1a8 | def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[(np.ndarray, float)]:
'E-step: Softly assigns each datapoint to a gaussian component\n\n Args:\n X: (n, d) array holding the data\n mixture: the current gaussian mixture\n\n Returns:\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the assignment\n '
(n, d) = X.shape
(k, _) = mixture.mu.shape
post = np.zeros([n, k])
ll = 0
p = mixture.p
m = mixture.mu
v = mixture.var
for (i, xx) in enumerate(X):
prob = np.zeros(k)
for j in range(k):
ex = (((- 1) / (2 * v[j])) * (np.linalg.norm((xx - m[j])) ** 2))
prob[j] = ((p[j] / np.power(((2 * np.pi) * v[j]), (d / 2))) * np.exp(ex))
ll += np.log(prob.sum())
prob = (prob / prob.sum())
post[i, :] = prob
return (post, ll)
raise NotImplementedError | E-step: Softly assigns each datapoint to a gaussian component
Args:
X: (n, d) array holding the data
mixture: the current gaussian mixture
Returns:
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the assignment | Statistical Method for Colaborative Filtering/em_simple.py | estep | arifmujib/MIT-Machine-Learning-Projects | 0 | python | def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[(np.ndarray, float)]:
'E-step: Softly assigns each datapoint to a gaussian component\n\n Args:\n X: (n, d) array holding the data\n mixture: the current gaussian mixture\n\n Returns:\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the assignment\n '
(n, d) = X.shape
(k, _) = mixture.mu.shape
post = np.zeros([n, k])
ll = 0
p = mixture.p
m = mixture.mu
v = mixture.var
for (i, xx) in enumerate(X):
prob = np.zeros(k)
for j in range(k):
ex = (((- 1) / (2 * v[j])) * (np.linalg.norm((xx - m[j])) ** 2))
prob[j] = ((p[j] / np.power(((2 * np.pi) * v[j]), (d / 2))) * np.exp(ex))
ll += np.log(prob.sum())
prob = (prob / prob.sum())
post[i, :] = prob
return (post, ll)
raise NotImplementedError | def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[(np.ndarray, float)]:
'E-step: Softly assigns each datapoint to a gaussian component\n\n Args:\n X: (n, d) array holding the data\n mixture: the current gaussian mixture\n\n Returns:\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the assignment\n '
(n, d) = X.shape
(k, _) = mixture.mu.shape
post = np.zeros([n, k])
ll = 0
p = mixture.p
m = mixture.mu
v = mixture.var
for (i, xx) in enumerate(X):
prob = np.zeros(k)
for j in range(k):
ex = (((- 1) / (2 * v[j])) * (np.linalg.norm((xx - m[j])) ** 2))
prob[j] = ((p[j] / np.power(((2 * np.pi) * v[j]), (d / 2))) * np.exp(ex))
ll += np.log(prob.sum())
prob = (prob / prob.sum())
post[i, :] = prob
return (post, ll)
raise NotImplementedError<|docstring|>E-step: Softly assigns each datapoint to a gaussian component
Args:
X: (n, d) array holding the data
mixture: the current gaussian mixture
Returns:
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the assignment<|endoftext|> |
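
A minimal sketch of one E-step on synthetic data. `GaussianMixture` is the container with fields `mu`, `var`, `p` already used by this module (its keyword constructor is confirmed by `mstep` below).

```python
import numpy as np

X = np.array([[0.0, 0.0], [0.2, 0.1], [5.0, 5.0], [5.1, 4.9]])
mixture = GaussianMixture(mu=np.array([[0.0, 0.0], [5.0, 5.0]]),
                          var=np.array([1.0, 1.0]),
                          p=np.array([0.5, 0.5]))
post, ll = estep(X, mixture)
# Rows 0-1 of `post` put nearly all weight on component 0, rows 2-3 on component 1.
```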
e22b59d74b7d155010597d20f62313aa22cbe16ab44319a6db3828424f2f99fe | def mstep(X: np.ndarray, post: np.ndarray) -> GaussianMixture:
'M-step: Updates the gaussian mixture by maximizing the log-likelihood\n of the weighted dataset\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n '
(n, d) = X.shape
(_, k) = post.shape
mu = np.zeros((k, d))
var = np.ones(k)
n_hat = post.sum(axis=0)
p = (n_hat / n)
px = np.dot(post.T, X)
for i in range(k):
mu[i] = (px[i] / n_hat[i])
p = ((1 / n) * np.sum(post, axis=0))
su = 0
for i in range(n):
mu_h = np.zeros_like(var)
for j in range(k):
mu_h[j] = (np.linalg.norm((X[i] - mu[j])) ** 2)
nu = (post[i] * mu_h)
su += nu
var = (su / (d * np.sum(post, axis=0)))
return GaussianMixture(mu=mu, var=var, p=p)
raise NotImplementedError | M-step: Updates the gaussian mixture by maximizing the log-likelihood
of the weighted dataset
Args:
X: (n, d) array holding the data
post: (n, K) array holding the soft counts
for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture | Statistical Method for Colaborative Filtering/em_simple.py | mstep | arifmujib/MIT-Machine-Learning-Projects | 0 | python | def mstep(X: np.ndarray, post: np.ndarray) -> GaussianMixture:
'M-step: Updates the gaussian mixture by maximizing the log-likelihood\n of the weighted dataset\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n '
(n, d) = X.shape
(_, k) = post.shape
mu = np.zeros((k, d))
var = np.ones(k)
n_hat = post.sum(axis=0)
p = (n_hat / n)
px = np.dot(post.T, X)
for i in range(k):
mu[i] = (px[i] / n_hat[i])
p = ((1 / n) * np.sum(post, axis=0))
su = 0
for i in range(n):
mu_h = np.zeros_like(var)
for j in range(k):
mu_h[j] = (np.linalg.norm((X[i] - mu[j])) ** 2)
nu = (post[i] * mu_h)
su += nu
var = (su / (d * np.sum(post, axis=0)))
return GaussianMixture(mu=mu, var=var, p=p)
raise NotImplementedError | def mstep(X: np.ndarray, post: np.ndarray) -> GaussianMixture:
'M-step: Updates the gaussian mixture by maximizing the log-likelihood\n of the weighted dataset\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n '
(n, d) = X.shape
(_, k) = post.shape
mu = np.zeros((k, d))
var = np.ones(k)
n_hat = post.sum(axis=0)
p = (n_hat / n)
px = np.dot(post.T, X)
for i in range(k):
mu[i] = (px[i] / n_hat[i])
p = ((1 / n) * np.sum(post, axis=0))
su = 0
for i in range(n):
mu_h = np.zeros_like(var)
for j in range(k):
mu_h[j] = (np.linalg.norm((X[i] - mu[j])) ** 2)
nu = (post[i] * mu_h)
su += nu
var = (su / (d * np.sum(post, axis=0)))
return GaussianMixture(mu=mu, var=var, p=p)
raise NotImplementedError<|docstring|>M-step: Updates the gaussian mixture by maximizing the log-likelihood
of the weighted dataset
Args:
X: (n, d) array holding the data
post: (n, K) array holding the soft counts
for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture<|endoftext|> |
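The loops in the M-step above implement closed-form weighted-average updates. A compact vectorized sketch of the same updates, under the same spherical-Gaussian assumption; the soft counts and data below are made-up illustrative values.
import numpy as np
X = np.array([[0.0, 0.0], [1.0, 1.0], [4.0, 4.0]])      # (n, d) data (illustrative)
post = np.array([[0.9, 0.1], [0.8, 0.2], [0.1, 0.9]])   # (n, K) soft counts from an E-step
n, d = X.shape
n_hat = post.sum(axis=0)                 # effective number of points per component, (K,)
p = n_hat / n                            # updated mixing weights
mu = (post.T @ X) / n_hat[:, None]       # updated means, (K, d)
# Updated per-component variance: weighted mean squared distance divided by d
sq_dist = ((X[:, None, :] - mu[None, :, :]) ** 2).sum(axis=2)   # (n, K)
var = (post * sq_dist).sum(axis=0) / (d * n_hat)
print(p, mu, var)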
a80b44ed1d8d7cb7dba5979a5a509f76d2833b9329ff86af92f192a0389d565c | def run(X: np.ndarray, mixture: GaussianMixture, post: np.ndarray) -> Tuple[(GaussianMixture, np.ndarray, float)]:
'Runs the mixture model\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the current assignment\n '
prev_ll = None
ll = None
while ((prev_ll is None) or ((ll - prev_ll) > (1e-06 * abs(ll)))):
prev_ll = ll
(post, ll) = estep(X, mixture)
mixture = mstep(X, post)
return (mixture, post, ll)
raise NotImplementedError | Runs the mixture model
Args:
X: (n, d) array holding the data
post: (n, K) array holding the soft counts
for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the current assignment | Statistical Method for Colaborative Filtering/em_simple.py | run | arifmujib/MIT-Machine-Learning-Projects | 0 | python | def run(X: np.ndarray, mixture: GaussianMixture, post: np.ndarray) -> Tuple[(GaussianMixture, np.ndarray, float)]:
'Runs the mixture model\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the current assignment\n '
prev_ll = None
ll = None
while ((prev_ll is None) or ((ll - prev_ll) > (1e-06 * abs(ll)))):
prev_ll = ll
(post, ll) = estep(X, mixture)
mixture = mstep(X, post)
return (mixture, post, ll)
raise NotImplementedError | def run(X: np.ndarray, mixture: GaussianMixture, post: np.ndarray) -> Tuple[(GaussianMixture, np.ndarray, float)]:
'Runs the mixture model\n\n Args:\n X: (n, d) array holding the data\n post: (n, K) array holding the soft counts\n for all components for all examples\n\n Returns:\n GaussianMixture: the new gaussian mixture\n np.ndarray: (n, K) array holding the soft counts\n for all components for all examples\n float: log-likelihood of the current assignment\n '
prev_ll = None
ll = None
while ((prev_ll is None) or ((ll - prev_ll) > (1e-06 * abs(ll)))):
prev_ll = ll
(post, ll) = estep(X, mixture)
mixture = mstep(X, post)
return (mixture, post, ll)
raise NotImplementedError<|docstring|>Runs the mixture model
Args:
X: (n, d) array holding the data
post: (n, K) array holding the soft counts
for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the current assignment<|endoftext|> |
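The while-condition above is a relative-improvement stopping rule: iteration continues while the gain in log-likelihood exceeds 1e-6 times its magnitude. A tiny worked example of that criterion in isolation, with made-up numbers:
prev_ll, ll = -1000.0, -999.9995
improvement = ll - prev_ll          # 0.0005
threshold = 1e-06 * abs(ll)         # about 0.001
print(improvement > threshold)      # False, so the loop above would stop here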
105ed72bdb04f3dc280d8069b074065171fc22195edce4d891efb90a56b4edf8 | def ksg_cmi(x_data, y_data, z_data, k=5):
'\n KSG Conditional Mutual Information Estimator: I(X;Y|Z)\n See e.g. http://proceedings.mlr.press/v84/runge18a.html\n\n x_data: data with shape (num_samples, x_dim) or (num_samples,)\n y_data: data with shape (num_samples, y_dim) or (num_samples,)\n z_data: conditioning data with shape (num_samples, z_dim) or (num_samples,)\n k: number of nearest neighbors for estimation\n * k recommended to be on the order of ~ num_samples/10 for independence testing\n '
xzy_data = np.concatenate(((x_data.reshape((- 1), 1) if (x_data.ndim == 1) else x_data), (z_data.reshape((- 1), 1) if (z_data.ndim == 1) else z_data), (y_data.reshape((- 1), 1) if (y_data.ndim == 1) else y_data)), axis=1)
lookup = NearestNeighbors(metric='chebyshev')
lookup.fit(xzy_data)
radius = lookup.kneighbors(n_neighbors=k, return_distance=True)[0]
radius = np.nextafter(radius[(:, (- 1))], 0)
x_dim = (x_data.shape[1] if (x_data.ndim > 1) else 1)
z_dim = (z_data.shape[1] if (z_data.ndim > 1) else 1)
lookup.fit(xzy_data[(:, :(x_dim + z_dim))])
n_xz = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
lookup.fit(xzy_data[(:, x_dim:)])
n_yz = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
lookup.fit(xzy_data[(:, x_dim:(x_dim + z_dim))])
n_z = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
return (digamma(k) + np.mean(((digamma((n_z + 1.0)) - digamma((n_xz + 1.0))) - digamma((n_yz + 1.0))))) | KSG Conditional Mutual Information Estimator: I(X;Y|Z)
See e.g. http://proceedings.mlr.press/v84/runge18a.html
x_data: data with shape (num_samples, x_dim) or (num_samples,)
y_data: data with shape (num_samples, y_dim) or (num_samples,)
z_data: conditioning data with shape (num_samples, z_dim) or (num_samples,)
k: number of nearest neighbors for estimation
* k recommended to be on the order of ~ num_samples/10 for independence testing | pycit/estimators/ksg_cmi.py | ksg_cmi | syanga/pycit | 10 | python | def ksg_cmi(x_data, y_data, z_data, k=5):
'\n KSG Conditional Mutual Information Estimator: I(X;Y|Z)\n See e.g. http://proceedings.mlr.press/v84/runge18a.html\n\n x_data: data with shape (num_samples, x_dim) or (num_samples,)\n y_data: data with shape (num_samples, y_dim) or (num_samples,)\n z_data: conditioning data with shape (num_samples, z_dim) or (num_samples,)\n k: number of nearest neighbors for estimation\n * k recommended to be on the order of ~ num_samples/10 for independence testing\n '
xzy_data = np.concatenate(((x_data.reshape((- 1), 1) if (x_data.ndim == 1) else x_data), (z_data.reshape((- 1), 1) if (z_data.ndim == 1) else z_data), (y_data.reshape((- 1), 1) if (y_data.ndim == 1) else y_data)), axis=1)
lookup = NearestNeighbors(metric='chebyshev')
lookup.fit(xzy_data)
radius = lookup.kneighbors(n_neighbors=k, return_distance=True)[0]
radius = np.nextafter(radius[(:, (- 1))], 0)
x_dim = (x_data.shape[1] if (x_data.ndim > 1) else 1)
z_dim = (z_data.shape[1] if (z_data.ndim > 1) else 1)
lookup.fit(xzy_data[(:, :(x_dim + z_dim))])
n_xz = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
lookup.fit(xzy_data[(:, x_dim:)])
n_yz = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
lookup.fit(xzy_data[(:, x_dim:(x_dim + z_dim))])
n_z = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
return (digamma(k) + np.mean(((digamma((n_z + 1.0)) - digamma((n_xz + 1.0))) - digamma((n_yz + 1.0))))) | def ksg_cmi(x_data, y_data, z_data, k=5):
'\n KSG Conditional Mutual Information Estimator: I(X;Y|Z)\n See e.g. http://proceedings.mlr.press/v84/runge18a.html\n\n x_data: data with shape (num_samples, x_dim) or (num_samples,)\n y_data: data with shape (num_samples, y_dim) or (num_samples,)\n z_data: conditioning data with shape (num_samples, z_dim) or (num_samples,)\n k: number of nearest neighbors for estimation\n * k recommended to be on the order of ~ num_samples/10 for independence testing\n '
xzy_data = np.concatenate(((x_data.reshape((- 1), 1) if (x_data.ndim == 1) else x_data), (z_data.reshape((- 1), 1) if (z_data.ndim == 1) else z_data), (y_data.reshape((- 1), 1) if (y_data.ndim == 1) else y_data)), axis=1)
lookup = NearestNeighbors(metric='chebyshev')
lookup.fit(xzy_data)
radius = lookup.kneighbors(n_neighbors=k, return_distance=True)[0]
radius = np.nextafter(radius[(:, (- 1))], 0)
x_dim = (x_data.shape[1] if (x_data.ndim > 1) else 1)
z_dim = (z_data.shape[1] if (z_data.ndim > 1) else 1)
lookup.fit(xzy_data[(:, :(x_dim + z_dim))])
n_xz = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
lookup.fit(xzy_data[(:, x_dim:)])
n_yz = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
lookup.fit(xzy_data[(:, x_dim:(x_dim + z_dim))])
n_z = np.array([i.size for i in lookup.radius_neighbors(radius=radius, return_distance=False)])
return (digamma(k) + np.mean(((digamma((n_z + 1.0)) - digamma((n_xz + 1.0))) - digamma((n_yz + 1.0)))))<|docstring|>KSG Conditional Mutual Information Estimator: I(X;Y|Z)
See e.g. http://proceedings.mlr.press/v84/runge18a.html
x_data: data with shape (num_samples, x_dim) or (num_samples,)
y_data: data with shape (num_samples, y_dim) or (num_samples,)
z_data: conditioning data with shape (num_samples, z_dim) or (num_samples,)
k: number of nearest neighbors for estimation
* k recommended to be on the order of ~ num_samples/10 for independence testing<|endoftext|> |
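A possible usage sketch for the estimator above. The import path is inferred from the path and name columns of this record (an assumption about how the package is installed); the synthetic data forms a Markov chain X -> Z -> Y, so the estimated conditional mutual information I(X;Y|Z) should be close to zero.
import numpy as np
from pycit.estimators.ksg_cmi import ksg_cmi   # module path assumed from the record above
rng = np.random.default_rng(0)
num_samples = 1000
x = rng.normal(size=num_samples)
z = x + 0.5 * rng.normal(size=num_samples)     # Z depends on X
y = z + 0.5 * rng.normal(size=num_samples)     # Y depends only on Z
# k ~ num_samples/10, following the recommendation in the docstring above
print(ksg_cmi(x, y, z, k=num_samples // 10))   # expected to be near 0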
baeabc73d3136570af6251ab40da854276dc84ab372f0bc2475578633c8c66ac | def plot_pfilter(time, expected, observed, particles, weights, means):
'Apply a particle filter to a time series, and plot the\n first component of the predictions alongside the expected\n output.'
plt.plot(time, expected, 'C1', lw=3)
plt.plot(time, observed, '+C3', lw=3)
ts = np.tile(time[(:, None)], particles.shape[1]).ravel()
weights = weights.ravel()
rgba_colors = np.zeros((len(weights), 4))
rgba_colors[(:, 0:3)] = matplotlib.colors.to_rgb('C2')
weights *= 10
rgba_colors[(:, 3)] = np.clip(weights, 0, 1)
plt.scatter(ts, particles[(:, :, 0)].ravel(), c=rgba_colors, s=(weights * 200))
plt.plot(time, means, 'C0--', lw=2)
plt.legend(['True', 'Observed', 'Mean estimate', 'Particle'])
plt.xlabel('Time')
plt.ylabel('X')
plt.title('Particle filter estimate') | Apply a particle filter to a time series, and plot the
first component of the predictions alongside the expected
output. | dynamic/particle_utils.py | plot_pfilter | johnhw/summerschool2017 | 6 | python | def plot_pfilter(time, expected, observed, particles, weights, means):
'Apply a particle filter to a time series, and plot the\n first component of the predictions alongside the expected\n output.'
plt.plot(time, expected, 'C1', lw=3)
plt.plot(time, observed, '+C3', lw=3)
ts = np.tile(time[(:, None)], particles.shape[1]).ravel()
weights = weights.ravel()
rgba_colors = np.zeros((len(weights), 4))
rgba_colors[(:, 0:3)] = matplotlib.colors.to_rgb('C2')
weights *= 10
rgba_colors[(:, 3)] = np.clip(weights, 0, 1)
plt.scatter(ts, particles[(:, :, 0)].ravel(), c=rgba_colors, s=(weights * 200))
plt.plot(time, means, 'C0--', lw=2)
plt.legend(['True', 'Observed', 'Mean estimate', 'Particle'])
plt.xlabel('Time')
plt.ylabel('X')
plt.title('Particle filter estimate') | def plot_pfilter(time, expected, observed, particles, weights, means):
'Apply a particle filter to a time series, and plot the\n first component of the predictions alongside the expected\n output.'
plt.plot(time, expected, 'C1', lw=3)
plt.plot(time, observed, '+C3', lw=3)
ts = np.tile(time[(:, None)], particles.shape[1]).ravel()
weights = weights.ravel()
rgba_colors = np.zeros((len(weights), 4))
rgba_colors[(:, 0:3)] = matplotlib.colors.to_rgb('C2')
weights *= 10
rgba_colors[(:, 3)] = np.clip(weights, 0, 1)
plt.scatter(ts, particles[(:, :, 0)].ravel(), c=rgba_colors, s=(weights * 200))
plt.plot(time, means, 'C0--', lw=2)
plt.legend(['True', 'Observed', 'Mean estimate', 'Particle'])
plt.xlabel('Time')
plt.ylabel('X')
plt.title('Particle filter estimate')<|docstring|>Apply a particle filter to a time series, and plot the
first component of the predictions alongside the expected
output.<|endoftext|> |
66eb7c8e5c20905fa00e8b271c0774102646afffcbeb206cb2242ed09b661ed2 | def test_mia_run(self):
'"This tests the run method of MIA with a reduced data set'
(traces, keys, plain) = FileLoader.main(CONST_DEFAULT_TRACES_FILE, CONST_DEFAULT_KEYS_FILE, CONST_DEFAULT_PLAIN_FILE)
(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext) = DataPartitioner.get_traces(traces, keys, plain, 1000, 0, 0, 1000, 10, 1, 0, False)
expected = np.array([43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.assertTrue(np.array_equal(Mia.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext, 1, 0, 0, 1, 10, True), expected)) | This tests the run method of MIA with a reduced data set | tests/attack/test_mia_integration.py | test_mia_run | AISyLab/side-channel-attacks | 14 | python | def test_mia_run(self):
(traces, keys, plain) = FileLoader.main(CONST_DEFAULT_TRACES_FILE, CONST_DEFAULT_KEYS_FILE, CONST_DEFAULT_PLAIN_FILE)
(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext) = DataPartitioner.get_traces(traces, keys, plain, 1000, 0, 0, 1000, 10, 1, 0, False)
expected = np.array([43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.assertTrue(np.array_equal(Mia.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext, 1, 0, 0, 1, 10, True), expected)) | def test_mia_run(self):
(traces, keys, plain) = FileLoader.main(CONST_DEFAULT_TRACES_FILE, CONST_DEFAULT_KEYS_FILE, CONST_DEFAULT_PLAIN_FILE)
(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext) = DataPartitioner.get_traces(traces, keys, plain, 1000, 0, 0, 1000, 10, 1, 0, False)
expected = np.array([43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.assertTrue(np.array_equal(Mia.run(profiling_traces, profiling_keys, profiling_plaintext, attack_traces, attack_keys, attack_plaintext, 1, 0, 0, 1, 10, True), expected))<|docstring|>This tests the run method of MIA with a reduced data set<|endoftext|>
ca1149be38092976dd554c734b0958160fd009550b59551e4526c2e9660de017 | @functools.lru_cache()
def get_backend():
'Return current backend.'
return {'file': FileBackend, 's3': S3Backend}[app.config['STORAGE_BACKEND']['name']]() | Return current backend. | fluffy/component/backends.py | get_backend | fawaf/fluffy | 135 | python | @functools.lru_cache()
def get_backend():
return {'file': FileBackend, 's3': S3Backend}[app.config['STORAGE_BACKEND']['name']]() | @functools.lru_cache()
def get_backend():
return {'file': FileBackend, 's3': S3Backend}[app.config['STORAGE_BACKEND']['name']]()<|docstring|>Return current backend.<|endoftext|> |
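The function above combines functools.lru_cache with a config-driven class lookup, so the selected backend is instantiated once and then reused. A stand-alone sketch of that pattern with dummy classes; the config dictionary and class names here are illustrative stand-ins, not fluffy's real objects.
import functools
CONFIG = {'STORAGE_BACKEND': {'name': 'file'}}   # illustrative stand-in for app.config
class FileBackend:
    pass
class S3Backend:
    pass
@functools.lru_cache()
def get_backend():
    # Look up the backend class by its configured name, then instantiate it.
    # lru_cache means repeated calls return the same cached instance.
    return {'file': FileBackend, 's3': S3Backend}[CONFIG['STORAGE_BACKEND']['name']]()
print(get_backend() is get_backend())   # True: one cached instance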
1a337dae630dac627863a5f8d99be0ba67fc88a39db3683d7db99946ea53dddf | def test_list_forms_data(admin_user):
'Should return the correct fields about the forms.'
form = EventFormFactory()
field = form.fields.first()
option = field.options.first()
client = get_api_client(user=admin_user)
url = (_get_forms_url() + '?all')
response = client.get(url)
response = response.json()
assert (response[0] == {'id': str(form.id), 'resource_type': 'EventForm', 'title': form.title, 'event': EventListSerializer(form.event).data, 'type': form.type.name, 'viewer_has_answered': False, 'fields': [{'id': str(field.id), 'title': field.title, 'options': [{'id': str(option.id), 'title': option.title, 'order': option.order}], 'type': field.type.name, 'required': field.required, 'order': field.order}], 'template': False}) | Should return the correct fields about the forms. | app/tests/forms/test_eventform_integration.py | test_list_forms_data | TIHLDE/Lepton | 7 | python | def test_list_forms_data(admin_user):
form = EventFormFactory()
field = form.fields.first()
option = field.options.first()
client = get_api_client(user=admin_user)
url = (_get_forms_url() + '?all')
response = client.get(url)
response = response.json()
assert (response[0] == {'id': str(form.id), 'resource_type': 'EventForm', 'title': form.title, 'event': EventListSerializer(form.event).data, 'type': form.type.name, 'viewer_has_answered': False, 'fields': [{'id': str(field.id), 'title': field.title, 'options': [{'id': str(option.id), 'title': option.title, 'order': option.order}], 'type': field.type.name, 'required': field.required, 'order': field.order}], 'template': False}) | def test_list_forms_data(admin_user):
form = EventFormFactory()
field = form.fields.first()
option = field.options.first()
client = get_api_client(user=admin_user)
url = (_get_forms_url() + '?all')
response = client.get(url)
response = response.json()
assert (response[0] == {'id': str(form.id), 'resource_type': 'EventForm', 'title': form.title, 'event': EventListSerializer(form.event).data, 'type': form.type.name, 'viewer_has_answered': False, 'fields': [{'id': str(field.id), 'title': field.title, 'options': [{'id': str(option.id), 'title': option.title, 'order': option.order}], 'type': field.type.name, 'required': field.required, 'order': field.order}], 'template': False})<|docstring|>Should return the correct fields about the forms.<|endoftext|> |
fc60472e02e3df846426609678a97dece5afe7a257ee07ed3bbdfc3e97d86f17 | def test_retrieve_evaluation_event_form_as_member_when_has_attended_event(member):
'\n A member should be able to retrieve an event form of type evaluation if\n they have attended the event.\n '
event = EventFactory(limit=1)
registration = RegistrationFactory(user=member, event=event, is_on_wait=False, has_attended=True)
form = EventFormFactory(event=registration.event, type=EventFormType.EVALUATION)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.get(url)
assert (response.status_code == status.HTTP_200_OK)
assert response.json() | A member should be able to retrieve an event form of type evaluation if
they have attended the event. | app/tests/forms/test_eventform_integration.py | test_retrieve_evaluation_event_form_as_member_when_has_attended_event | TIHLDE/Lepton | 7 | python | def test_retrieve_evaluation_event_form_as_member_when_has_attended_event(member):
'\n A member should be able to retrieve an event form of type evaluation if\n they have attended the event.\n '
event = EventFactory(limit=1)
registration = RegistrationFactory(user=member, event=event, is_on_wait=False, has_attended=True)
form = EventFormFactory(event=registration.event, type=EventFormType.EVALUATION)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.get(url)
assert (response.status_code == status.HTTP_200_OK)
assert response.json() | def test_retrieve_evaluation_event_form_as_member_when_has_attended_event(member):
'\n A member should be able to retrieve an event form of type evaluation if\n they have attended the event.\n '
event = EventFactory(limit=1)
registration = RegistrationFactory(user=member, event=event, is_on_wait=False, has_attended=True)
form = EventFormFactory(event=registration.event, type=EventFormType.EVALUATION)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.get(url)
assert (response.status_code == status.HTTP_200_OK)
assert response.json()<|docstring|>A member should be able to retrieve an event form of type evaluation if
they have attended the event.<|endoftext|>
306a220bae60b0c43005d0e20868255a2420e1ce928c59d4ae0697200f8cbd73 | def test_retrieve_evaluation_event_form_as_member_when_has_not_attended_event(member):
'A member should not be able to retrieve an event evaluation form if they have not attended the event.'
event = EventFactory(limit=1)
registration = RegistrationFactory(user=member, event=event, is_on_wait=False, has_attended=False)
form = EventFormFactory(event=registration.event, type=EventFormType.EVALUATION)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.get(url)
assert (response.status_code == status.HTTP_403_FORBIDDEN) | A member should not be able to retrieve an event evaluation form if they have not attended the event. | app/tests/forms/test_eventform_integration.py | test_retrieve_evaluation_event_form_as_member_when_has_not_attended_event | TIHLDE/Lepton | 7 | python | def test_retrieve_evaluation_event_form_as_member_when_has_not_attended_event(member):
event = EventFactory(limit=1)
registration = RegistrationFactory(user=member, event=event, is_on_wait=False, has_attended=False)
form = EventFormFactory(event=registration.event, type=EventFormType.EVALUATION)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.get(url)
assert (response.status_code == status.HTTP_403_FORBIDDEN) | def test_retrieve_evaluation_event_form_as_member_when_has_not_attended_event(member):
event = EventFactory(limit=1)
registration = RegistrationFactory(user=member, event=event, is_on_wait=False, has_attended=False)
form = EventFormFactory(event=registration.event, type=EventFormType.EVALUATION)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.get(url)
assert (response.status_code == status.HTTP_403_FORBIDDEN)<|docstring|>A member should not be able to retrieve an event evaluation form if they have not attended the event.<|endoftext|> |
0e1145af89e6a8734f46476e43388afc1293f6d8ebfa41b798f1450742177676 | @permission_params
def test_create_event_form_as_admin(permission_test_util):
'An admin should be able to create an event form.'
(member, event, expected_create_status_code, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory.build()
client = get_api_client(user=member)
url = _get_forms_url()
response = client.post(url, _get_event_form_post_data(form, event))
assert (response.status_code == expected_create_status_code)
if (expected_create_status_code == status.HTTP_201_CREATED):
assert event.forms.filter(title=form.title).exists() | An admin should be able to create an event form. | app/tests/forms/test_eventform_integration.py | test_create_event_form_as_admin | TIHLDE/Lepton | 7 | python | @permission_params
def test_create_event_form_as_admin(permission_test_util):
(member, event, expected_create_status_code, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory.build()
client = get_api_client(user=member)
url = _get_forms_url()
response = client.post(url, _get_event_form_post_data(form, event))
assert (response.status_code == expected_create_status_code)
if (expected_create_status_code == status.HTTP_201_CREATED):
assert event.forms.filter(title=form.title).exists() | @permission_params
def test_create_event_form_as_admin(permission_test_util):
(member, event, expected_create_status_code, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory.build()
client = get_api_client(user=member)
url = _get_forms_url()
response = client.post(url, _get_event_form_post_data(form, event))
assert (response.status_code == expected_create_status_code)
if (expected_create_status_code == status.HTTP_201_CREATED):
assert event.forms.filter(title=form.title).exists()<|docstring|>An admin should be able to create an event form.<|endoftext|> |
e6655269e921a7b3101e578dee4fa7842c67af36c1cb0b3ab61958ffc896a9ad | @permission_params
def test_update_event_form_as_admin(permission_test_util):
'An admin should be able to update an event form.'
(member, event, _, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory(event=event)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
new_title = 'New form title'
response = client.put(url, _get_event_form_update_data(form, new_title))
assert (response.status_code == expected_update_delete_status_code)
if (expected_update_delete_status_code == status.HTTP_200_OK):
assert event.forms.filter(title=new_title).exists() | An admin should be able to update an event form. | app/tests/forms/test_eventform_integration.py | test_update_event_form_as_admin | TIHLDE/Lepton | 7 | python | @permission_params
def test_update_event_form_as_admin(permission_test_util):
(member, event, _, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory(event=event)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
new_title = 'New form title'
response = client.put(url, _get_event_form_update_data(form, new_title))
assert (response.status_code == expected_update_delete_status_code)
if (expected_update_delete_status_code == status.HTTP_200_OK):
assert event.forms.filter(title=new_title).exists() | @permission_params
def test_update_event_form_as_admin(permission_test_util):
(member, event, _, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory(event=event)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
new_title = 'New form title'
response = client.put(url, _get_event_form_update_data(form, new_title))
assert (response.status_code == expected_update_delete_status_code)
if (expected_update_delete_status_code == status.HTTP_200_OK):
assert event.forms.filter(title=new_title).exists()<|docstring|>An admin should be able to update an event form.<|endoftext|> |
5506199c3852019cdc9760aa6c7dae372670b702a2d9cdf974c70c38197b9772 | @permission_params
def test_delete_event_form_as_admin(permission_test_util):
'An admin should be able to delete an event form.'
(member, event, _, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory(event=event)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.delete(url)
assert (response.status_code == expected_update_delete_status_code)
if (expected_update_delete_status_code == status.HTTP_200_OK):
assert (not event.forms.filter(title=form.title).exists()) | An admin should be able to delete an event form. | app/tests/forms/test_eventform_integration.py | test_delete_event_form_as_admin | TIHLDE/Lepton | 7 | python | @permission_params
def test_delete_event_form_as_admin(permission_test_util):
(member, event, _, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory(event=event)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.delete(url)
assert (response.status_code == expected_update_delete_status_code)
if (expected_update_delete_status_code == status.HTTP_200_OK):
assert (not event.forms.filter(title=form.title).exists()) | @permission_params
def test_delete_event_form_as_admin(permission_test_util):
(member, event, _, expected_update_delete_status_code) = permission_test_util
form = EventFormFactory(event=event)
client = get_api_client(user=member)
url = _get_form_detail_url(form)
response = client.delete(url)
assert (response.status_code == expected_update_delete_status_code)
if (expected_update_delete_status_code == status.HTTP_200_OK):
assert (not event.forms.filter(title=form.title).exists())<|docstring|>An admin should be able to delete an event form.<|endoftext|> |
05839a9a07b6b6fd8ecbae81fe12a3ddbbaf4c769fe720d8e6c45f473d5ac409 | def eval(model, data_loader, criterion):
'\n Function for evaluation step\n\n Args:\n model ([]): transformer model\n data_loader (BucketIterator): data_loader to evaluate\n criterion (Loss Object): criterion to calculate the loss \n '
losses = []
with torch.no_grad():
for batch in data_loader:
(src, trg) = (batch.src, batch.trg)
(batch_size, trg_len) = (trg.shape[0], trg.shape[1])
outputs = model(src, trg)
l = criterion(outputs.view((batch_size * trg_len), (- 1)), trg.type_as(outputs).view((- 1)))
losses.append(l.item())
loss = (sum(losses) / len(losses))
ppl = torch.exp(torch.tensor(loss)).item()
return (loss, ppl) | Function for evaluation step
Args:
model ([]): transformer model
data_loader (BucketIterator): data_loader to evaluate
criterion (Loss Object): criterion to calculate the loss | notebooks/train.py | eval | macabdul9/transformers | 3 | python | def eval(model, data_loader, criterion):
'\n Function for evaluation step\n\n Args:\n model ([]): transformer model\n data_loader (BucketIterator): data_loader to evaluate\n criterion (Loss Object): criterion to calculate the loss \n '
losses = []
with torch.no_grad():
for batch in data_loader:
(src, trg) = (batch.src, batch.trg)
(batch_size, trg_len) = (trg.shape[0], trg.shape[1])
outputs = model(src, trg)
l = criterion(outputs.view((batch_size * trg_len), (- 1)), trg.type_as(outputs).view((- 1)))
losses.append(l.item())
loss = (sum(losses) / len(losses))
ppl = torch.exp(torch.tensor(loss)).item()
return (loss, ppl) | def eval(model, data_loader, criterion):
'\n Function for evaluation step\n\n Args:\n model ([]): transformer model\n data_loader (BucketIterator): data_loader to evaluate\n criterion (Loss Object): criterion to calculate the loss \n '
losses = []
with torch.no_grad():
for batch in data_loader:
(src, trg) = (batch.src, batch.trg)
(batch_size, trg_len) = (trg.shape[0], trg.shape[1])
outputs = model(src, trg)
l = criterion(outputs.view((batch_size * trg_len), (- 1)), trg.type_as(outputs).view((- 1)))
losses.append(l.item())
loss = (sum(losses) / len(losses))
ppl = torch.exp(torch.tensor(loss)).item()
return (loss, ppl)<|docstring|>Function for evaluation step
Args:
model ([]): transformer model
data_loader (BucketIterator): data_loader to evaluate
criterion (Loss Object): criterion to calculate the loss<|endoftext|> |
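The perplexity returned above is simply the exponential of the mean cross-entropy loss. A one-line illustration of that relationship with a made-up loss value:
import torch
avg_loss = torch.tensor(2.0)        # illustrative mean cross-entropy
ppl = torch.exp(avg_loss).item()    # perplexity = exp(loss), about 7.39
print(ppl)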
d0f99490376cc9cefc08eb0175637e6a3d1962430614d735411303615b4fa2f8 | def train(model, train_loader, val_loader, criterion, optimizer, epochs=10):
'\n \n Function to train the model\n\n Args:\n model (nn.Module): model \n train_loader (BucketIterator): training data loader\n val_loader (BucketIterator): validation data loader\n criterion (Loss Object): criterion to calculate the loss\n optimizer (Optimizer): optimizer to update the model parameters\n epochs (int, optional): number of training epochs. Defaults to 10.\n '
epoch_progress = tqdm(total=epochs, desc='Epoch', position=0)
total_steps = (len(train_loader) * epochs)
steps = 0
for epoch in range(epochs):
train_loss = []
for batch in tqdm(train_loader):
(src, trg) = (batch.src, batch.trg)
(batch_size, trg_len) = (batch.trg.shape[0], batch.trg.shape[1])
outputs = model(src, trg)
loss = criterion(outputs.view((batch_size * trg_len), (- 1)), trg.type_as(outputs).view((- 1)))
loss.backward()
optimizer.step()
optimizer.zero_grad()
if ((steps % (len(train_loader) // 2)) == 0):
ppl = torch.exp(loss).item()
print(f'Steps {steps}/{total_steps} | Train_loss {loss.item():.4f} | Train_ppl {ppl:.4f}')
train_loss.append(loss.item())
steps += 1
avg_loss = (sum(train_loss) / len(train_loss))
avg_ppl = torch.exp(torch.tensor([avg_loss]))
(val_loss, val_ppl) = eval(model, val_loader, criterion)
print(f'Epoch {epoch}/{epochs} | Train_loss {avg_loss:.4f} | Train_ppl {avg_ppl:.4f} | Val_loss {val_loss:.4f} | Val_ppl {val_ppl:.4f}')
epoch_progress.update(1) | Function to train the model
Args:
model (nn.Module): model
train_loader (BucketIterator): training data loader
val_loader (BucketIterator): validation data loader
criterion (Loss Object): criterion to calculate the loss
optimizer (Optimizer): optimizer to update the model parameters
epochs (int, optional): number of training epochs. Defaults to 10. | notebooks/train.py | train | macabdul9/transformers | 3 | python | def train(model, train_loader, val_loader, criterion, optimizer, epochs=10):
'\n \n Function to train the model\n\n Args:\n model (nn.Module): model \n train_loader (BucketIterator): training data loader\n val_loader (BucketIterator): validation data loader\n criterion (Loss Object): criterion to calculate the loss\n optimizer (Optimizer): optimizer to update the model parameters\n epochs (int, optional): number of training epochs. Defaults to 10.\n '
epoch_progress = tqdm(total=epochs, desc='Epoch', position=0)
total_steps = (len(train_loader) * epochs)
steps = 0
for epoch in range(epochs):
train_loss = []
for batch in tqdm(train_loader):
(src, trg) = (batch.src, batch.trg)
(batch_size, trg_len) = (batch.trg.shape[0], batch.trg.shape[1])
outputs = model(src, trg)
loss = criterion(outputs.view((batch_size * trg_len), (- 1)), trg.type_as(outputs).view((- 1)))
loss.backward()
optimizer.step()
optimizer.zero_grad()
if ((steps % (len(train_loader) // 2)) == 0):
ppl = torch.exp(loss).item()
print(f'Steps {steps}/{total_steps} | Train_loss {loss.item():.4f} | Train_ppl {ppl:.4f}')
train_loss.append(loss.item())
steps += 1
avg_loss = (sum(train_loss) / len(train_loss))
avg_ppl = torch.exp(torch.tensor([avg_loss]))
(val_loss, val_ppl) = eval(model, val_loader, criterion)
print(f'Epoch {epoch}/{epochs} | Train_loss {avg_loss:.4f} | Train_ppl {avg_ppl:.4f} | Val_loss {val_loss:.4f} | Val_ppl {val_ppl:.4f}')
epoch_progress.update(1) | def train(model, train_loader, val_loader, criterion, optimizer, epochs=10):
'\n \n Function to train the model\n\n Args:\n model (nn.Module): model \n train_loader (BucketIterator): training data loader\n val_loader (BucketIterator): validation data loader\n criterion (Loss Object): criterion to calculate the loss\n optimizer (Optimizer): optimizer to update the model parameters\n epochs (int, optional): number of training epochs. Defaults to 10.\n '
epoch_progress = tqdm(total=epochs, desc='Epoch', position=0)
total_steps = (len(train_loader) * epochs)
steps = 0
for epoch in range(epochs):
train_loss = []
for batch in tqdm(train_loader):
(src, trg) = (batch.src, batch.trg)
(batch_size, trg_len) = (batch.trg.shape[0], batch.trg.shape[1])
outputs = model(src, trg)
loss = criterion(outputs.view((batch_size * trg_len), (- 1)), trg.type_as(outputs).view((- 1)))
loss.backward()
optimizer.step()
optimizer.zero_grad()
if ((steps % (len(train_loader) // 2)) == 0):
ppl = torch.exp(loss).item()
print(f'Steps {steps}/{total_steps} | Train_loss {loss.item():.4f} | Train_ppl {ppl:.4f}')
train_loss.append(loss.item())
steps += 1
avg_loss = (sum(train_loss) / len(train_loss))
avg_ppl = torch.exp(torch.tensor([avg_loss]))
(val_loss, val_ppl) = eval(model, val_loader, criterion)
print(f'Epoch {epoch}/{epochs} | Train_loss {avg_loss:.4f} | Train_ppl {avg_ppl:.4f} | Val_loss {val_loss:.4f} | Val_ppl {val_ppl:.4f}')
epoch_progress.update(1)<|docstring|>Function to train the model
Args:
model (nn.Module): model
train_loader (BucketIterator): training data loader
val_loader (BucketIterator): validation data loader
criterion (Loss Object): criterion to calculate the loss
optimizer (Optimizer): optimizer to update the model parameters
epochs (int, optional): number of training epochs. Defaults to 10.<|endoftext|>
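The loss call in the loop above flattens the (batch, seq_len, vocab) logits and the (batch, seq_len) targets so a token-level criterion can be applied. A small shape-only sketch of that reshaping, using nn.CrossEntropyLoss as a stand-in for the criterion that the record passes in from outside; all sizes are illustrative.
import torch
import torch.nn as nn
batch_size, trg_len, vocab = 4, 7, 100                    # made-up sizes
outputs = torch.randn(batch_size, trg_len, vocab)         # model logits
trg = torch.randint(0, vocab, (batch_size, trg_len))      # integer target tokens
criterion = nn.CrossEntropyLoss()
loss = criterion(outputs.view(batch_size * trg_len, -1), trg.view(-1))
print(loss.item(), torch.exp(loss).item())                # loss and its perplexity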
6d0fbe99c8c7f98221dc0a7c25ea8bb7897d41a867da7fa6589b5b6148ee621a | def poincare_2d_visualization(model, animation, epoch, eval_result, avg_loss, avg_pos_loss, avg_neg_loss, tree, figure_title, num_nodes=50, show_node_labels=()):
'Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.\n\n Parameters\n ----------\n model : :class:`~hyperbolic.dag_emb_model.DAGEmbeddingModel`\n The model to visualize, model size must be 2.\n tree : list\n Set of tuples containing the direct edges present in the original dataset.\n figure_title : str\n Title of the plotted figure.\n num_nodes : int or None\n Number of nodes for which edges are to be plotted.\n If `None`, all edges are plotted.\n Helpful to limit this in case the data is too large to avoid a messy plot.\n show_node_labels : iterable\n Iterable of nodes for which to show labels by default.\n\n Returns\n -------\n :class:`plotly.graph_objs.Figure`\n Plotly figure that contains plot.\n\n '
vectors = model.kv.syn0
if (vectors.shape[1] != 2):
raise ValueError('Can only plot 2-D vectors')
node_labels = model.kv.index2word
nodes_x = list(vectors[(:, 0)])
nodes_y = list(vectors[(:, 1)])
nodes = dict(x=nodes_x, y=nodes_y, mode='markers', marker=dict(color='rgb(30, 100, 200)'), text=node_labels, textposition='bottom')
(nodes_x, nodes_y, node_labels) = ([], [], [])
for node in show_node_labels:
if (node in model.kv):
vector = model.kv[node]
nodes_x.append(vector[0])
nodes_y.append(vector[1])
node_labels.append(node)
nodes_with_labels = dict(x=nodes_x, y=nodes_y, mode='markers+text', marker=dict(color='rgb(200, 100, 200)'), text=node_labels, textfont=dict(family='sans serif', size=18, color='#ff7f0e'), textposition='bottom')
node_out_degrees = Counter((hypernym_pair[1] for hypernym_pair in tree))
if (num_nodes is None):
chosen_nodes = list(node_out_degrees.keys())
else:
chosen_nodes = list(sorted(node_out_degrees.keys(), key=(lambda k: (- node_out_degrees[k]))))[:num_nodes]
edges_x = []
edges_y = []
for (u, v) in tree:
if (not ((u in chosen_nodes) or (v in chosen_nodes))):
continue
vector_u = model.kv[u]
vector_v = model.kv[v]
edges_x += [vector_u[0], vector_v[0], None]
edges_y += [vector_u[1], vector_v[1], None]
edges = dict(x=edges_x, y=edges_y, mode='line', hoverinfo=False, line=dict(color='rgb(50,50,50)', width=1))
layout = go.Layout(title=figure_title, showlegend=False, hovermode='closest', width=1500, height=1500, xaxis={'range': [(- 1), 1.3], 'autorange': False}, yaxis={'range': [(- 1), 1.3], 'autorange': False}, updatemenus=[{'type': 'buttons', 'buttons': [{'label': 'Play', 'method': 'animate', 'args': [None]}, {'args': [[None], {'frame': {'duration': 0, 'redraw': False}, 'mode': 'immediate', 'transition': {'duration': 0}}], 'label': 'Pause', 'method': 'animate'}]}])
epoch_sticker = dict(x=[0.5], y=[1.2], mode='text', text=[('Epoch : ' + str(epoch))], textfont=dict(family='sans serif', size=20, color='rgb(200,0,0)'))
result_str = (str(eval_result) + '<br>')
result_str += ('loss = %.2f; pos loss = %.2f; neg loss = %.2f' % (avg_loss, avg_pos_loss, avg_neg_loss))
eval_result_sticker = dict(x=[0.5], y=[1.1], mode='text', text=[result_str], textfont=dict(family='sans serif', size=20, color='rgb(0,0,200)'))
frame = {'data': [], 'name': str(epoch)}
frame['data'].append(edges)
frame['data'].append(nodes_with_labels)
frame['data'].append(eval_result_sticker)
frame['data'].append(epoch_sticker)
animation['frames'].append(frame)
if (epoch == 0):
animation['data'].append(edges)
animation['data'].append(nodes_with_labels)
animation['data'].append(eval_result_sticker)
animation['data'].append(epoch_sticker)
return go.Figure(data=[edges, nodes, nodes_with_labels, eval_result_sticker, epoch_sticker], layout=layout) | Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.
Parameters
----------
model : :class:`~hyperbolic.dag_emb_model.DAGEmbeddingModel`
The model to visualize, model size must be 2.
tree : list
Set of tuples containing the direct edges present in the original dataset.
figure_title : str
Title of the plotted figure.
num_nodes : int or None
Number of nodes for which edges are to be plotted.
If `None`, all edges are plotted.
Helpful to limit this in case the data is too large to avoid a messy plot.
show_node_labels : iterable
Iterable of nodes for which to show labels by default.
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot. | poincare_viz.py | poincare_2d_visualization | dalab/hyperbolic_cones | 103 | python | def poincare_2d_visualization(model, animation, epoch, eval_result, avg_loss, avg_pos_loss, avg_neg_loss, tree, figure_title, num_nodes=50, show_node_labels=()):
'Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.\n\n Parameters\n ----------\n model : :class:`~hyperbolic.dag_emb_model.DAGEmbeddingModel`\n The model to visualize, model size must be 2.\n tree : list\n Set of tuples containing the direct edges present in the original dataset.\n figure_title : str\n Title of the plotted figure.\n num_nodes : int or None\n Number of nodes for which edges are to be plotted.\n If `None`, all edges are plotted.\n Helpful to limit this in case the data is too large to avoid a messy plot.\n show_node_labels : iterable\n Iterable of nodes for which to show labels by default.\n\n Returns\n -------\n :class:`plotly.graph_objs.Figure`\n Plotly figure that contains plot.\n\n '
vectors = model.kv.syn0
if (vectors.shape[1] != 2):
raise ValueError('Can only plot 2-D vectors')
node_labels = model.kv.index2word
nodes_x = list(vectors[(:, 0)])
nodes_y = list(vectors[(:, 1)])
nodes = dict(x=nodes_x, y=nodes_y, mode='markers', marker=dict(color='rgb(30, 100, 200)'), text=node_labels, textposition='bottom')
(nodes_x, nodes_y, node_labels) = ([], [], [])
for node in show_node_labels:
if (node in model.kv):
vector = model.kv[node]
nodes_x.append(vector[0])
nodes_y.append(vector[1])
node_labels.append(node)
nodes_with_labels = dict(x=nodes_x, y=nodes_y, mode='markers+text', marker=dict(color='rgb(200, 100, 200)'), text=node_labels, textfont=dict(family='sans serif', size=18, color='#ff7f0e'), textposition='bottom')
node_out_degrees = Counter((hypernym_pair[1] for hypernym_pair in tree))
if (num_nodes is None):
chosen_nodes = list(node_out_degrees.keys())
else:
chosen_nodes = list(sorted(node_out_degrees.keys(), key=(lambda k: (- node_out_degrees[k]))))[:num_nodes]
edges_x = []
edges_y = []
for (u, v) in tree:
if (not ((u in chosen_nodes) or (v in chosen_nodes))):
continue
vector_u = model.kv[u]
vector_v = model.kv[v]
edges_x += [vector_u[0], vector_v[0], None]
edges_y += [vector_u[1], vector_v[1], None]
edges = dict(x=edges_x, y=edges_y, mode='line', hoverinfo=False, line=dict(color='rgb(50,50,50)', width=1))
layout = go.Layout(title=figure_title, showlegend=False, hovermode='closest', width=1500, height=1500, xaxis={'range': [(- 1), 1.3], 'autorange': False}, yaxis={'range': [(- 1), 1.3], 'autorange': False}, updatemenus=[{'type': 'buttons', 'buttons': [{'label': 'Play', 'method': 'animate', 'args': [None]}, {'args': [[None], {'frame': {'duration': 0, 'redraw': False}, 'mode': 'immediate', 'transition': {'duration': 0}}], 'label': 'Pause', 'method': 'animate'}]}])
epoch_sticker = dict(x=[0.5], y=[1.2], mode='text', text=[('Epoch : ' + str(epoch))], textfont=dict(family='sans serif', size=20, color='rgb(200,0,0)'))
result_str = (str(eval_result) + '<br>')
result_str += ('loss = %.2f; pos loss = %.2f; neg loss = %.2f' % (avg_loss, avg_pos_loss, avg_neg_loss))
eval_result_sticker = dict(x=[0.5], y=[1.1], mode='text', text=[result_str], textfont=dict(family='sans serif', size=20, color='rgb(0,0,200)'))
frame = {'data': [], 'name': str(epoch)}
frame['data'].append(edges)
frame['data'].append(nodes_with_labels)
frame['data'].append(eval_result_sticker)
frame['data'].append(epoch_sticker)
animation['frames'].append(frame)
if (epoch == 0):
animation['data'].append(edges)
animation['data'].append(nodes_with_labels)
animation['data'].append(eval_result_sticker)
animation['data'].append(epoch_sticker)
return go.Figure(data=[edges, nodes, nodes_with_labels, eval_result_sticker, epoch_sticker], layout=layout) | def poincare_2d_visualization(model, animation, epoch, eval_result, avg_loss, avg_pos_loss, avg_neg_loss, tree, figure_title, num_nodes=50, show_node_labels=()):
'Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.\n\n Parameters\n ----------\n model : :class:`~hyperbolic.dag_emb_model.DAGEmbeddingModel`\n The model to visualize, model size must be 2.\n tree : list\n Set of tuples containing the direct edges present in the original dataset.\n figure_title : str\n Title of the plotted figure.\n num_nodes : int or None\n Number of nodes for which edges are to be plotted.\n If `None`, all edges are plotted.\n Helpful to limit this in case the data is too large to avoid a messy plot.\n show_node_labels : iterable\n Iterable of nodes for which to show labels by default.\n\n Returns\n -------\n :class:`plotly.graph_objs.Figure`\n Plotly figure that contains plot.\n\n '
vectors = model.kv.syn0
if (vectors.shape[1] != 2):
raise ValueError('Can only plot 2-D vectors')
node_labels = model.kv.index2word
nodes_x = list(vectors[(:, 0)])
nodes_y = list(vectors[(:, 1)])
nodes = dict(x=nodes_x, y=nodes_y, mode='markers', marker=dict(color='rgb(30, 100, 200)'), text=node_labels, textposition='bottom')
(nodes_x, nodes_y, node_labels) = ([], [], [])
for node in show_node_labels:
if (node in model.kv):
vector = model.kv[node]
nodes_x.append(vector[0])
nodes_y.append(vector[1])
node_labels.append(node)
nodes_with_labels = dict(x=nodes_x, y=nodes_y, mode='markers+text', marker=dict(color='rgb(200, 100, 200)'), text=node_labels, textfont=dict(family='sans serif', size=18, color='#ff7f0e'), textposition='bottom')
node_out_degrees = Counter((hypernym_pair[1] for hypernym_pair in tree))
if (num_nodes is None):
chosen_nodes = list(node_out_degrees.keys())
else:
chosen_nodes = list(sorted(node_out_degrees.keys(), key=(lambda k: (- node_out_degrees[k]))))[:num_nodes]
edges_x = []
edges_y = []
for (u, v) in tree:
if (not ((u in chosen_nodes) or (v in chosen_nodes))):
continue
vector_u = model.kv[u]
vector_v = model.kv[v]
edges_x += [vector_u[0], vector_v[0], None]
edges_y += [vector_u[1], vector_v[1], None]
edges = dict(x=edges_x, y=edges_y, mode='line', hoverinfo=False, line=dict(color='rgb(50,50,50)', width=1))
layout = go.Layout(title=figure_title, showlegend=False, hovermode='closest', width=1500, height=1500, xaxis={'range': [(- 1), 1.3], 'autorange': False}, yaxis={'range': [(- 1), 1.3], 'autorange': False}, updatemenus=[{'type': 'buttons', 'buttons': [{'label': 'Play', 'method': 'animate', 'args': [None]}, {'args': [[None], {'frame': {'duration': 0, 'redraw': False}, 'mode': 'immediate', 'transition': {'duration': 0}}], 'label': 'Pause', 'method': 'animate'}]}])
epoch_sticker = dict(x=[0.5], y=[1.2], mode='text', text=[('Epoch : ' + str(epoch))], textfont=dict(family='sans serif', size=20, color='rgb(200,0,0)'))
result_str = (str(eval_result) + '<br>')
result_str += ('loss = %.2f; pos loss = %.2f; neg loss = %.2f' % (avg_loss, avg_pos_loss, avg_neg_loss))
eval_result_sticker = dict(x=[0.5], y=[1.1], mode='text', text=[result_str], textfont=dict(family='sans serif', size=20, color='rgb(0,0,200)'))
frame = {'data': [], 'name': str(epoch)}
frame['data'].append(edges)
frame['data'].append(nodes_with_labels)
frame['data'].append(eval_result_sticker)
frame['data'].append(epoch_sticker)
animation['frames'].append(frame)
if (epoch == 0):
animation['data'].append(edges)
animation['data'].append(nodes_with_labels)
animation['data'].append(eval_result_sticker)
animation['data'].append(epoch_sticker)
return go.Figure(data=[edges, nodes, nodes_with_labels, eval_result_sticker, epoch_sticker], layout=layout)<|docstring|>Create a 2-d plot of the nodes and edges of a 2-d poincare embedding.
Parameters
----------
model : :class:`~hyperbolic.dag_emb_model.DAGEmbeddingModel`
The model to visualize, model size must be 2.
tree : list
Set of tuples containing the direct edges present in the original dataset.
figure_title : str
Title of the plotted figure.
num_nodes : int or None
Number of nodes for which edges are to be plotted.
If `None`, all edges are plotted.
Helpful to limit this in case the data is too large to avoid a messy plot.
show_node_labels : iterable
Iterable of nodes for which to show labels by default.
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot.<|endoftext|> |
0a6d05ec08d1f784d0b8f2bf746cde6ca20b9bf344c02fa6b3be0aac9714f54a | def poincare_distance_heatmap(origin_point, x_range=((- 1.0), 1.0), y_range=((- 1.0), 1.0), num_points=100):
'Create a heatmap of Poincare distances from `origin_point` for each point (x, y),\n where x and y lie in `x_range` and `y_range` respectively, with `num_points` points chosen uniformly in both ranges.\n\n Parameters\n ----------\n origin_point : tuple (int, int)\n (x, y) from which distances are to be measured and plotted.\n x_range : tuple (int, int)\n Range for x-axis from which to choose `num_points` points.\n y_range : tuple (int, int)\n Range for y-axis from which to choose `num_points` points.\n num_points : int\n Number of points to choose from `x_range` and `y_range`.\n\n Notes\n -----\n Points outside the unit circle are ignored, since the Poincare distance is defined\n only for points inside the circle boundaries (exclusive of the boundary).\n\n Returns\n -------\n :class:`plotly.graph_objs.Figure`\n Plotly figure that contains plot\n\n '
epsilon = 1e-08
(x_range, y_range) = (list(x_range), list(y_range))
if ((x_range[0] == (- 1.0)) and (y_range[0] == (- 1.0))):
x_range[0] += epsilon
y_range[0] += epsilon
if ((x_range[0] == 1.0) and (y_range[0] == 1.0)):
x_range[0] -= epsilon
y_range[0] -= epsilon
x_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
y_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
(x, y) = np.meshgrid(x_axis_values, y_axis_values)
all_points = np.dstack((x, y)).swapaxes(1, 2).swapaxes(0, 1).reshape(2, (num_points ** 2)).T
norms = np.linalg.norm(all_points, axis=1)
all_points = all_points[(norms < 1)]
origin_point = np.array(origin_point)
all_distances = PoincareKeyedVectors.poincare_dists(origin_point, all_points)
distances = go.Scatter(x=all_points[(:, 0)], y=all_points[(:, 1)], mode='markers', marker=dict(size='9', color=all_distances, colorscale='Viridis', showscale=True, colorbar=go.ColorBar(title='Poincare Distance')), text=[('Distance from (%.2f, %.2f): %.2f' % (origin_point[0], origin_point[1], d)) for d in all_distances], name='')
origin = go.Scatter(x=[origin_point[0]], y=[origin_point[1]], name=('Distance from (%.2f, %.2f)' % (origin_point[0], origin_point[1])), mode='markers+text', marker=dict(size='10', color='rgb(200, 50, 50)'))
layout = go.Layout(width=900, height=800, showlegend=False, title=('Poincare Distances from (%.2f, %.2f)' % (origin_point[0], origin_point[1])), hovermode='closest')
return go.Figure(data=[distances, origin], layout=layout) | Create a heatmap of Poincare distances from `origin_point` for each point (x, y),
where x and y lie in `x_range` and `y_range` respectively, with `num_points` points chosen uniformly in both ranges.
Parameters
----------
origin_point : tuple (int, int)
(x, y) from which distances are to be measured and plotted.
x_range : tuple (int, int)
Range for x-axis from which to choose `num_points` points.
y_range : tuple (int, int)
Range for y-axis from which to choose `num_points` points.
num_points : int
Number of points to choose from `x_range` and `y_range`.
Notes
-----
Points outside the unit circle are ignored, since the Poincare distance is defined
only for points inside the circle boundaries (exclusive of the boundary).
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot | poincare_viz.py | poincare_distance_heatmap | dalab/hyperbolic_cones | 103 | python | def poincare_distance_heatmap(origin_point, x_range=((- 1.0), 1.0), y_range=((- 1.0), 1.0), num_points=100):
'Create a heatmap of Poincare distances from `origin_point` for each point (x, y),\n where x and y lie in `x_range` and `y_range` respectively, with `num_points` points chosen uniformly in both ranges.\n\n Parameters\n ----------\n origin_point : tuple (int, int)\n (x, y) from which distances are to be measured and plotted.\n x_range : tuple (int, int)\n Range for x-axis from which to choose `num_points` points.\n y_range : tuple (int, int)\n Range for y-axis from which to choose `num_points` points.\n num_points : int\n Number of points to choose from `x_range` and `y_range`.\n\n Notes\n -----\n Points outside the unit circle are ignored, since the Poincare distance is defined\n only for points inside the circle boundaries (exclusive of the boundary).\n\n Returns\n -------\n :class:`plotly.graph_objs.Figure`\n Plotly figure that contains plot\n\n '
epsilon = 1e-08
(x_range, y_range) = (list(x_range), list(y_range))
if ((x_range[0] == (- 1.0)) and (y_range[0] == (- 1.0))):
x_range[0] += epsilon
y_range[0] += epsilon
if ((x_range[0] == 1.0) and (y_range[0] == 1.0)):
x_range[0] -= epsilon
y_range[0] -= epsilon
x_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
y_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
(x, y) = np.meshgrid(x_axis_values, y_axis_values)
all_points = np.dstack((x, y)).swapaxes(1, 2).swapaxes(0, 1).reshape(2, (num_points ** 2)).T
norms = np.linalg.norm(all_points, axis=1)
all_points = all_points[(norms < 1)]
origin_point = np.array(origin_point)
all_distances = PoincareKeyedVectors.poincare_dists(origin_point, all_points)
distances = go.Scatter(x=all_points[(:, 0)], y=all_points[(:, 1)], mode='markers', marker=dict(size='9', color=all_distances, colorscale='Viridis', showscale=True, colorbar=go.ColorBar(title='Poincare Distance')), text=[('Distance from (%.2f, %.2f): %.2f' % (origin_point[0], origin_point[1], d)) for d in all_distances], name='')
origin = go.Scatter(x=[origin_point[0]], y=[origin_point[1]], name=('Distance from (%.2f, %.2f)' % (origin_point[0], origin_point[1])), mode='markers+text', marker=dict(size='10', color='rgb(200, 50, 50)'))
layout = go.Layout(width=900, height=800, showlegend=False, title=('Poincare Distances from (%.2f, %.2f)' % (origin_point[0], origin_point[1])), hovermode='closest')
return go.Figure(data=[distances, origin], layout=layout) | def poincare_distance_heatmap(origin_point, x_range=((- 1.0), 1.0), y_range=((- 1.0), 1.0), num_points=100):
'Create a heatmap of Poincare distances from `origin_point` for each point (x, y),\n where x and y lie in `x_range` and `y_range` respectively, with `num_points` points chosen uniformly in both ranges.\n\n Parameters\n ----------\n origin_point : tuple (int, int)\n (x, y) from which distances are to be measured and plotted.\n x_range : tuple (int, int)\n Range for x-axis from which to choose `num_points` points.\n y_range : tuple (int, int)\n Range for y-axis from which to choose `num_points` points.\n num_points : int\n Number of points to choose from `x_range` and `y_range`.\n\n Notes\n -----\n Points outside the unit circle are ignored, since the Poincare distance is defined\n only for points inside the circle boundaries (exclusive of the boundary).\n\n Returns\n -------\n :class:`plotly.graph_objs.Figure`\n Plotly figure that contains plot\n\n '
epsilon = 1e-08
(x_range, y_range) = (list(x_range), list(y_range))
if ((x_range[0] == (- 1.0)) and (y_range[0] == (- 1.0))):
x_range[0] += epsilon
y_range[0] += epsilon
if ((x_range[0] == 1.0) and (y_range[0] == 1.0)):
x_range[0] -= epsilon
y_range[0] -= epsilon
x_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
y_axis_values = np.linspace(x_range[0], x_range[1], num=num_points)
(x, y) = np.meshgrid(x_axis_values, y_axis_values)
all_points = np.dstack((x, y)).swapaxes(1, 2).swapaxes(0, 1).reshape(2, (num_points ** 2)).T
norms = np.linalg.norm(all_points, axis=1)
all_points = all_points[(norms < 1)]
origin_point = np.array(origin_point)
all_distances = PoincareKeyedVectors.poincare_dists(origin_point, all_points)
distances = go.Scatter(x=all_points[(:, 0)], y=all_points[(:, 1)], mode='markers', marker=dict(size='9', color=all_distances, colorscale='Viridis', showscale=True, colorbar=go.ColorBar(title='Poincare Distance')), text=[('Distance from (%.2f, %.2f): %.2f' % (origin_point[0], origin_point[1], d)) for d in all_distances], name='')
origin = go.Scatter(x=[origin_point[0]], y=[origin_point[1]], name=('Distance from (%.2f, %.2f)' % (origin_point[0], origin_point[1])), mode='markers+text', marker=dict(size='10', color='rgb(200, 50, 50)'))
layout = go.Layout(width=900, height=800, showlegend=False, title=('Poincare Distances from (%.2f, %.2f)' % (origin_point[0], origin_point[1])), hovermode='closest')
return go.Figure(data=[distances, origin], layout=layout)<|docstring|>Create a heatmap of Poincare distances from `origin_point` for each point (x, y),
where x and y lie in `x_range` and `y_range` respectively, with `num_points` points chosen uniformly in both ranges.
Parameters
----------
origin_point : tuple (int, int)
(x, y) from which distances are to be measured and plotted.
x_range : tuple (int, int)
Range for x-axis from which to choose `num_points` points.
y_range : tuple (int, int)
Range for y-axis from which to choose `num_points` points.
num_points : int
Number of points to choose from `x_range` and `y_range`.
Notes
-----
Points outside the unit circle are ignored, since the Poincare distance is defined
only for points inside the circle boundaries (exclusive of the boundary).
Returns
-------
:class:`plotly.graph_objs.Figure`
Plotly figure that contains plot<|endoftext|> |
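For context, a short usage sketch of the heatmap helper above. The import path, file name, and offline renderer are assumptions (the function follows gensim's plotly-based viz style and returns a plain plotly Figure), so treat this as illustrative rather than the repository's own example:

```python
# Hedged usage sketch: assumes the function above is importable from a
# gensim-style viz module and that plotly's offline renderer is installed.
from plotly.offline import plot
from gensim.viz.poincare import poincare_distance_heatmap  # assumed import path

# Distances from (0.5, 0.5), sampled on a 50x50 grid clipped to the unit circle.
figure = poincare_distance_heatmap((0.5, 0.5), num_points=50)
plot(figure, filename='poincare_heatmap.html', auto_open=False)  # illustrative file name
```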
547f13775870625b43fa74deac07f9568c673854e409bf32a5eed5dd995239a8 | def __init__(self, *args, **kwds):
'\n Constructor. Any message fields that are implicitly/explicitly\n set to None will be assigned a default value. The recommend\n use is keyword arguments as this is more robust to future message\n changes. You cannot mix in-order arguments and keyword arguments.\n\n The available fields are:\n header,pose\n\n :param args: complete set of field values, in .msg order\n :param kwds: use keyword arguments corresponding to message field names\n to set specific fields.\n '
if (args or kwds):
super(GeoPoseStamped, self).__init__(*args, **kwds)
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
else:
self.header = std_msgs.msg.Header()
self.pose = geographic_msgs.msg.GeoPose() | Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,pose
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields. | TrekBot2_WS/devel/.private/geographic_msgs/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py | __init__ | Rafcin/RescueRoboticsLHMV | 1 | python | def __init__(self, *args, **kwds):
'\n Constructor. Any message fields that are implicitly/explicitly\n set to None will be assigned a default value. The recommend\n use is keyword arguments as this is more robust to future message\n changes. You cannot mix in-order arguments and keyword arguments.\n\n The available fields are:\n header,pose\n\n :param args: complete set of field values, in .msg order\n :param kwds: use keyword arguments corresponding to message field names\n to set specific fields.\n '
if (args or kwds):
super(GeoPoseStamped, self).__init__(*args, **kwds)
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
else:
self.header = std_msgs.msg.Header()
self.pose = geographic_msgs.msg.GeoPose() | def __init__(self, *args, **kwds):
'\n Constructor. Any message fields that are implicitly/explicitly\n set to None will be assigned a default value. The recommend\n use is keyword arguments as this is more robust to future message\n changes. You cannot mix in-order arguments and keyword arguments.\n\n The available fields are:\n header,pose\n\n :param args: complete set of field values, in .msg order\n :param kwds: use keyword arguments corresponding to message field names\n to set specific fields.\n '
if (args or kwds):
super(GeoPoseStamped, self).__init__(*args, **kwds)
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
else:
self.header = std_msgs.msg.Header()
self.pose = geographic_msgs.msg.GeoPose()<|docstring|>Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,pose
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.<|endoftext|> |
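A minimal construction sketch for the message type above; it assumes a ROS1 Python environment where geographic_msgs and std_msgs are built, and the field values are purely illustrative:

```python
# Minimal sketch, assuming geographic_msgs/std_msgs are importable (ROS1).
from geographic_msgs.msg import GeoPoseStamped
from std_msgs.msg import Header

# Default construction: header and pose are filled with default instances.
msg = GeoPoseStamped()
msg.header.frame_id = 'map'
msg.pose.position.latitude = 44.97    # illustrative values
msg.pose.position.longitude = -93.23
msg.pose.orientation.w = 1.0          # identity orientation

# Keyword construction, as the docstring recommends (no mixing with positional args).
msg2 = GeoPoseStamped(header=Header(frame_id='map'))
```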
1fb6b2b708db1f101aab56633ecd49b6f4087e60f5bbe6926e83ee92f9106530 | def _get_types(self):
'\n internal API method\n '
return self._slot_types | internal API method | TrekBot2_WS/devel/.private/geographic_msgs/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py | _get_types | Rafcin/RescueRoboticsLHMV | 1 | python | def _get_types(self):
'\n \n '
return self._slot_types | def _get_types(self):
'\n \n '
return self._slot_types<|docstring|>internal API method<|endoftext|> |
3411963652b50dc80a3ad33fc6ae6284e1ec01a7407c670cfca5ec67a7cf4f07 | def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) | serialize message into buffer
:param buff: buffer, ``StringIO`` | TrekBot2_WS/devel/.private/geographic_msgs/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py | serialize | Rafcin/RescueRoboticsLHMV | 1 | python | def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) | def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))<|docstring|>serialize message into buffer
:param buff: buffer, ``StringIO``<|endoftext|> |
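A round-trip sketch of the serializer above; io.BytesIO stands in for the "StringIO" buffer named in the docstring, since genpy writes bytes under Python 3. Same ROS1 environment assumption as before:

```python
# Hedged sketch: serialize the fixed-size pose fields into an in-memory buffer.
from io import BytesIO
from geographic_msgs.msg import GeoPoseStamped

msg = GeoPoseStamped()
msg.pose.position.latitude = 10.0

buff = BytesIO()
msg.serialize(buff)        # packs header (3I + frame_id) followed by 7 float64s
raw = buff.getvalue()
```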
62db6f4627009d5683ea157115e112419acb1e4094ed957bc46187e2c3d2f790 | def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) | unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str`` | TrekBot2_WS/devel/.private/geographic_msgs/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py | deserialize | Rafcin/RescueRoboticsLHMV | 1 | python | def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) | def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e)<|docstring|>unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``<|endoftext|> |
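The matching deserialization returns the populated instance (and raises genpy.DeserializationError on malformed input); again a sketch under the same environment assumption:

```python
# Hedged sketch: rebuild a message from the bytes produced by serialize().
from io import BytesIO
from geographic_msgs.msg import GeoPoseStamped

src = GeoPoseStamped()
src.pose.position.latitude = 10.0
buff = BytesIO()
src.serialize(buff)

decoded = GeoPoseStamped().deserialize(buff.getvalue())  # deserialize() returns self
assert decoded.pose.position.latitude == 10.0
```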
51423eb71bd72896fc7f08265580e021e911cbf4a479425187fd4b7f98f3fc4e | def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) | serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module | TrekBot2_WS/devel/.private/geographic_msgs/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py | serialize_numpy | Rafcin/RescueRoboticsLHMV | 1 | python | def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))) | def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))<|docstring|>serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module<|endoftext|> |
38896605d88d39d0f35035f8150e35b77381fef724b12a47444543de878e9402 | def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) | unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module | TrekBot2_WS/devel/.private/geographic_msgs/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py | deserialize_numpy | Rafcin/RescueRoboticsLHMV | 1 | python | def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) | def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
if (self.header is None):
self.header = std_msgs.msg.Header()
if (self.pose is None):
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e)<|docstring|>unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module<|endoftext|> |
9d1b6e8267c581e066375e6463351edc4934d18f8d1ee3919229212ecde43dd9 | def utctoweekseconds(utc=datetime.datetime.utcnow(), leapseconds=37):
' Returns the GPS week, the GPS day, and the seconds\n and microseconds since the beginning of the GPS week '
datetimeformat = '%Y-%m-%d %H:%M:%S'
epoch = datetime.datetime.strptime('1980-01-06 00:00:00', datetimeformat)
tdiff = ((utc - epoch) + datetime.timedelta(seconds=leapseconds))
weeks = (tdiff.days // 7)
gpsWeeks = (weeks % 1024)
gpsSec = (tdiff.total_seconds() - ((((60 * 60) * 24) * 7) * weeks))
return (gpsWeeks, gpsSec) | Returns the GPS week, the GPS day, and the seconds
and microseconds since the beginning of the GPS week | Simulation/python/FMU.py | utctoweekseconds | elke0011/OpenFlightSim | 15 | python | def utctoweekseconds(utc=datetime.datetime.utcnow(), leapseconds=37):
' Returns the GPS week, the GPS day, and the seconds\n and microseconds since the beginning of the GPS week '
datetimeformat = '%Y-%m-%d %H:%M:%S'
epoch = datetime.datetime.strptime('1980-01-06 00:00:00', datetimeformat)
tdiff = ((utc - epoch) + datetime.timedelta(seconds=leapseconds))
weeks = (tdiff.days // 7)
gpsWeeks = (weeks % 1024)
gpsSec = (tdiff.total_seconds() - ((((60 * 60) * 24) * 7) * weeks))
return (gpsWeeks, gpsSec) | def utctoweekseconds(utc=datetime.datetime.utcnow(), leapseconds=37):
' Returns the GPS week, the GPS day, and the seconds\n and microseconds since the beginning of the GPS week '
datetimeformat = '%Y-%m-%d %H:%M:%S'
epoch = datetime.datetime.strptime('1980-01-06 00:00:00', datetimeformat)
tdiff = ((utc - epoch) + datetime.timedelta(seconds=leapseconds))
weeks = (tdiff.days // 7)
gpsWeeks = (weeks % 1024)
gpsSec = (tdiff.total_seconds() - ((((60 * 60) * 24) * 7) * weeks))
return (gpsWeeks, gpsSec)<|docstring|>Returns the GPS week, the GPS day, and the seconds
and microseconds since the beginning of the GPS week<|endoftext|> |
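Note that the function body returns only the pair (week number mod 1024, seconds of week), not the GPS day or microseconds mentioned in the docstring. A worked usage sketch with an illustrative date, following the function's own arithmetic:

```python
# Worked sketch for the helper above (values follow from its own arithmetic).
import datetime

utc = datetime.datetime(2021, 6, 15, 12, 0, 0)
gps_week, gps_sec = utctoweekseconds(utc, leapseconds=37)
# 15136 days after the 1980-01-06 epoch -> full week 2162, and 2162 % 1024 == 114;
# 2 days 12 h 37 s into that week -> 216037.0 seconds.
assert gps_week == 114
assert gps_sec == 216037.0
```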
893688f6a64378c1fe02a0a977328ca65d744f61d9a62c0641e8e8e1e015ff7b | def __init__(self, parent):
'\n Initialize a Resource Layer.\n\n :type parent: CoAP\n :param parent: the CoAP server\n '
self._parent = parent | Initialize a Resource Layer.
:type parent: CoAP
:param parent: the CoAP server | src/Bubot_CoAP/layers/resource_layer.py | __init__ | businka/Bubot_CoAP | 0 | python | def __init__(self, parent):
'\n Initialize a Resource Layer.\n\n :type parent: CoAP\n :param parent: the CoAP server\n '
self._parent = parent | def __init__(self, parent):
'\n Initialize a Resource Layer.\n\n :type parent: CoAP\n :param parent: the CoAP server\n '
self._parent = parent<|docstring|>Initialize a Resource Layer.
:type parent: CoAP
:param parent: the CoAP server<|endoftext|> |
a4e54fdf24aaf5cbf742162a6dc2f6d3c06dd2fcb7cc03b0482e997383ea637a | async def edit_resource(self, transaction, path):
'\n Render a POST on an already created resource.\n\n :param path: the path of the resource\n :param transaction: the transaction\n :return: the transaction\n '
resource_node = self._parent.root[path]
transaction.resource = resource_node
if transaction.request.if_match:
if ((None not in transaction.request.if_match) and (str(transaction.resource.etag) not in transaction.request.if_match)):
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
method = getattr(resource_node, 'render_POST', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(resource_node, 'render_POST_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if (resource.path is None):
resource.path = path
resource.observe_count = resource_node.observe_count
if (resource is resource_node):
transaction.response.code = defines.Codes.CHANGED.number
else:
transaction.response.code = defines.Codes.CREATED.number
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (transaction.response.code == defines.Codes.CREATED.number):
transaction.response.location_path = resource.path
if ((resource.location_query is not None) and (len(resource.location_query) > 0)):
transaction.response.location_query = resource.location_query
transaction.response.payload = None
self._parent.root[resource.path] = resource
return transaction | Render a POST on an already created resource.
:param path: the path of the resource
:param transaction: the transaction
:return: the transaction | src/Bubot_CoAP/layers/resource_layer.py | edit_resource | businka/Bubot_CoAP | 0 | python | async def edit_resource(self, transaction, path):
'\n Render a POST on an already created resource.\n\n :param path: the path of the resource\n :param transaction: the transaction\n :return: the transaction\n '
resource_node = self._parent.root[path]
transaction.resource = resource_node
if transaction.request.if_match:
if ((None not in transaction.request.if_match) and (str(transaction.resource.etag) not in transaction.request.if_match)):
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
method = getattr(resource_node, 'render_POST', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(resource_node, 'render_POST_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if (resource.path is None):
resource.path = path
resource.observe_count = resource_node.observe_count
if (resource is resource_node):
transaction.response.code = defines.Codes.CHANGED.number
else:
transaction.response.code = defines.Codes.CREATED.number
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (transaction.response.code == defines.Codes.CREATED.number):
transaction.response.location_path = resource.path
if ((resource.location_query is not None) and (len(resource.location_query) > 0)):
transaction.response.location_query = resource.location_query
transaction.response.payload = None
self._parent.root[resource.path] = resource
return transaction | async def edit_resource(self, transaction, path):
'\n Render a POST on an already created resource.\n\n :param path: the path of the resource\n :param transaction: the transaction\n :return: the transaction\n '
resource_node = self._parent.root[path]
transaction.resource = resource_node
if transaction.request.if_match:
if ((None not in transaction.request.if_match) and (str(transaction.resource.etag) not in transaction.request.if_match)):
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
method = getattr(resource_node, 'render_POST', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(resource_node, 'render_POST_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if (resource.path is None):
resource.path = path
resource.observe_count = resource_node.observe_count
if (resource is resource_node):
transaction.response.code = defines.Codes.CHANGED.number
else:
transaction.response.code = defines.Codes.CREATED.number
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (transaction.response.code == defines.Codes.CREATED.number):
transaction.response.location_path = resource.path
if ((resource.location_query is not None) and (len(resource.location_query) > 0)):
transaction.response.location_query = resource.location_query
transaction.response.payload = None
self._parent.root[resource.path] = resource
return transaction<|docstring|>Render a POST on an already created resource.
:param path: the path of the resource
:param transaction: the transaction
:return: the transaction<|endoftext|> |
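The layer above only dispatches; the behaviour lives in the resource's render_POST / render_POST_advanced handler. Below is a schematic handler that returns the (resource, response) pair the advanced path expects. Class names and import paths are assumptions modelled on CoAPthon-style servers, not code from the repository:

```python
# Schematic resource for the advanced POST path; import paths are assumed.
from Bubot_CoAP import defines
from Bubot_CoAP.resources.resource import Resource


class CounterResource(Resource):
    """Increments a counter on every POST to an existing path."""

    def __init__(self, name='counter'):
        super().__init__(name)
        self.count = 0
        self.payload = '0'

    async def render_POST_advanced(self, request, response):
        self.count += 1
        self.payload = str(self.count)
        response.code = defines.Codes.CHANGED.number  # layer defaults to CREATED if unset
        return self, response
```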
8ba2cb1819e4e08ae69403cb6b8617c201ab47c0415d720f1c9effe5fbd61b35 | async def add_resource(self, transaction, parent_resource, lp):
'\n Render a POST on a new resource.\n\n :param transaction: the transaction\n :param parent_resource: the parent of the resource\n :param lp: the location_path attribute of the resource\n :return: the response\n '
method = getattr(parent_resource, 'render_POST', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(parent_resource, 'render_POST_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
resource.path = lp
if (resource.etag is not None):
transaction.response.etag = resource.etag
transaction.response.location_path = resource.path
if ((resource.location_query is not None) and (len(resource.location_query) > 0)):
transaction.response.location_query = resource.location_query
transaction.response.code = defines.Codes.CREATED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
resource.changed = True
transaction.resource = resource
self._parent.root[resource.path] = resource
return transaction | Render a POST on a new resource.
:param transaction: the transaction
:param parent_resource: the parent of the resource
:param lp: the location_path attribute of the resource
:return: the response | src/Bubot_CoAP/layers/resource_layer.py | add_resource | businka/Bubot_CoAP | 0 | python | async def add_resource(self, transaction, parent_resource, lp):
'\n Render a POST on a new resource.\n\n :param transaction: the transaction\n :param parent_resource: the parent of the resource\n :param lp: the location_path attribute of the resource\n :return: the response\n '
method = getattr(parent_resource, 'render_POST', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(parent_resource, 'render_POST_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
resource.path = lp
if (resource.etag is not None):
transaction.response.etag = resource.etag
transaction.response.location_path = resource.path
if ((resource.location_query is not None) and (len(resource.location_query) > 0)):
transaction.response.location_query = resource.location_query
transaction.response.code = defines.Codes.CREATED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
resource.changed = True
transaction.resource = resource
self._parent.root[resource.path] = resource
return transaction | async def add_resource(self, transaction, parent_resource, lp):
'\n Render a POST on a new resource.\n\n :param transaction: the transaction\n :param parent_resource: the parent of the resource\n :param lp: the location_path attribute of the resource\n :return: the response\n '
method = getattr(parent_resource, 'render_POST', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(parent_resource, 'render_POST_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
resource.path = lp
if (resource.etag is not None):
transaction.response.etag = resource.etag
transaction.response.location_path = resource.path
if ((resource.location_query is not None) and (len(resource.location_query) > 0)):
transaction.response.location_query = resource.location_query
transaction.response.code = defines.Codes.CREATED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
resource.changed = True
transaction.resource = resource
self._parent.root[resource.path] = resource
return transaction<|docstring|>Render a POST on a new resource.
:param transaction: the transaction
:param parent_resource: the parent of the resource
:param lp: the location_path attribute of the resource
:return: the response<|endoftext|> |
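add_resource is only reached when the matched parent sets allow_children; a schematic collection resource whose plain render_POST returns the new child (the layer then assigns the path, the 2.01 Created code, and Location-Path). Names and import paths are again assumptions:

```python
# Schematic "collection" parent; the layer creates children under it on POST.
from Bubot_CoAP.resources.resource import Resource


class CollectionResource(Resource):
    def __init__(self, name='collection'):
        super().__init__(name)
        self.allow_children = True        # lets create_resource() route new paths here

    async def render_POST(self, request):
        child = Resource('item')          # hypothetical child; real code would subclass
        child.payload = request.payload   # store the posted representation
        return child                      # add_resource() sets path, etag, Location-Path
```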
465c5779d0e1f93e813e8e270b429a8ebeb93ab9952fed46d926ca713e55ec33 | async def create_resource(self, path, transaction):
'\n Render a POST request.\n\n :param path: the path of the request\n :param transaction: the transaction\n :return: the response\n '
t = self._parent.root.with_prefix(path)
max_len = 0
imax = None
for i in t:
if (i == path):
return (await self.edit_resource(transaction, path))
elif (len(i) > max_len):
imax = i
max_len = len(i)
lp = path
parent_resource = self._parent.root[imax]
if parent_resource.allow_children:
return (await self.add_resource(transaction, parent_resource, lp))
else:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction | Render a POST request.
:param path: the path of the request
:param transaction: the transaction
:return: the response | src/Bubot_CoAP/layers/resource_layer.py | create_resource | businka/Bubot_CoAP | 0 | python | async def create_resource(self, path, transaction):
'\n Render a POST request.\n\n :param path: the path of the request\n :param transaction: the transaction\n :return: the response\n '
t = self._parent.root.with_prefix(path)
max_len = 0
imax = None
for i in t:
if (i == path):
return (await self.edit_resource(transaction, path))
elif (len(i) > max_len):
imax = i
max_len = len(i)
lp = path
parent_resource = self._parent.root[imax]
if parent_resource.allow_children:
return (await self.add_resource(transaction, parent_resource, lp))
else:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction | async def create_resource(self, path, transaction):
'\n Render a POST request.\n\n :param path: the path of the request\n :param transaction: the transaction\n :return: the response\n '
t = self._parent.root.with_prefix(path)
max_len = 0
imax = None
for i in t:
if (i == path):
return (await self.edit_resource(transaction, path))
elif (len(i) > max_len):
imax = i
max_len = len(i)
lp = path
parent_resource = self._parent.root[imax]
if parent_resource.allow_children:
return (await self.add_resource(transaction, parent_resource, lp))
else:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction<|docstring|>Render a POST request.
:param path: the path of the request
:param transaction: the transaction
:return: the response<|endoftext|> |
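The parent lookup above is a longest-prefix match over the registered resource tree; a small self-contained illustration of that rule (paths are hypothetical):

```python
# Longest-prefix parent selection, mirroring the loop in create_resource().
registered = ['/', '/sensors', '/sensors/temp']      # hypothetical tree contents
request_path = '/sensors/temp/history'

candidates = [p for p in registered if request_path.startswith(p)]
parent = max(candidates, key=len)                     # '/sensors/temp'
assert parent == '/sensors/temp'
# The POST is handed to add_resource() only if that parent allows children;
# otherwise the layer answers 4.05 Method Not Allowed.
```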
671ebdc6eeb186be54b54d58cd54cd90ae2c891ed9260db4f1c6cd13bea353bf | async def update_resource(self, transaction):
'\n Render a PUT request.\n\n :param transaction: the transaction\n :return: the response\n '
if transaction.request.if_match:
if ((None not in transaction.request.if_match) and (str(transaction.resource.etag) not in transaction.request.if_match)):
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
if transaction.request.if_none_match:
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
method = getattr(transaction.resource, 'render_PUT', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_PUT_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CHANGED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CHANGED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if (resource.etag is not None):
transaction.response.etag = resource.etag
transaction.response.code = defines.Codes.CHANGED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
return transaction | Render a PUT request.
:param transaction: the transaction
:return: the response | src/Bubot_CoAP/layers/resource_layer.py | update_resource | businka/Bubot_CoAP | 0 | python | async def update_resource(self, transaction):
'\n Render a PUT request.\n\n :param transaction: the transaction\n :return: the response\n '
if transaction.request.if_match:
if ((None not in transaction.request.if_match) and (str(transaction.resource.etag) not in transaction.request.if_match)):
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
if transaction.request.if_none_match:
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
method = getattr(transaction.resource, 'render_PUT', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_PUT_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CHANGED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CHANGED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if (resource.etag is not None):
transaction.response.etag = resource.etag
transaction.response.code = defines.Codes.CHANGED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
return transaction | async def update_resource(self, transaction):
'\n Render a PUT request.\n\n :param transaction: the transaction\n :return: the response\n '
if transaction.request.if_match:
if ((None not in transaction.request.if_match) and (str(transaction.resource.etag) not in transaction.request.if_match)):
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
if transaction.request.if_none_match:
transaction.response.code = defines.Codes.PRECONDITION_FAILED.number
return transaction
method = getattr(transaction.resource, 'render_PUT', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_PUT_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CHANGED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CHANGED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if (resource.etag is not None):
transaction.response.etag = resource.etag
transaction.response.code = defines.Codes.CHANGED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
resource.changed = True
resource.observe_count += 1
transaction.resource = resource
return transaction<|docstring|>Render a PUT request.
:param transaction: the transaction
:return: the response<|endoftext|> |
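A schematic render_PUT counterpart: replace the stored representation and return self, and the layer answers 2.04 Changed and bumps the observe counter. Import path assumed as before:

```python
# Schematic PUT handler driven by update_resource(); import path is assumed.
from Bubot_CoAP.resources.resource import Resource


class ConfigResource(Resource):
    async def render_PUT(self, request):
        self.payload = request.payload   # PUT is a full replacement of the representation
        return self                      # layer replies 2.04 Changed, observe_count += 1
```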
93146a3f224279b4de48928eae3642d41d7edb9422ae5882f1b7d5d57613fd84 | async def delete_resource(self, transaction, path):
'\n Render a DELETE request.\n\n :param transaction: the transaction\n :param path: the path\n :return: the response\n '
resource = transaction.resource
method = getattr(resource, 'render_DELETE', None)
try:
ret = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_DELETE_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], bool)):
(delete, response) = ret
if delete:
del self._parent.root[path]
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.DELETED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], bool) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(delete, response) = ret
if delete:
del self._parent.root[path]
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.DELETED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(ret, bool):
pass
elif (isinstance(ret, tuple) and (len(ret) == 2)):
(resource, callback) = ret
ret = (await self._handle_separate(transaction, callback))
if (not isinstance(ret, bool)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if ret:
del self._parent.root[path]
transaction.response.code = defines.Codes.DELETED.number
transaction.response.payload = None
transaction.resource.deleted = True
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction | Render a DELETE request.
:param transaction: the transaction
:param path: the path
:return: the response | src/Bubot_CoAP/layers/resource_layer.py | delete_resource | businka/Bubot_CoAP | 0 | python | async def delete_resource(self, transaction, path):
'\n Render a DELETE request.\n\n :param transaction: the transaction\n :param path: the path\n :return: the response\n '
resource = transaction.resource
method = getattr(resource, 'render_DELETE', None)
try:
ret = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_DELETE_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], bool)):
(delete, response) = ret
if delete:
del self._parent.root[path]
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.DELETED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], bool) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(delete, response) = ret
if delete:
del self._parent.root[path]
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.DELETED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(ret, bool):
pass
elif (isinstance(ret, tuple) and (len(ret) == 2)):
(resource, callback) = ret
ret = (await self._handle_separate(transaction, callback))
if (not isinstance(ret, bool)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if ret:
del self._parent.root[path]
transaction.response.code = defines.Codes.DELETED.number
transaction.response.payload = None
transaction.resource.deleted = True
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction | async def delete_resource(self, transaction, path):
'\n Render a DELETE request.\n\n :param transaction: the transaction\n :param path: the path\n :return: the response\n '
resource = transaction.resource
method = getattr(resource, 'render_DELETE', None)
try:
ret = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_DELETE_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], bool)):
(delete, response) = ret
if delete:
del self._parent.root[path]
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.DELETED.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], bool) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(delete, response) = ret
if delete:
del self._parent.root[path]
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.DELETED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(ret, bool):
pass
elif (isinstance(ret, tuple) and (len(ret) == 2)):
(resource, callback) = ret
ret = (await self._handle_separate(transaction, callback))
if (not isinstance(ret, bool)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
if ret:
del self._parent.root[path]
transaction.response.code = defines.Codes.DELETED.number
transaction.response.payload = None
transaction.resource.deleted = True
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction<|docstring|>Render a DELETE request.
:param transaction: the transaction
:param path: the path
:return: the response<|endoftext|> |
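A minimal sketch (not from this package) of a handler that satisfies the return conventions delete_resource checks for; the Resource and Response types are assumed to be the package's own and the class name is hypothetical:

class TrashResource(Resource):
    async def render_DELETE_advanced(self, request, response):
        # Returning (bool, Response) matches the first advanced branch above:
        # True asks the layer to remove the entry from the resource tree and,
        # if response.code is still None, reply with Codes.DELETED.
        return True, response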
625e2ca5c81448dcb8f838650cb1cbf2065b561761cea9f6ccbc72bd918f59ea | async def get_resource(self, transaction):
'\n Render a GET request.\n\n :param transaction: the transaction\n :return: the transaction\n '
method = getattr(transaction.resource, 'render_GET', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_GET_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CONTENT.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CONTENT.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction.response
if (resource.etag in transaction.request.etag):
transaction.response.code = defines.Codes.VALID.number
else:
transaction.response.code = defines.Codes.CONTENT.number
try:
if ((resource.actual_content_type is not None) and (resource.actual_content_type != defines.Content_types['text/plain'])):
transaction.response.content_type = resource.actual_content_type
if isinstance(resource.payload, bytes):
transaction.response.payload = resource.payload
else:
transaction.response.encode_payload(resource.payload)
except KeyError:
transaction.response.code = defines.Codes.NOT_ACCEPTABLE.number
return transaction.response
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
transaction.resource = resource
return transaction | Render a GET request.
:param transaction: the transaction
:return: the transaction | src/Bubot_CoAP/layers/resource_layer.py | get_resource | businka/Bubot_CoAP | 0 | python | async def get_resource(self, transaction):
'\n Render a GET request.\n\n :param transaction: the transaction\n :return: the transaction\n '
method = getattr(transaction.resource, 'render_GET', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_GET_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CONTENT.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CONTENT.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction.response
if (resource.etag in transaction.request.etag):
transaction.response.code = defines.Codes.VALID.number
else:
transaction.response.code = defines.Codes.CONTENT.number
try:
if ((resource.actual_content_type is not None) and (resource.actual_content_type != defines.Content_types['text/plain'])):
transaction.response.content_type = resource.actual_content_type
if isinstance(resource.payload, bytes):
transaction.response.payload = resource.payload
else:
transaction.response.encode_payload(resource.payload)
except KeyError:
transaction.response.code = defines.Codes.NOT_ACCEPTABLE.number
return transaction.response
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
transaction.resource = resource
return transaction | async def get_resource(self, transaction):
'\n Render a GET request.\n\n :param transaction: the transaction\n :return: the transaction\n '
method = getattr(transaction.resource, 'render_GET', None)
try:
resource = (await method(request=transaction.request))
except NotImplementedError:
try:
method = getattr(transaction.resource, 'render_GET_advanced', None)
ret = (await method(request=transaction.request, response=transaction.response))
if (isinstance(ret, tuple) and (len(ret) == 2) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response) = ret
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CONTENT.number
return transaction
elif (isinstance(ret, tuple) and (len(ret) == 3) and isinstance(ret[1], Response) and isinstance(ret[0], Resource)):
(resource, response, callback) = ret
ret = (await self._handle_separate_advanced(transaction, callback))
if ((not isinstance(ret, tuple)) or (not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)))):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
transaction.resource = resource
transaction.response = response
if (transaction.response.code is None):
transaction.response.code = defines.Codes.CONTENT.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif (isinstance(resource, tuple) and (len(resource) == 2)):
(resource, callback) = resource
resource = (await self._handle_separate(transaction, callback))
if (not isinstance(resource, Resource)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction.response
if (resource.etag in transaction.request.etag):
transaction.response.code = defines.Codes.VALID.number
else:
transaction.response.code = defines.Codes.CONTENT.number
try:
if ((resource.actual_content_type is not None) and (resource.actual_content_type != defines.Content_types['text/plain'])):
transaction.response.content_type = resource.actual_content_type
if isinstance(resource.payload, bytes):
transaction.response.payload = resource.payload
else:
transaction.response.encode_payload(resource.payload)
except KeyError:
transaction.response.code = defines.Codes.NOT_ACCEPTABLE.number
return transaction.response
assert isinstance(resource, Resource)
if (resource.etag is not None):
transaction.response.etag = resource.etag
if (resource.max_age is not None):
transaction.response.max_age = resource.max_age
transaction.resource = resource
return transaction<|docstring|>Render a GET request.
:param transaction: the transaction
:return: the transaction<|endoftext|> |
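Similarly, a hypothetical render_GET_advanced handler matching the (Resource, Response) branch of get_resource; in this branch the handler fills the response itself and the layer defaults the code to CONTENT:

class TemperatureResource(Resource):
    async def render_GET_advanced(self, request, response):
        response.payload = b'21.5'      # example payload, set by the handler itself
        return self, response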
bd6f53ef40ab041f94c77729a10ea71ba554dde572c94b8230e31996e4f54919 | async def discover(self, transaction):
'\n Render a GET request to the .well-known/core link.\n\n :param transaction: the transaction\n :return: the transaction\n '
transaction.response.code = defines.Codes.CONTENT.number
payload = ''
for i in self._parent.root.dump():
if (i == '/'):
continue
resource = self._parent.root[i]
if resource.visible:
ret = self.valid(transaction.request.uri_query, resource.attributes)
if ret:
payload += self.corelinkformat(resource)
transaction.response.payload = payload
transaction.response.content_type = defines.Content_types['application/link-format']
return transaction | Render a GET request to the .well-known/core link.
:param transaction: the transaction
:return: the transaction | src/Bubot_CoAP/layers/resource_layer.py | discover | businka/Bubot_CoAP | 0 | python | async def discover(self, transaction):
'\n Render a GET request to the .well-known/core link.\n\n :param transaction: the transaction\n :return: the transaction\n '
transaction.response.code = defines.Codes.CONTENT.number
payload = ''
for i in self._parent.root.dump():
if (i == '/'):
continue
resource = self._parent.root[i]
if resource.visible:
ret = self.valid(transaction.request.uri_query, resource.attributes)
if ret:
payload += self.corelinkformat(resource)
transaction.response.payload = payload
transaction.response.content_type = defines.Content_types['application/link-format']
return transaction | async def discover(self, transaction):
'\n Render a GET request to the .well-known/core link.\n\n :param transaction: the transaction\n :return: the transaction\n '
transaction.response.code = defines.Codes.CONTENT.number
payload = ''
for i in self._parent.root.dump():
if (i == '/'):
continue
resource = self._parent.root[i]
if resource.visible:
ret = self.valid(transaction.request.uri_query, resource.attributes)
if ret:
payload += self.corelinkformat(resource)
transaction.response.payload = payload
transaction.response.content_type = defines.Content_types['application/link-format']
return transaction<|docstring|>Render a GET request to the .well-known/core link.
:param transaction: the transaction
:return: the transaction<|endoftext|> |
aa6247e0de8ab3e034a7359ac49f0c84e42588aadf4a67fd9b5fcf109c25f2da | @staticmethod
def corelinkformat(resource):
'\n Return a formatted string representation of the corelinkformat in the tree.\n\n :return: the string\n '
msg = (('<' + resource.path) + '>;')
assert isinstance(resource, Resource)
keys = sorted(list(resource.attributes.keys()))
for k in keys:
method = getattr(resource, defines.corelinkformat[k], None)
if ((method is not None) and (method != '')):
v = method
msg = (((msg[:(- 1)] + ';') + str(v)) + ',')
else:
v = resource.attributes[k]
if (v is not None):
msg = (((((msg[:(- 1)] + ';') + k) + '=') + v) + ',')
return msg | Return a formatted string representation of the corelinkformat in the tree.
:return: the string | src/Bubot_CoAP/layers/resource_layer.py | corelinkformat | businka/Bubot_CoAP | 0 | python | @staticmethod
def corelinkformat(resource):
'\n Return a formatted string representation of the corelinkformat in the tree.\n\n :return: the string\n '
msg = (('<' + resource.path) + '>;')
assert isinstance(resource, Resource)
keys = sorted(list(resource.attributes.keys()))
for k in keys:
method = getattr(resource, defines.corelinkformat[k], None)
if ((method is not None) and (method != '')):
v = method
msg = (((msg[:(- 1)] + ';') + str(v)) + ',')
else:
v = resource.attributes[k]
if (v is not None):
msg = (((((msg[:(- 1)] + ';') + k) + '=') + v) + ',')
return msg | @staticmethod
def corelinkformat(resource):
'\n Return a formatted string representation of the corelinkformat in the tree.\n\n :return: the string\n '
msg = (('<' + resource.path) + '>;')
assert isinstance(resource, Resource)
keys = sorted(list(resource.attributes.keys()))
for k in keys:
method = getattr(resource, defines.corelinkformat[k], None)
if ((method is not None) and (method != '')):
v = method
msg = (((msg[:(- 1)] + ';') + str(v)) + ',')
else:
v = resource.attributes[k]
if (v is not None):
msg = (((((msg[:(- 1)] + ';') + k) + '=') + v) + ',')
return msg<|docstring|>Return a formatted string representation of the corelinkformat in the tree.
:return: the string<|endoftext|> |
f824b27641433887977d4221e74d1400a4aeddad338625e7077a2bf306454119 | def load_data(messages_filepath, categories_filepath):
'Load messages and categories data from the given file paths, process them and merge them\n \n Args:\n messages_filepath: \n categories_filepath:\n\n Returns:\n df: pandas.DataFrame: dataframe containing the messages data combined with their category classifications \n '
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
categories_split = categories.categories.str.split(';', expand=True)
category_colnames = list(categories_split.iloc[0].apply((lambda x: str.split(x, '-')[0])))
categories_split.columns = category_colnames
categories = pd.concat([categories, categories_split], axis=1)
for column in category_colnames:
categories[column] = categories[column].apply((lambda x: str.split(x, '-')[1]))
categories[column] = categories[column].apply(pd.to_numeric)
df = pd.merge(messages, categories, on='id')
df = df.drop('categories', axis=1)
all_messages_combined_words_cleaned = list(itertools.chain.from_iterable(df['message'].apply(tokenize).apply((lambda x: list(filter((lambda x: (not x.isnumeric())), x))))))
word_count_df = pd.DataFrame.from_dict(dict(Counter(all_messages_combined_words_cleaned)), orient='index', columns=['count'])
word_count_df = word_count_df.assign(frequency=word_count_df['count'].apply((lambda x: (x / len(all_messages_combined_words_cleaned)))))
word_count_df = word_count_df.sort_values('frequency', ascending=False).reset_index()
word_count_df.rename(index=str, columns={'index': 'word'}, inplace=True)
return (df, word_count_df) | Load messages and categories data from the given file paths, process them and merge them
Args:
messages_filepath: Path to the messages CSV file.
categories_filepath: Path to the categories CSV file.
Returns:
df: pandas.DataFrame: dataframe containing the messages data combined with their category classifications
word_count_df: pandas.DataFrame: per-word counts and frequencies across all messages | process_data.py | load_data | karthikvijayakumar/Disaster-Response-Text-Classification | 1 | python | def load_data(messages_filepath, categories_filepath):
'Load messages and categories data from the given file paths, process them and merge them\n \n Args:\n messages_filepath: \n categories_filepath:\n\n Returns:\n df: pandas.DataFrame: dataframe containing the messages data combined with their category classifications \n '
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
categories_split = categories.categories.str.split(';', expand=True)
category_colnames = list(categories_split.iloc[0].apply((lambda x: str.split(x, '-')[0])))
categories_split.columns = category_colnames
categories = pd.concat([categories, categories_split], axis=1)
for column in category_colnames:
categories[column] = categories[column].apply((lambda x: str.split(x, '-')[1]))
categories[column] = categories[column].apply(pd.to_numeric)
df = pd.merge(messages, categories, on='id')
df = df.drop('categories', axis=1)
all_messages_combined_words_cleaned = list(itertools.chain.from_iterable(df['message'].apply(tokenize).apply((lambda x: list(filter((lambda x: (not x.isnumeric())), x))))))
word_count_df = pd.DataFrame.from_dict(dict(Counter(all_messages_combined_words_cleaned)), orient='index', columns=['count'])
word_count_df = word_count_df.assign(frequency=word_count_df['count'].apply((lambda x: (x / len(all_messages_combined_words_cleaned)))))
word_count_df = word_count_df.sort_values('frequency', ascending=False).reset_index()
word_count_df.rename(index=str, columns={'index': 'word'}, inplace=True)
return (df, word_count_df) | def load_data(messages_filepath, categories_filepath):
'Load messages and categories data from the given file paths, process them and merge them\n \n Args:\n messages_filepath: \n categories_filepath:\n\n Returns:\n df: pandas.DataFrame: dataframe containing the messages data combined with their category classifications \n '
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
categories_split = categories.categories.str.split(';', expand=True)
category_colnames = list(categories_split.iloc[0].apply((lambda x: str.split(x, '-')[0])))
categories_split.columns = category_colnames
categories = pd.concat([categories, categories_split], axis=1)
for column in category_colnames:
categories[column] = categories[column].apply((lambda x: str.split(x, '-')[1]))
categories[column] = categories[column].apply(pd.to_numeric)
df = pd.merge(messages, categories, on='id')
df = df.drop('categories', axis=1)
all_messages_combined_words_cleaned = list(itertools.chain.from_iterable(df['message'].apply(tokenize).apply((lambda x: list(filter((lambda x: (not x.isnumeric())), x))))))
word_count_df = pd.DataFrame.from_dict(dict(Counter(all_messages_combined_words_cleaned)), orient='index', columns=['count'])
word_count_df = word_count_df.assign(frequency=word_count_df['count'].apply((lambda x: (x / len(all_messages_combined_words_cleaned)))))
word_count_df = word_count_df.sort_values('frequency', ascending=False).reset_index()
word_count_df.rename(index=str, columns={'index': 'word'}, inplace=True)
return (df, word_count_df)<|docstring|>Load messages and categories data from the given file paths, process them and merge them
Args:
messages_filepath: Path to the messages CSV file.
categories_filepath: Path to the categories CSV file.
Returns:
df: pandas.DataFrame: dataframe containing the messages data combined with their category classifications
word_count_df: pandas.DataFrame: per-word counts and frequencies across all messages<|endoftext|>
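A usage sketch for load_data; the CSV file names are illustrative (they mirror the example printed by main further below):

df, word_count_df = load_data('disaster_messages.csv', 'disaster_categories.csv')
print(df.shape)                # merged messages with one numeric column per category
print(word_count_df.head())    # word, count, frequency, sorted by frequency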
24dde391793e88edf9891dd925014d406c668cc82e4ddce35b8165d0756d78f3 | def clean_data(df):
'Removes duplicates from the dataset\n Args:\n df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories\n\n Returns:\n df: pandas.DataFrame: Deduplicated input data \n '
return df.drop_duplicates() | Removes duplicates from the dataset
Args:
df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories
Returns:
df: pandas.DataFrame: Deduplicated input data | process_data.py | clean_data | karthikvijayakumar/Disaster-Response-Text-Classification | 1 | python | def clean_data(df):
'Removes duplicates from the dataset\n Args:\n df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories\n\n Returns:\n df: pandas.DataFrame: Deduplicated input data \n '
return df.drop_duplicates() | def clean_data(df):
'Removes duplicates from the dataset\n Args:\n df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories\n\n Returns:\n df: pandas.DataFrame: Deduplicated input data \n '
return df.drop_duplicates()<|docstring|>Removes duplicates from the dataset
Args:
df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories
Returns:
df: pandas.DataFrame: Deduplicated input data<|endoftext|> |
f8875a2ac4aec92143751e8a53d9672e6b833ad00c7d555bd7c463942593513c | def save_data(df, table_name, database_filename):
'Writes the dataframe into a sqlite database at the given location\n \n Args:\n df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories \n table_name: string. Table to write the input data frame to\n database_filename: File location to create and store SQLite database\n\n Returns:\n None\n \n '
engine = create_engine(('sqlite:///' + database_filename), echo=True)
df.to_sql(table_name, engine, index=False, if_exists='replace', chunksize=100) | Writes the dataframe into a sqlite database at the given location
Args:
df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories
table_name: string. Table to write the input data frame to
database_filename: File location to create and store SQLite database
Returns:
None | process_data.py | save_data | karthikvijayakumar/Disaster-Response-Text-Classification | 1 | python | def save_data(df, table_name, database_filename):
'Writes the dataframe into a sqlite database at the given location\n \n Args:\n df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories \n table_name: string. Table to write the input data frame to\n database_filename: File location to create and store SQLite database\n\n Returns:\n None\n \n '
engine = create_engine(('sqlite:///' + database_filename), echo=True)
df.to_sql(table_name, engine, index=False, if_exists='replace', chunksize=100) | def save_data(df, table_name, database_filename):
'Writes the dataframe into a sqlite database at the given location\n \n Args:\n df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories \n table_name: string. Table to write the input data frame to\n database_filename: File location to create and store SQLite database\n\n Returns:\n None\n \n '
engine = create_engine(('sqlite:///' + database_filename), echo=True)
df.to_sql(table_name, engine, index=False, if_exists='replace', chunksize=100)<|docstring|>Writes the dataframe into a sqlite database at the given location
Args:
df: pandas.DataFrame: Input data containing messages and their classifications into multiple categories
table_name: string. Table to write the input data frame to
database_filename: File location to create and store SQLite database
Returns:
None<|endoftext|> |
b832c292b18df7c95166f67666622390484393765e78eee3d7d3d0b6b3aa425a | def main():
'Main function for the file. This is entry point of execution\n \n Args:\n None\n\n Returns:\n None\n\n '
if (len(sys.argv) == 4):
(messages_filepath, categories_filepath, database_filepath) = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'.format(messages_filepath, categories_filepath))
(df, word_count_df) = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, 'messages', database_filepath)
save_data(word_count_df, 'word_count', database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories datasets as the first and second argument respectively, as well as the filepath of the database to save the cleaned data to as the third argument. \n\nExample: python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db') | Main function for the file. This is entry point of execution
Args:
None
Returns:
None | process_data.py | main | karthikvijayakumar/Disaster-Response-Text-Classification | 1 | python | def main():
'Main function for the file. This is entry point of execution\n \n Args:\n None\n\n Returns:\n None\n\n '
if (len(sys.argv) == 4):
(messages_filepath, categories_filepath, database_filepath) = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'.format(messages_filepath, categories_filepath))
(df, word_count_df) = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, 'messages', database_filepath)
save_data(word_count_df, 'word_count', database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories datasets as the first and second argument respectively, as well as the filepath of the database to save the cleaned data to as the third argument. \n\nExample: python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db') | def main():
'Main function for the file. This is entry point of execution\n \n Args:\n None\n\n Returns:\n None\n\n '
if (len(sys.argv) == 4):
(messages_filepath, categories_filepath, database_filepath) = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'.format(messages_filepath, categories_filepath))
(df, word_count_df) = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, 'messages', database_filepath)
save_data(word_count_df, 'word_count', database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories datasets as the first and second argument respectively, as well as the filepath of the database to save the cleaned data to as the third argument. \n\nExample: python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db')<|docstring|>Main function for the file. This is entry point of execution
Args:
None
Returns:
None<|endoftext|> |
a2d6fcfbd01c97f8b3c33ec88d0f9acd0a47cc173c70c0c4b10e9ef2ccaaa5f8 | def deprecated(func):
'This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.'
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(f'Call to deprecated function {func.__name__}.', category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return new_func | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used. | src/helpers/decorators.py | deprecated | Lakoc/bachelor_thesis | 0 | python | def deprecated(func):
'This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.'
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(f'Call to deprecated function {func.__name__}.', category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return new_func | def deprecated(func):
'This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.'
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(f'Call to deprecated function {func.__name__}.', category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return new_func<|docstring|>This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.<|endoftext|> |
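An illustrative use of the decorator (the decorated function is hypothetical):

@deprecated
def old_parser(text):
    return text.split()

old_parser('a b')   # emits DeprecationWarning: Call to deprecated function old_parser.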
092ade2784f9fac8b3df89ff37a86faf0e8eb3f3bc2a44068fc1bb33bddd555e | def timeit(func):
'This is a decorator which can be used to measure function time spent.'
@functools.wraps(func)
def new_func(*args, **kwargs):
start_time = time.time()
ret_val = func(*args, **kwargs)
elapsed_time = (time.time() - start_time)
print(f'function [{func.__name__}] finished in {int((elapsed_time * 1000))} ms')
return ret_val
return new_func | This is a decorator which can be used to measure function time spent. | src/helpers/decorators.py | timeit | Lakoc/bachelor_thesis | 0 | python | def timeit(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
start_time = time.time()
ret_val = func(*args, **kwargs)
elapsed_time = (time.time() - start_time)
print(f'function [{func.__name__}] finished in {int((elapsed_time * 1000))} ms')
return ret_val
return new_func | def timeit(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
start_time = time.time()
ret_val = func(*args, **kwargs)
elapsed_time = (time.time() - start_time)
print(f'function [{func.__name__}] finished in {int((elapsed_time * 1000))} ms')
return ret_val
return new_func<|docstring|>This is a decorator which can be used to measure function time spent.<|endoftext|> |
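And a matching sketch for timeit (again with a hypothetical function):

@timeit
def slow_sum(n):
    return sum(range(n))

slow_sum(10 ** 7)   # prints: function [slow_sum] finished in <elapsed> ms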
71a0573133235de5923e716950a1653146e0bdeaf1ed8023f224198c53d0920e | def frankie_angles_from_g(g, verbo=True, energy=50):
"\n Converted from David's code, which converted from Bob's code.\n I9 internal simulation coordinates: x ray direction is positive x direction, positive z direction is upward, y direction can be determined by right hand rule.\n I9 mic file coordinates: x, y directions are the same as the simulation coordinates.\n I9 detector images (include bin and ascii files): J is the same as y, K is the opposite z direction.\n The omega is along positive z direction.\n\n Parameters\n ------------\n g: array\n One recipropcal vector in the sample frame when omega==0. Unit is ANGSTROM^-1.\n energy:\n Experimental parameters. If use 'wavelength', the unit is 10^-10 meter; if use 'energy', the unit is keV.\n\n Returns\n -------------\n 2Theta and eta are in radian, chi, omega_a and omega_b are in degree. omega_a corresponding to positive y direction scatter, omega_b is negative y direction scatter.\n "
ghat = (g / np.linalg.norm(g))
sin_theta = ((np.linalg.norm(g) / (energy * 0.506773182)) / 2)
cos_theta = np.sqrt((1 - (sin_theta ** 2)))
cos_chi = ghat[2]
sin_chi = np.sqrt((1 - (cos_chi ** 2)))
omega_0 = np.arctan2(ghat[0], ghat[1])
if (np.fabs(sin_theta) <= np.fabs(sin_chi)):
phi = np.arccos((sin_theta / sin_chi))
sin_phi = np.sin(phi)
eta = np.arcsin(((sin_chi * sin_phi) / cos_theta))
delta_omega = np.arctan2(ghat[0], ghat[1])
delta_omega_b1 = np.arcsin((sin_theta / sin_chi))
delta_omega_b2 = (np.pi - delta_omega_b1)
omega_res1 = (delta_omega + delta_omega_b1)
omega_res2 = (delta_omega + delta_omega_b2)
if (omega_res1 > np.pi):
omega_res1 -= (2 * np.pi)
if (omega_res1 < (- np.pi)):
omega_res1 += (2 * np.pi)
if (omega_res2 > np.pi):
omega_res2 -= (2 * np.pi)
if (omega_res2 < (- np.pi)):
omega_res2 += (2 * np.pi)
else:
return (- 1)
if (verbo == True):
print('2theta: ', (((2 * np.arcsin(sin_theta)) * 180) / np.pi))
print('chi: ', ((np.arccos(cos_chi) * 180) / np.pi))
print('phi: ', ((phi * 180) / np.pi))
print('omega_0: ', ((omega_0 * 180) / np.pi))
print('omega_a: ', ((omega_res1 * 180) / np.pi))
print('omega_b: ', ((omega_res2 * 180) / np.pi))
print('eta: ', ((eta * 180) / np.pi))
return {'chi': ((np.arccos(cos_chi) * 180) / np.pi), '2Theta': (2 * np.arcsin(sin_theta)), 'eta': eta, 'omega_a': ((omega_res1 * 180) / np.pi), 'omega_b': ((omega_res2 * 180) / np.pi), 'omega_0': ((omega_0 * 180) / np.pi)} | Converted from David's code, which converted from Bob's code.
I9 internal simulation coordinates: x ray direction is positive x direction, positive z direction is upward, y direction can be determined by right hand rule.
I9 mic file coordinates: x, y directions are the same as the simulation coordinates.
I9 detector images (include bin and ascii files): J is the same as y, K is the opposite z direction.
The omega is along positive z direction.
Parameters
------------
g: array
One reciprocal vector in the sample frame when omega==0. Unit is ANGSTROM^-1.
energy:
Experimental parameters. If using 'wavelength', the unit is 10^-10 meter; if using 'energy', the unit is keV.
Returns
-------------
2Theta and eta are in radian, chi, omega_a and omega_b are in degree. omega_a corresponds to positive y direction scatter, omega_b to negative y direction scatter. | util/Simulation.py | frankie_angles_from_g | Yufeng-shen/StrainRecon | 0 | python | def frankie_angles_from_g(g, verbo=True, energy=50):
"\n Converted from David's code, which converted from Bob's code.\n I9 internal simulation coordinates: x ray direction is positive x direction, positive z direction is upward, y direction can be determined by right hand rule.\n I9 mic file coordinates: x, y directions are the same as the simulation coordinates.\n I9 detector images (include bin and ascii files): J is the same as y, K is the opposite z direction.\n The omega is along positive z direction.\n\n Parameters\n ------------\n g: array\n One recipropcal vector in the sample frame when omega==0. Unit is ANGSTROM^-1.\n energy:\n Experimental parameters. If use 'wavelength', the unit is 10^-10 meter; if use 'energy', the unit is keV.\n\n Returns\n -------------\n 2Theta and eta are in radian, chi, omega_a and omega_b are in degree. omega_a corresponding to positive y direction scatter, omega_b is negative y direction scatter.\n "
ghat = (g / np.linalg.norm(g))
sin_theta = ((np.linalg.norm(g) / (energy * 0.506773182)) / 2)
cos_theta = np.sqrt((1 - (sin_theta ** 2)))
cos_chi = ghat[2]
sin_chi = np.sqrt((1 - (cos_chi ** 2)))
omega_0 = np.arctan2(ghat[0], ghat[1])
if (np.fabs(sin_theta) <= np.fabs(sin_chi)):
phi = np.arccos((sin_theta / sin_chi))
sin_phi = np.sin(phi)
eta = np.arcsin(((sin_chi * sin_phi) / cos_theta))
delta_omega = np.arctan2(ghat[0], ghat[1])
delta_omega_b1 = np.arcsin((sin_theta / sin_chi))
delta_omega_b2 = (np.pi - delta_omega_b1)
omega_res1 = (delta_omega + delta_omega_b1)
omega_res2 = (delta_omega + delta_omega_b2)
if (omega_res1 > np.pi):
omega_res1 -= (2 * np.pi)
if (omega_res1 < (- np.pi)):
omega_res1 += (2 * np.pi)
if (omega_res2 > np.pi):
omega_res2 -= (2 * np.pi)
if (omega_res2 < (- np.pi)):
omega_res2 += (2 * np.pi)
else:
return (- 1)
if (verbo == True):
print('2theta: ', (((2 * np.arcsin(sin_theta)) * 180) / np.pi))
print('chi: ', ((np.arccos(cos_chi) * 180) / np.pi))
print('phi: ', ((phi * 180) / np.pi))
print('omega_0: ', ((omega_0 * 180) / np.pi))
print('omega_a: ', ((omega_res1 * 180) / np.pi))
print('omega_b: ', ((omega_res2 * 180) / np.pi))
print('eta: ', ((eta * 180) / np.pi))
return {'chi': ((np.arccos(cos_chi) * 180) / np.pi), '2Theta': (2 * np.arcsin(sin_theta)), 'eta': eta, 'omega_a': ((omega_res1 * 180) / np.pi), 'omega_b': ((omega_res2 * 180) / np.pi), 'omega_0': ((omega_0 * 180) / np.pi)} | def frankie_angles_from_g(g, verbo=True, energy=50):
"\n Converted from David's code, which converted from Bob's code.\n I9 internal simulation coordinates: x ray direction is positive x direction, positive z direction is upward, y direction can be determined by right hand rule.\n I9 mic file coordinates: x, y directions are the same as the simulation coordinates.\n I9 detector images (include bin and ascii files): J is the same as y, K is the opposite z direction.\n The omega is along positive z direction.\n\n Parameters\n ------------\n g: array\n One recipropcal vector in the sample frame when omega==0. Unit is ANGSTROM^-1.\n energy:\n Experimental parameters. If use 'wavelength', the unit is 10^-10 meter; if use 'energy', the unit is keV.\n\n Returns\n -------------\n 2Theta and eta are in radian, chi, omega_a and omega_b are in degree. omega_a corresponding to positive y direction scatter, omega_b is negative y direction scatter.\n "
ghat = (g / np.linalg.norm(g))
sin_theta = ((np.linalg.norm(g) / (energy * 0.506773182)) / 2)
cos_theta = np.sqrt((1 - (sin_theta ** 2)))
cos_chi = ghat[2]
sin_chi = np.sqrt((1 - (cos_chi ** 2)))
omega_0 = np.arctan2(ghat[0], ghat[1])
if (np.fabs(sin_theta) <= np.fabs(sin_chi)):
phi = np.arccos((sin_theta / sin_chi))
sin_phi = np.sin(phi)
eta = np.arcsin(((sin_chi * sin_phi) / cos_theta))
delta_omega = np.arctan2(ghat[0], ghat[1])
delta_omega_b1 = np.arcsin((sin_theta / sin_chi))
delta_omega_b2 = (np.pi - delta_omega_b1)
omega_res1 = (delta_omega + delta_omega_b1)
omega_res2 = (delta_omega + delta_omega_b2)
if (omega_res1 > np.pi):
omega_res1 -= (2 * np.pi)
if (omega_res1 < (- np.pi)):
omega_res1 += (2 * np.pi)
if (omega_res2 > np.pi):
omega_res2 -= (2 * np.pi)
if (omega_res2 < (- np.pi)):
omega_res2 += (2 * np.pi)
else:
return (- 1)
if (verbo == True):
print('2theta: ', (((2 * np.arcsin(sin_theta)) * 180) / np.pi))
print('chi: ', ((np.arccos(cos_chi) * 180) / np.pi))
print('phi: ', ((phi * 180) / np.pi))
print('omega_0: ', ((omega_0 * 180) / np.pi))
print('omega_a: ', ((omega_res1 * 180) / np.pi))
print('omega_b: ', ((omega_res2 * 180) / np.pi))
print('eta: ', ((eta * 180) / np.pi))
return {'chi': ((np.arccos(cos_chi) * 180) / np.pi), '2Theta': (2 * np.arcsin(sin_theta)), 'eta': eta, 'omega_a': ((omega_res1 * 180) / np.pi), 'omega_b': ((omega_res2 * 180) / np.pi), 'omega_0': ((omega_0 * 180) / np.pi)}<|docstring|>Converted from David's code, which converted from Bob's code.
I9 internal simulation coordinates: x ray direction is positive x direction, positive z direction is upward, y direction can be determined by right hand rule.
I9 mic file coordinates: x, y directions are the same as the simulation coordinates.
I9 detector images (include bin and ascii files): J is the same as y, K is the opposite z direction.
The omega is along positive z direction.
Parameters
------------
g: array
One reciprocal vector in the sample frame when omega==0. Unit is ANGSTROM^-1.
energy:
Experimental parameters. If using 'wavelength', the unit is 10^-10 meter; if using 'energy', the unit is keV.
Returns
-------------
2Theta and eta are in radian, chi, omega_a and omega_b are in degree. omega_a corresponds to positive y direction scatter, omega_b to negative y direction scatter.<|endoftext|>
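A usage sketch with an arbitrary example G vector (values illustrative; units are Angstrom^-1 for g and keV for energy, as stated above):

import numpy as np

g = np.array([2.5, 1.0, 0.7])                      # example reciprocal vector in the sample frame
res = frankie_angles_from_g(g, verbo=False, energy=50)
if res != -1:                                      # -1 means the reflection is unreachable
    print(res['2Theta'], res['eta'])               # radians
    print(res['omega_a'], res['omega_b'])          # degrees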
1664484bd3026d18e9fb119dcfe468db4cb8bceb0e37727d767150332e30f9d8 | def GetProjectedVertex(Det1, sample, orien, etalimit, grainpos, getPeaksInfo=False, bIdx=True, omegaL=(- 90), omegaU=90, energy=50):
'\n Get the observable projected vertex on a single detector and their G vectors.\n Caution!!! This function only works for traditional nf-HEDM experiment setup.\n\n Parameters\n ------------\n Det1: Detector\n Remember to move this detector object to correct position first.\n sample: CrystalStr\n Must calculated G list\n orien: ndarray\n Active rotation matrix of orientation at that vertex\n etalimit: scalar\n Limit of eta value. Usually is about 85.\n grainpos: array\n Position of that vertex in mic file, unit is mm.\n energy: scalar\n X ray energy in the unit of KeV\n\n Returns\n ------------\n Peaks: ndarray\n N*3 ndarray, records position of each peak. The first column is the J value, second is K value, third is omega value in degree.\n Gs: ndarray\n N*3 ndarray, records corresponding G vector in sample frame.\n '
Peaks = []
Gs = []
PeaksInfo = []
rotatedG = orien.dot(sample.Gs.T).T
for ii in range(len(rotatedG)):
g1 = rotatedG[ii]
res = frankie_angles_from_g(g1, verbo=False, energy=energy)
if (res == (- 1)):
pass
elif (res['chi'] >= 90):
pass
elif (res['eta'] > etalimit):
pass
else:
if (omegaL <= res['omega_a'] <= omegaU):
omega = ((res['omega_a'] / 180.0) * np.pi)
newgrainx = ((np.cos(omega) * grainpos[0]) - (np.sin(omega) * grainpos[1]))
newgrainy = ((np.cos(omega) * grainpos[1]) + (np.sin(omega) * grainpos[0]))
idx = Det1.IntersectionIdx(np.array([newgrainx, newgrainy, 0]), res['2Theta'], res['eta'], bIdx)
if (idx != (- 1)):
Peaks.append([idx[0], idx[1], res['omega_a']])
Gs.append(g1)
if getPeaksInfo:
PeaksInfo.append({'WhichOmega': 'a', 'chi': res['chi'], 'omega_0': res['omega_0'], '2Theta': res['2Theta'], 'eta': res['eta'], 'hkl': sample.hkls[ii]})
if (omegaL <= res['omega_b'] <= omegaU):
omega = ((res['omega_b'] / 180.0) * np.pi)
newgrainx = ((np.cos(omega) * grainpos[0]) - (np.sin(omega) * grainpos[1]))
newgrainy = ((np.cos(omega) * grainpos[1]) + (np.sin(omega) * grainpos[0]))
idx = Det1.IntersectionIdx(np.array([newgrainx, newgrainy, 0]), res['2Theta'], (- res['eta']), bIdx)
if (idx != (- 1)):
Peaks.append([idx[0], idx[1], res['omega_b']])
Gs.append(g1)
if getPeaksInfo:
PeaksInfo.append({'WhichOmega': 'b', 'chi': res['chi'], 'omega_0': res['omega_0'], '2Theta': res['2Theta'], 'eta': (- res['eta']), 'hkl': sample.hkls[ii]})
Peaks = np.array(Peaks)
Gs = np.array(Gs)
if getPeaksInfo:
return (Peaks, Gs, PeaksInfo)
return (Peaks, Gs) | Get the observable projected vertex on a single detector and their G vectors.
Caution!!! This function only works for traditional nf-HEDM experiment setup.
Parameters
------------
Det1: Detector
Remember to move this detector object to correct position first.
sample: CrystalStr
Must have its G list already calculated.
orien: ndarray
Active rotation matrix of orientation at that vertex
etalimit: scalar
Limit of eta value. Usually is about 85.
grainpos: array
Position of that vertex in mic file, unit is mm.
energy: scalar
X ray energy in the unit of KeV
Returns
------------
Peaks: ndarray
N*3 ndarray, records position of each peak. The first column is the J value, second is K value, third is omega value in degree.
Gs: ndarray
N*3 ndarray, records corresponding G vector in sample frame. | util/Simulation.py | GetProjectedVertex | Yufeng-shen/StrainRecon | 0 | python | def GetProjectedVertex(Det1, sample, orien, etalimit, grainpos, getPeaksInfo=False, bIdx=True, omegaL=(- 90), omegaU=90, energy=50):
'\n Get the observable projected vertex on a single detector and their G vectors.\n Caution!!! This function only works for traditional nf-HEDM experiment setup.\n\n Parameters\n ------------\n Det1: Detector\n Remember to move this detector object to correct position first.\n sample: CrystalStr\n Must calculated G list\n orien: ndarray\n Active rotation matrix of orientation at that vertex\n etalimit: scalar\n Limit of eta value. Usually is about 85.\n grainpos: array\n Position of that vertex in mic file, unit is mm.\n energy: scalar\n X ray energy in the unit of KeV\n\n Returns\n ------------\n Peaks: ndarray\n N*3 ndarray, records position of each peak. The first column is the J value, second is K value, third is omega value in degree.\n Gs: ndarray\n N*3 ndarray, records corresponding G vector in sample frame.\n '
Peaks = []
Gs = []
PeaksInfo = []
rotatedG = orien.dot(sample.Gs.T).T
for ii in range(len(rotatedG)):
g1 = rotatedG[ii]
res = frankie_angles_from_g(g1, verbo=False, energy=energy)
if (res == (- 1)):
pass
elif (res['chi'] >= 90):
pass
elif (res['eta'] > etalimit):
pass
else:
if (omegaL <= res['omega_a'] <= omegaU):
omega = ((res['omega_a'] / 180.0) * np.pi)
newgrainx = ((np.cos(omega) * grainpos[0]) - (np.sin(omega) * grainpos[1]))
newgrainy = ((np.cos(omega) * grainpos[1]) + (np.sin(omega) * grainpos[0]))
idx = Det1.IntersectionIdx(np.array([newgrainx, newgrainy, 0]), res['2Theta'], res['eta'], bIdx)
if (idx != (- 1)):
Peaks.append([idx[0], idx[1], res['omega_a']])
Gs.append(g1)
if getPeaksInfo:
PeaksInfo.append({'WhichOmega': 'a', 'chi': res['chi'], 'omega_0': res['omega_0'], '2Theta': res['2Theta'], 'eta': res['eta'], 'hkl': sample.hkls[ii]})
if (omegaL <= res['omega_b'] <= omegaU):
omega = ((res['omega_b'] / 180.0) * np.pi)
newgrainx = ((np.cos(omega) * grainpos[0]) - (np.sin(omega) * grainpos[1]))
newgrainy = ((np.cos(omega) * grainpos[1]) + (np.sin(omega) * grainpos[0]))
idx = Det1.IntersectionIdx(np.array([newgrainx, newgrainy, 0]), res['2Theta'], (- res['eta']), bIdx)
if (idx != (- 1)):
Peaks.append([idx[0], idx[1], res['omega_b']])
Gs.append(g1)
if getPeaksInfo:
PeaksInfo.append({'WhichOmega': 'b', 'chi': res['chi'], 'omega_0': res['omega_0'], '2Theta': res['2Theta'], 'eta': (- res['eta']), 'hkl': sample.hkls[ii]})
Peaks = np.array(Peaks)
Gs = np.array(Gs)
if getPeaksInfo:
return (Peaks, Gs, PeaksInfo)
return (Peaks, Gs) | def GetProjectedVertex(Det1, sample, orien, etalimit, grainpos, getPeaksInfo=False, bIdx=True, omegaL=(- 90), omegaU=90, energy=50):
'\n Get the observable projected vertex on a single detector and their G vectors.\n Caution!!! This function only works for traditional nf-HEDM experiment setup.\n\n Parameters\n ------------\n Det1: Detector\n Remember to move this detector object to correct position first.\n sample: CrystalStr\n Must calculated G list\n orien: ndarray\n Active rotation matrix of orientation at that vertex\n etalimit: scalar\n Limit of eta value. Usually is about 85.\n grainpos: array\n Position of that vertex in mic file, unit is mm.\n energy: scalar\n X ray energy in the unit of KeV\n\n Returns\n ------------\n Peaks: ndarray\n N*3 ndarray, records position of each peak. The first column is the J value, second is K value, third is omega value in degree.\n Gs: ndarray\n N*3 ndarray, records corresponding G vector in sample frame.\n '
Peaks = []
Gs = []
PeaksInfo = []
rotatedG = orien.dot(sample.Gs.T).T
for ii in range(len(rotatedG)):
g1 = rotatedG[ii]
res = frankie_angles_from_g(g1, verbo=False, energy=energy)
if (res == (- 1)):
pass
elif (res['chi'] >= 90):
pass
elif (res['eta'] > etalimit):
pass
else:
if (omegaL <= res['omega_a'] <= omegaU):
omega = ((res['omega_a'] / 180.0) * np.pi)
newgrainx = ((np.cos(omega) * grainpos[0]) - (np.sin(omega) * grainpos[1]))
newgrainy = ((np.cos(omega) * grainpos[1]) + (np.sin(omega) * grainpos[0]))
idx = Det1.IntersectionIdx(np.array([newgrainx, newgrainy, 0]), res['2Theta'], res['eta'], bIdx)
if (idx != (- 1)):
Peaks.append([idx[0], idx[1], res['omega_a']])
Gs.append(g1)
if getPeaksInfo:
PeaksInfo.append({'WhichOmega': 'a', 'chi': res['chi'], 'omega_0': res['omega_0'], '2Theta': res['2Theta'], 'eta': res['eta'], 'hkl': sample.hkls[ii]})
if (omegaL <= res['omega_b'] <= omegaU):
omega = ((res['omega_b'] / 180.0) * np.pi)
newgrainx = ((np.cos(omega) * grainpos[0]) - (np.sin(omega) * grainpos[1]))
newgrainy = ((np.cos(omega) * grainpos[1]) + (np.sin(omega) * grainpos[0]))
idx = Det1.IntersectionIdx(np.array([newgrainx, newgrainy, 0]), res['2Theta'], (- res['eta']), bIdx)
if (idx != (- 1)):
Peaks.append([idx[0], idx[1], res['omega_b']])
Gs.append(g1)
if getPeaksInfo:
PeaksInfo.append({'WhichOmega': 'b', 'chi': res['chi'], 'omega_0': res['omega_0'], '2Theta': res['2Theta'], 'eta': (- res['eta']), 'hkl': sample.hkls[ii]})
Peaks = np.array(Peaks)
Gs = np.array(Gs)
if getPeaksInfo:
return (Peaks, Gs, PeaksInfo)
return (Peaks, Gs)<|docstring|>Get the observable projected vertex on a single detector and their G vectors.
Caution!!! This function only works for traditional nf-HEDM experiment setup.
Parameters
------------
Det1: Detector
Remember to move this detector object to correct position first.
sample: CrystalStr
Must have its G list already calculated.
orien: ndarray
Active rotation matrix of orientation at that vertex
etalimit: scalar
Limit of eta value. Usually is about 85.
grainpos: array
Position of that vertex in mic file, unit is mm.
energy: scalar
X ray energy in the unit of KeV
Returns
------------
Peaks: ndarray
N*3 ndarray, records position of each peak. The first column is the J value, second is K value, third is omega value in degree.
Gs: ndarray
N*3 ndarray, records corresponding G vector in sample frame.<|endoftext|> |
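A schematic call; det and sample must be constructed and positioned elsewhere (a Detector moved into place and a CrystalStr with its G list computed), and the orientation, grain position and eta limit below are placeholders. The eta limit is written in radians here because frankie_angles_from_g above returns eta in radians, although the docstring's 'about 85' reads like degrees:

import numpy as np

orien = np.eye(3)                                  # placeholder orientation matrix
grainpos = np.array([0.05, -0.02])                 # mm, mic-file frame (placeholder)
peaks, gs = GetProjectedVertex(det, sample, orien,
                               etalimit=np.radians(81),
                               grainpos=grainpos,
                               omegaL=-90, omegaU=90, energy=50)
# peaks rows are (J, K, omega in degrees); gs rows are the matching G vectors in the sample frame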
c759171fa6e1f200cb71807caac6ce0a6a3e8804222fcd140a1f3163ecda0029 | def digitize(xy):
'\n xy: ndarray shape(4,2)\n J and K indices in float, four points. This digitize method is far from ideal\n\n Returns\n -------------\n f: list\n list of integer tuples (J,K) that is hitted. (filled polygon)\n\n '
p = path.Path(xy)
def line(pixels, x0, y0, x1, y1):
if ((x0 == x1) and (y0 == y1)):
pixels.append((x0, y0))
return
brev = True
if (abs((y1 - y0)) <= abs((x1 - x0))):
(x0, y0, x1, y1) = (y0, x0, y1, x1)
brev = False
if (x1 < x0):
(x0, y0, x1, y1) = (x1, y1, x0, y0)
leny = abs((y1 - y0))
for i in range((leny + 1)):
if brev:
pixels.append(tuple(((int(round((Fraction(i, leny) * (x1 - x0)))) + x0), ((int((1 if (y1 > y0) else (- 1))) * i) + y0))))
else:
pixels.append(tuple((((int((1 if (y1 > y0) else (- 1))) * i) + y0), (int(round((Fraction(i, leny) * (x1 - x0)))) + x0))))
bnd = p.get_extents().get_points().astype(int)
ixy = xy.astype(int)
pixels = []
line(pixels, ixy[(0, 0)], ixy[(0, 1)], ixy[(1, 0)], ixy[(1, 1)])
line(pixels, ixy[(1, 0)], ixy[(1, 1)], ixy[(2, 0)], ixy[(2, 1)])
line(pixels, ixy[(2, 0)], ixy[(2, 1)], ixy[(3, 0)], ixy[(3, 1)])
line(pixels, ixy[(3, 0)], ixy[(3, 1)], ixy[(0, 0)], ixy[(0, 1)])
points = []
for jj in range(bnd[(0, 0)], (bnd[(1, 0)] + 1)):
for kk in range(bnd[(0, 1)], (bnd[(1, 1)] + 1)):
points.append((jj, kk))
points = np.asarray(points)
mask = p.contains_points(points)
ipoints = points[mask]
f = list([tuple(ii) for ii in ipoints])
f.extend(pixels)
return f | xy: ndarray shape(4,2)
J and K indices in float, four points. This digitize method is far from ideal
Returns
-------------
f: list
list of integer tuples (J,K) that are hit (filled polygon) | util/Simulation.py | digitize | Yufeng-shen/StrainRecon | 0 | python | def digitize(xy):
'\n xy: ndarray shape(4,2)\n J and K indices in float, four points. This digitize method is far from ideal\n\n Returns\n -------------\n f: list\n list of integer tuples (J,K) that is hitted. (filled polygon)\n\n '
p = path.Path(xy)
def line(pixels, x0, y0, x1, y1):
if ((x0 == x1) and (y0 == y1)):
pixels.append((x0, y0))
return
brev = True
if (abs((y1 - y0)) <= abs((x1 - x0))):
(x0, y0, x1, y1) = (y0, x0, y1, x1)
brev = False
if (x1 < x0):
(x0, y0, x1, y1) = (x1, y1, x0, y0)
leny = abs((y1 - y0))
for i in range((leny + 1)):
if brev:
pixels.append(tuple(((int(round((Fraction(i, leny) * (x1 - x0)))) + x0), ((int((1 if (y1 > y0) else (- 1))) * i) + y0))))
else:
pixels.append(tuple((((int((1 if (y1 > y0) else (- 1))) * i) + y0), (int(round((Fraction(i, leny) * (x1 - x0)))) + x0))))
bnd = p.get_extents().get_points().astype(int)
ixy = xy.astype(int)
pixels = []
line(pixels, ixy[(0, 0)], ixy[(0, 1)], ixy[(1, 0)], ixy[(1, 1)])
line(pixels, ixy[(1, 0)], ixy[(1, 1)], ixy[(2, 0)], ixy[(2, 1)])
line(pixels, ixy[(2, 0)], ixy[(2, 1)], ixy[(3, 0)], ixy[(3, 1)])
line(pixels, ixy[(3, 0)], ixy[(3, 1)], ixy[(0, 0)], ixy[(0, 1)])
points = []
for jj in range(bnd[(0, 0)], (bnd[(1, 0)] + 1)):
for kk in range(bnd[(0, 1)], (bnd[(1, 1)] + 1)):
points.append((jj, kk))
points = np.asarray(points)
mask = p.contains_points(points)
ipoints = points[mask]
f = list([tuple(ii) for ii in ipoints])
f.extend(pixels)
return f | def digitize(xy):
'\n xy: ndarray shape(4,2)\n J and K indices in float, four points. This digitize method is far from ideal\n\n Returns\n -------------\n f: list\n list of integer tuples (J,K) that is hitted. (filled polygon)\n\n '
p = path.Path(xy)
def line(pixels, x0, y0, x1, y1):
if ((x0 == x1) and (y0 == y1)):
pixels.append((x0, y0))
return
brev = True
if (abs((y1 - y0)) <= abs((x1 - x0))):
(x0, y0, x1, y1) = (y0, x0, y1, x1)
brev = False
if (x1 < x0):
(x0, y0, x1, y1) = (x1, y1, x0, y0)
leny = abs((y1 - y0))
for i in range((leny + 1)):
if brev:
pixels.append(tuple(((int(round((Fraction(i, leny) * (x1 - x0)))) + x0), ((int((1 if (y1 > y0) else (- 1))) * i) + y0))))
else:
pixels.append(tuple((((int((1 if (y1 > y0) else (- 1))) * i) + y0), (int(round((Fraction(i, leny) * (x1 - x0)))) + x0))))
bnd = p.get_extents().get_points().astype(int)
ixy = xy.astype(int)
pixels = []
line(pixels, ixy[(0, 0)], ixy[(0, 1)], ixy[(1, 0)], ixy[(1, 1)])
line(pixels, ixy[(1, 0)], ixy[(1, 1)], ixy[(2, 0)], ixy[(2, 1)])
line(pixels, ixy[(2, 0)], ixy[(2, 1)], ixy[(3, 0)], ixy[(3, 1)])
line(pixels, ixy[(3, 0)], ixy[(3, 1)], ixy[(0, 0)], ixy[(0, 1)])
points = []
for jj in range(bnd[(0, 0)], (bnd[(1, 0)] + 1)):
for kk in range(bnd[(0, 1)], (bnd[(1, 1)] + 1)):
points.append((jj, kk))
points = np.asarray(points)
mask = p.contains_points(points)
ipoints = points[mask]
f = list([tuple(ii) for ii in ipoints])
f.extend(pixels)
return f<|docstring|>xy: ndarray shape(4,2)
J and K indices in float, four points. This digitize method is far from ideal
Returns
-------------
f: list
list of integer tuples (J,K) that are hit (filled polygon)<|endoftext|>
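A usage sketch for digitize with a made-up quadrilateral of peak-footprint corners:

import numpy as np

corners = np.array([[10.2, 20.7],
                    [14.8, 21.1],
                    [15.3, 25.6],
                    [10.9, 25.2]])      # four (J, K) corner positions
pixels = digitize(corners)              # integer (J, K) tuples covering the filled polygon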
3f88052c4f0b2313aceb753633f595b83966c02beae0ced43948add7e15d97db | def BackProj(self, HitPos, omega, TwoTheta, eta):
'\n HitPos: ndarray (3,)\n The position of hitted point on lab coord, unit in mm\n '
scatterdir = np.array([np.cos(TwoTheta), (np.sin(TwoTheta) * np.sin(eta)), (np.sin(TwoTheta) * np.cos(eta))])
t = (HitPos[2] / (np.sin(TwoTheta) * np.cos(eta)))
x = (HitPos[0] - (t * np.cos(TwoTheta)))
y = (HitPos[1] - ((t * np.sin(TwoTheta)) * np.sin(eta)))
truex = ((np.cos(omega) * x) + (np.sin(omega) * y))
truey = (((- np.sin(omega)) * x) + (np.cos(omega) * y))
return np.array([truex, truey]) | HitPos: ndarray (3,)
The position of the hit point in lab coordinates, unit in mm | util/Simulation.py | BackProj | Yufeng-shen/StrainRecon | 0 | python | def BackProj(self, HitPos, omega, TwoTheta, eta):
'\n HitPos: ndarray (3,)\n The position of hitted point on lab coord, unit in mm\n '
scatterdir = np.array([np.cos(TwoTheta), (np.sin(TwoTheta) * np.sin(eta)), (np.sin(TwoTheta) * np.cos(eta))])
t = (HitPos[2] / (np.sin(TwoTheta) * np.cos(eta)))
x = (HitPos[0] - (t * np.cos(TwoTheta)))
y = (HitPos[1] - ((t * np.sin(TwoTheta)) * np.sin(eta)))
truex = ((np.cos(omega) * x) + (np.sin(omega) * y))
truey = (((- np.sin(omega)) * x) + (np.cos(omega) * y))
return np.array([truex, truey]) | def BackProj(self, HitPos, omega, TwoTheta, eta):
'\n HitPos: ndarray (3,)\n The position of hitted point on lab coord, unit in mm\n '
scatterdir = np.array([np.cos(TwoTheta), (np.sin(TwoTheta) * np.sin(eta)), (np.sin(TwoTheta) * np.cos(eta))])
t = (HitPos[2] / (np.sin(TwoTheta) * np.cos(eta)))
x = (HitPos[0] - (t * np.cos(TwoTheta)))
y = (HitPos[1] - ((t * np.sin(TwoTheta)) * np.sin(eta)))
truex = ((np.cos(omega) * x) + (np.sin(omega) * y))
truey = (((- np.sin(omega)) * x) + (np.cos(omega) * y))
return np.array([truex, truey])<|docstring|>HitPos: ndarray (3,)
The position of the hit point in lab coordinates, unit in mm<|endoftext|>
5edb27b47b45fe96c4b77cee9fe28542094b5590b7331b29deb0cb22a7dc56cb | def _get_file_as_dict(self, file_path):
'Open file path and return as dict.'
with open(file_path) as f:
return json.load(f) | Open file path and return as dict. | rdsslib/taxonomy/taxonomy_client.py | _get_file_as_dict | JiscSD/rdss-shared-libraries | 0 | python | def _get_file_as_dict(self, file_path):
with open(file_path) as f:
return json.load(f) | def _get_file_as_dict(self, file_path):
with open(file_path) as f:
return json.load(f)<|docstring|>Open file path and return as dict.<|endoftext|> |
0473eeadcf74c9df64e5abdb67293f9791104a52a03b6aa729f0418e698fac2c | def _get_vocab_dict(self, vocab_id):
'Get a vocabulary by ID.'
base_dir = self._get_filedir()
file_name = None
try:
file_name = VOCAB_FILE_LOOKUP[vocab_id]
except KeyError:
raise VocabularyNotFound
path = os.path.join(base_dir, file_name)
return self._get_file_as_dict(path) | Get a vocabulary by ID. | rdsslib/taxonomy/taxonomy_client.py | _get_vocab_dict | JiscSD/rdss-shared-libraries | 0 | python | def _get_vocab_dict(self, vocab_id):
base_dir = self._get_filedir()
file_name = None
try:
file_name = VOCAB_FILE_LOOKUP[vocab_id]
except KeyError:
raise VocabularyNotFound
path = os.path.join(base_dir, file_name)
return self._get_file_as_dict(path) | def _get_vocab_dict(self, vocab_id):
base_dir = self._get_filedir()
file_name = None
try:
file_name = VOCAB_FILE_LOOKUP[vocab_id]
except KeyError:
raise VocabularyNotFound
path = os.path.join(base_dir, file_name)
return self._get_file_as_dict(path)<|docstring|>Get a vocabulary by ID.<|endoftext|> |
7bfff14bad6cee278deaebef796e0056cf80272875c0a9634ec303b631dc9f6e | def get_by_name(self, vocab_id, name):
'Get a vocab item by name.'
values_dict = self._get_vocab_dict(vocab_id)
values = values_dict.get('vocabularyValues', [])
for val in values:
val_name = val['valueName']
if (val_name == name):
return val['valueId']
raise ValueNotFound | Get a vocab item by name. | rdsslib/taxonomy/taxonomy_client.py | get_by_name | JiscSD/rdss-shared-libraries | 0 | python | def get_by_name(self, vocab_id, name):
values_dict = self._get_vocab_dict(vocab_id)
values = values_dict.get('vocabularyValues', [])
for val in values:
val_name = val['valueName']
if (val_name == name):
return val['valueId']
raise ValueNotFound | def get_by_name(self, vocab_id, name):
values_dict = self._get_vocab_dict(vocab_id)
values = values_dict.get('vocabularyValues', [])
for val in values:
val_name = val['valueName']
if (val_name == name):
return val['valueId']
raise ValueNotFound<|docstring|>Get a vocab item by name.<|endoftext|> |
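
The three taxonomy-client records above assume each vocabulary file is a JSON document with a "vocabularyValues" list of {"valueId", "valueName"} entries. A small sketch of that layout and of the name-to-ID lookup done by get_by_name(); the concrete vocabulary contents are invented.

vocab = {
    "vocabularyId": 1,
    "vocabularyValues": [
        {"valueId": 1, "valueName": "open"},
        {"valueId": 2, "valueName": "restricted"},
    ],
}

def lookup_value_id(vocab_dict, name):
    # Mirrors get_by_name(): linear scan over vocabularyValues, return the matching ID.
    for val in vocab_dict.get("vocabularyValues", []):
        if val["valueName"] == name:
            return val["valueId"]
    raise KeyError(name)  # the client raises its own ValueNotFound here

print(lookup_value_id(vocab, "restricted"))  # -> 2
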
7acb5fbf249c945c6faace458b7e5bcc2ca9b48086967d8c269cfecd77667091 | def globus_initFlow():
'\n Retrieve cached/Create a new access token\n and use it to create an OAuth2WebServerFlow\n '
userAndPass = ('%s:%s' % (auth_settings.GLOBUS_OAUTH_ID, auth_settings.GLOBUS_OAUTH_SECRET))
b64_userAndPass = b64encode(userAndPass)
auth_header = ('Basic %s' % b64_userAndPass)
flow = OAuth2WebServerFlow(client_id=auth_settings.GLOBUS_OAUTH_ID, scope=auth_settings.GLOBUS_OAUTH_ATMOSPHERE_SCOPE, authorization_header=auth_header, redirect_uri=auth_settings.OAUTH_CLIENT_CALLBACK, auth_uri=auth_settings.GLOBUS_AUTH_URL, token_info_uri=auth_settings.GLOBUS_TOKENINFO_URL, token_uri=auth_settings.GLOBUS_TOKEN_URL)
return flow | Retrieve cached/Create a new access token
and use it to create an OAuth2WebServerFlow | django_cyverse_auth/protocol/globus.py | globus_initFlow | simpsonw/django-cyverse-auth | 1 | python | def globus_initFlow():
'\n Retrieve cached/Create a new access token\n and use it to create an OAuth2WebServerFlow\n '
userAndPass = ('%s:%s' % (auth_settings.GLOBUS_OAUTH_ID, auth_settings.GLOBUS_OAUTH_SECRET))
b64_userAndPass = b64encode(userAndPass)
auth_header = ('Basic %s' % b64_userAndPass)
flow = OAuth2WebServerFlow(client_id=auth_settings.GLOBUS_OAUTH_ID, scope=auth_settings.GLOBUS_OAUTH_ATMOSPHERE_SCOPE, authorization_header=auth_header, redirect_uri=auth_settings.OAUTH_CLIENT_CALLBACK, auth_uri=auth_settings.GLOBUS_AUTH_URL, token_info_uri=auth_settings.GLOBUS_TOKENINFO_URL, token_uri=auth_settings.GLOBUS_TOKEN_URL)
return flow | def globus_initFlow():
'\n Retrieve cached/Create a new access token\n and use it to create an OAuth2WebServerFlow\n '
userAndPass = ('%s:%s' % (auth_settings.GLOBUS_OAUTH_ID, auth_settings.GLOBUS_OAUTH_SECRET))
b64_userAndPass = b64encode(userAndPass)
auth_header = ('Basic %s' % b64_userAndPass)
flow = OAuth2WebServerFlow(client_id=auth_settings.GLOBUS_OAUTH_ID, scope=auth_settings.GLOBUS_OAUTH_ATMOSPHERE_SCOPE, authorization_header=auth_header, redirect_uri=auth_settings.OAUTH_CLIENT_CALLBACK, auth_uri=auth_settings.GLOBUS_AUTH_URL, token_info_uri=auth_settings.GLOBUS_TOKENINFO_URL, token_uri=auth_settings.GLOBUS_TOKEN_URL)
return flow<|docstring|>Retrieve cached/Create a new access token
and use it to create an OAuth2WebServerFlow<|endoftext|> |
d91225ab7a1147273349dcd31acc7c05d79923a47bfc1f32c0dcab9b33ce6bd4 | def globus_logout(redirect_uri, redirect_name='Jetstream'):
'\n Redirect to logout of globus\n '
flow = globus_initFlow()
auth_uri = flow.auth_uri
web_logout_url = auth_uri.replace('oauth2/authorize', 'web/logout')
web_logout_url += ('?client_id=%s&redirect_name=%s&redirect_uri=%s' % (flow.client_id, redirect_name, redirect_uri))
logger.info(web_logout_url)
return HttpResponseRedirect(web_logout_url) | Redirect to logout of globus | django_cyverse_auth/protocol/globus.py | globus_logout | simpsonw/django-cyverse-auth | 1 | python | def globus_logout(redirect_uri, redirect_name='Jetstream'):
'\n \n '
flow = globus_initFlow()
auth_uri = flow.auth_uri
web_logout_url = auth_uri.replace('oauth2/authorize', 'web/logout')
web_logout_url += ('?client_id=%s&redirect_name=%s&redirect_uri=%s' % (flow.client_id, redirect_name, redirect_uri))
logger.info(web_logout_url)
return HttpResponseRedirect(web_logout_url) | def globus_logout(redirect_uri, redirect_name='Jetstream'):
'\n \n '
flow = globus_initFlow()
auth_uri = flow.auth_uri
web_logout_url = auth_uri.replace('oauth2/authorize', 'web/logout')
web_logout_url += ('?client_id=%s&redirect_name=%s&redirect_uri=%s' % (flow.client_id, redirect_name, redirect_uri))
logger.info(web_logout_url)
return HttpResponseRedirect(web_logout_url)<|docstring|>Redirect to logout of globus<|endoftext|> |
1f54562ea0b4ffc36f04149dd61fb7fbac0699d8f181c501193be358e416f31b | def globus_authorize(request):
"\n Redirect to the IdP based on 'flow'\n "
flow = globus_initFlow()
auth_uri = flow.step1_get_authorize_url()
auth_uri += '&authentication_hint=36007761-2cf2-4e74-a068-7473afc1d054'
auth_uri = auth_uri.replace('access_type=offline', 'access_type=online')
logger.warn(auth_uri)
return HttpResponseRedirect(auth_uri) | Redirect to the IdP based on 'flow' | django_cyverse_auth/protocol/globus.py | globus_authorize | simpsonw/django-cyverse-auth | 1 | python | def globus_authorize(request):
"\n \n "
flow = globus_initFlow()
auth_uri = flow.step1_get_authorize_url()
auth_uri += '&authentication_hint=36007761-2cf2-4e74-a068-7473afc1d054'
auth_uri = auth_uri.replace('access_type=offline', 'access_type=online')
logger.warn(auth_uri)
return HttpResponseRedirect(auth_uri) | def globus_authorize(request):
"\n \n "
flow = globus_initFlow()
auth_uri = flow.step1_get_authorize_url()
auth_uri += '&authentication_hint=36007761-2cf2-4e74-a068-7473afc1d054'
auth_uri = auth_uri.replace('access_type=offline', 'access_type=online')
logger.warn(auth_uri)
return HttpResponseRedirect(auth_uri)<|docstring|>Redirect to the IdP based on 'flow'<|endoftext|> |
88c4e35a2f8edf664ffb0e321fee605538f164f895036249e4da4e977b279bc2 | def _extract_user_from_email(raw_username):
'\n Usernames come from the globus provider in the form:\n [email protected]\n '
if (not raw_username):
return None
return raw_username.split('@')[0] | Usernames come from the globus provider in the form:
[email protected] | django_cyverse_auth/protocol/globus.py | _extract_user_from_email | simpsonw/django-cyverse-auth | 1 | python | def _extract_user_from_email(raw_username):
'\n Usernames come from the globus provider in the form:\n [email protected]\n '
if (not raw_username):
return None
return raw_username.split('@')[0] | def _extract_user_from_email(raw_username):
'\n Usernames come from the globus provider in the form:\n [email protected]\n '
if (not raw_username):
return None
return raw_username.split('@')[0]<|docstring|>Usernames come from the globus provider in the form:
[email protected]<|endoftext|> |
2a8f612b8c2c35bc6eee227d0553c6636f997932ac4bfad3319915f082ca55f5 | def _map_email_to_user(raw_username):
'\n Input: [email protected]\n Output: test\n '
if (not auth_settings.GLOBUS_MAPPING_FILE):
logger.info('GLOBUS_MAPPING_FILE NOT defined. Check your auth settings!!')
return raw_username
if (not os.path.exists(auth_settings.GLOBUS_MAPPING_FILE)):
logger.warn(('GLOBUS_MAPPING_FILE %s does not exist!' % auth_settings.GLOBUS_MAPPING_FILE))
return None
try:
with open(auth_settings.GLOBUS_MAPPING_FILE) as the_file:
text = the_file.read()
user_mapping = json.loads(text)
except:
logger.warn(('GLOBUS_MAPPING_FILE %s is NOT VALID JSON!' % auth_settings.GLOBUS_MAPPING_FILE))
user_mapping = {}
if (raw_username not in user_mapping):
return None
username = user_mapping[raw_username]
logger.info(('GLOBUS_MAPPING_FILE identified %s -> %s' % (raw_username, username)))
return username | Input: [email protected]
Output: test | django_cyverse_auth/protocol/globus.py | _map_email_to_user | simpsonw/django-cyverse-auth | 1 | python | def _map_email_to_user(raw_username):
'\n Input: [email protected]\n Output: test\n '
if (not auth_settings.GLOBUS_MAPPING_FILE):
logger.info('GLOBUS_MAPPING_FILE NOT defined. Check your auth settings!!')
return raw_username
if (not os.path.exists(auth_settings.GLOBUS_MAPPING_FILE)):
logger.warn(('GLOBUS_MAPPING_FILE %s does not exist!' % auth_settings.GLOBUS_MAPPING_FILE))
return None
try:
with open(auth_settings.GLOBUS_MAPPING_FILE) as the_file:
text = the_file.read()
user_mapping = json.loads(text)
except:
logger.warn(('GLOBUS_MAPPING_FILE %s is NOT VALID JSON!' % auth_settings.GLOBUS_MAPPING_FILE))
user_mapping = {}
if (raw_username not in user_mapping):
return None
username = user_mapping[raw_username]
logger.info(('GLOBUS_MAPPING_FILE identified %s -> %s' % (raw_username, username)))
return username | def _map_email_to_user(raw_username):
'\n Input: [email protected]\n Output: test\n '
if (not auth_settings.GLOBUS_MAPPING_FILE):
logger.info('GLOBUS_MAPPING_FILE NOT defined. Check your auth settings!!')
return raw_username
if (not os.path.exists(auth_settings.GLOBUS_MAPPING_FILE)):
logger.warn(('GLOBUS_MAPPING_FILE %s does not exist!' % auth_settings.GLOBUS_MAPPING_FILE))
return None
try:
with open(auth_settings.GLOBUS_MAPPING_FILE) as the_file:
text = the_file.read()
user_mapping = json.loads(text)
except:
logger.warn(('GLOBUS_MAPPING_FILE %s is NOT VALID JSON!' % auth_settings.GLOBUS_MAPPING_FILE))
user_mapping = {}
if (raw_username not in user_mapping):
return None
username = user_mapping[raw_username]
logger.info(('GLOBUS_MAPPING_FILE identified %s -> %s' % (raw_username, username)))
return username<|docstring|>Input: [email protected]
Output: test<|endoftext|> |
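
The lookup above implies GLOBUS_MAPPING_FILE is a flat JSON object mapping the identity-provider username to the local username, with unmapped users rejected (None). A sketch with invented entries:

import json
import tempfile

mapping = {"[email protected]": "jdoe", "[email protected]": "asmith"}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as handle:
    json.dump(mapping, handle)
    mapping_path = handle.name

with open(mapping_path) as the_file:
    user_mapping = json.loads(the_file.read())

print(user_mapping.get("[email protected]"))      # -> "jdoe"
print(user_mapping.get("[email protected]"))  # -> None, mirroring the function above
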
178258e8b5201fc9603449f53b76b874ab3b0c6cc4e63eeadb94043f909952f9 | def globus_validate_code(request):
"\n This flow is used to create a new Token on behalf of a Service Client\n (Like Troposphere)\n Validates 'code' returned from the IdP\n If valid: Return new AuthToken to be passed to the Resource Provider.\n else: Return None\n "
code = request.GET.get('code', '')
error = request.GET.get('error', '')
error_description = request.GET.get('error_description', '')
if error:
error_msg = (('%s: %s' % (error, error_description)) if error_description else error)
raise Unauthorized(error_msg)
if (not code):
logger.warn('User returned from Login prompt but there was NO `code` to validate!')
return None
if (type(code) == list):
code = code[0]
flow = globus_initFlow()
try:
credentials = flow.step2_exchange(code)
logger.info(credentials.__dict__)
except OAuthError as err:
logger.exception('Error exchanging code w/ globus')
return None
except Exception as err:
logger.exception('Unknown Error occurred while exchanging code w/ globus')
return None
try:
user_access_token = parse_atmosphere_token(credentials.token_response)
token_profile = credentials.id_token
expiry_date = credentials.token_expiry
except Exception as err:
logger.exception('Parse of the credentials response failed. Ask a developer for help!')
return None
raw_username = token_profile['preferred_username']
email = token_profile['email']
username = _extract_user_from_email(raw_username)
if (not username):
logger.info(('No user provided in token_profile: Check output %s' % token_profile))
return None
full_name = token_profile['name']
issuer = token_profile['iss']
(first_name, last_name) = _extract_first_last_name(full_name)
username = username.lower()
user_profile = {'username': username, 'firstName': first_name, 'lastName': last_name, 'email': email}
user = get_or_create_user(user_profile['username'], user_profile)
auth_token = get_or_create_token(user, user_access_token, token_expire=expiry_date, issuer='OpenstackLoginBackend')
return auth_token | This flow is used to create a new Token on behalf of a Service Client
(Like Troposphere)
Validates 'code' returned from the IdP
If valid: Return new AuthToken to be passed to the Resource Provider.
else: Return None | django_cyverse_auth/protocol/globus.py | globus_validate_code | simpsonw/django-cyverse-auth | 1 | python | def globus_validate_code(request):
"\n This flow is used to create a new Token on behalf of a Service Client\n (Like Troposphere)\n Validates 'code' returned from the IdP\n If valid: Return new AuthToken to be passed to the Resource Provider.\n else: Return None\n "
code = request.GET.get('code', )
error = request.GET.get('error', )
error_description = request.GET.get('error_description', )
if error:
error_msg = (('%s: %s' % (error, error_description)) if error_description else error)
raise Unauthorized(error_msg)
if (not code):
logger.warn('User returned from Login prompt but there was NO `code` to validate!')
return None
if (type(code) == list):
code = code[0]
flow = globus_initFlow()
try:
credentials = flow.step2_exchange(code)
logger.info(credentials.__dict__)
except OAuthError as err:
logger.exception('Error exchanging code w/ globus')
return None
except Exception as err:
logger.exception('Unknown Error occurred while exchanging code w/ globus')
return None
try:
user_access_token = parse_atmosphere_token(credentials.token_response)
token_profile = credentials.id_token
expiry_date = credentials.token_expiry
except Exception as err:
logger.exception('Parse of the credentials response failed. Ask a developer for help!')
return None
raw_username = token_profile['preferred_username']
email = token_profile['email']
username = _extract_user_from_email(raw_username)
if (not username):
logger.info(('No user provided in token_profile: Check output %s' % token_profile))
return None
full_name = token_profile['name']
issuer = token_profile['iss']
(first_name, last_name) = _extract_first_last_name(full_name)
username = username.lower()
user_profile = {'username': username, 'firstName': first_name, 'lastName': last_name, 'email': email}
user = get_or_create_user(user_profile['username'], user_profile)
auth_token = get_or_create_token(user, user_access_token, token_expire=expiry_date, issuer='OpenstackLoginBackend')
return auth_token | def globus_validate_code(request):
"\n This flow is used to create a new Token on behalf of a Service Client\n (Like Troposphere)\n Validates 'code' returned from the IdP\n If valid: Return new AuthToken to be passed to the Resource Provider.\n else: Return None\n "
code = request.GET.get('code', )
error = request.GET.get('error', )
error_description = request.GET.get('error_description', )
if error:
error_msg = (('%s: %s' % (error, error_description)) if error_description else error)
raise Unauthorized(error_msg)
if (not code):
logger.warn('User returned from Login prompt but there was NO `code` to validate!')
return None
if (type(code) == list):
code = code[0]
flow = globus_initFlow()
try:
credentials = flow.step2_exchange(code)
logger.info(credentials.__dict__)
except OAuthError as err:
logger.exception('Error exchanging code w/ globus')
return None
except Exception as err:
logger.exception('Unknown Error occurred while exchanging code w/ globus')
return None
try:
user_access_token = parse_atmosphere_token(credentials.token_response)
token_profile = credentials.id_token
expiry_date = credentials.token_expiry
except Exception as err:
logger.exception('Parse of the credentials response failed. Ask a developer for help!')
return None
raw_username = token_profile['preferred_username']
email = token_profile['email']
username = _extract_user_from_email(raw_username)
if (not username):
logger.info(('No user provided in token_profile: Check output %s' % token_profile))
return None
full_name = token_profile['name']
issuer = token_profile['iss']
(first_name, last_name) = _extract_first_last_name(full_name)
username = username.lower()
user_profile = {'username': username, 'firstName': first_name, 'lastName': last_name, 'email': email}
user = get_or_create_user(user_profile['username'], user_profile)
auth_token = get_or_create_token(user, user_access_token, token_expire=expiry_date, issuer='OpenstackLoginBackend')
return auth_token<|docstring|>This flow is used to create a new Token on behalf of a Service Client
(Like Troposphere)
Validates 'code' returned from the IdP
If valid: Return new AuthToken to be passed to the Resource Provider.
else: Return None<|endoftext|> |
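
The globus_authorize() and globus_validate_code() records implement the standard two-step authorization-code exchange with oauth2client's OAuth2WebServerFlow. A hedged sketch of that flow in isolation; the client ID, secret, scope, redirect URI and endpoint URLs below are placeholders, not the values taken from auth_settings.

from oauth2client.client import OAuth2WebServerFlow

flow = OAuth2WebServerFlow(
    client_id="my-client-id",
    client_secret="my-client-secret",
    scope="openid profile email",
    redirect_uri="https://example.org/oauth2/callback",
    auth_uri="https://auth.example.org/oauth2/authorize",
    token_uri="https://auth.example.org/oauth2/token",
)

# Step 1: send the user to the identity provider.
print(flow.step1_get_authorize_url())

# Step 2: after the provider redirects back with ?code=..., exchange it.
# credentials = flow.step2_exchange(code)
# credentials.id_token then carries the claims read in the record above
# ('preferred_username', 'email', 'name', 'iss').
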
d2288a60ac3cf094c3f0c1023f40dd14b44376e9f76f4f7b52e3eead0e87a79f | def create_user_token_from_globus_profile(profile, access_token):
"\n Use this method on your Resource Provider (Like Atmosphere)\n to exchange a profile (that was retrieved via a tokeninfo endpoint)\n for a UserToken that can then be internally validated in an 'authorize' authBackend step..\n "
logger.info(profile)
expiry = profile['exp']
expiry = _extract_expiry_date(expiry)
issuer = profile['iss']
issued_at = profile['iat']
raw_username = profile['username']
raw_name = profile['name']
email = profile['email']
username = _extract_user_from_email(raw_username)
(first_name, last_name) = _extract_first_last_name(raw_name)
profile_dict = {'username': username, 'firstName': first_name, 'lastName': last_name, 'email': email}
user = get_or_create_user(profile_dict['username'], profile_dict)
auth_token = get_or_create_token(user, access_token, token_expire=expiry, issuer=issuer)
return auth_token | Use this method on your Resource Provider (Like Atmosphere)
to exchange a profile (that was retrieved via a tokeninfo endpoint)
for a UserToken that can then be internally validated in an 'authorize' authBackend step.. | django_cyverse_auth/protocol/globus.py | create_user_token_from_globus_profile | simpsonw/django-cyverse-auth | 1 | python | def create_user_token_from_globus_profile(profile, access_token):
"\n Use this method on your Resource Provider (Like Atmosphere)\n to exchange a profile (that was retrieved via a tokeninfo endpoint)\n for a UserToken that can then be internally validated in an 'authorize' authBackend step..\n "
logger.info(profile)
expiry = profile['exp']
expiry = _extract_expiry_date(expiry)
issuer = profile['iss']
issued_at = profile['iat']
raw_username = profile['username']
raw_name = profile['name']
email = profile['email']
username = _extract_user_from_email(raw_username)
(first_name, last_name) = _extract_first_last_name(raw_name)
profile_dict = {'username': username, 'firstName': first_name, 'lastName': last_name, 'email': email}
user = get_or_create_user(profile_dict['username'], profile_dict)
auth_token = get_or_create_token(user, access_token, token_expire=expiry, issuer=issuer)
return auth_token | def create_user_token_from_globus_profile(profile, access_token):
"\n Use this method on your Resource Provider (Like Atmosphere)\n to exchange a profile (that was retrieved via a tokeninfo endpoint)\n for a UserToken that can then be internally validated in an 'authorize' authBackend step..\n "
logger.info(profile)
expiry = profile['exp']
expiry = _extract_expiry_date(expiry)
issuer = profile['iss']
issued_at = profile['iat']
raw_username = profile['username']
raw_name = profile['name']
email = profile['email']
username = _extract_user_from_email(raw_username)
(first_name, last_name) = _extract_first_last_name(raw_name)
profile_dict = {'username': username, 'firstName': first_name, 'lastName': last_name, 'email': email}
user = get_or_create_user(profile_dict['username'], profile_dict)
auth_token = get_or_create_token(user, access_token, token_expire=expiry, issuer=issuer)
return auth_token<|docstring|>Use this method on your Resource Provider (Like Atmosphere)
to exchange a profile (that was retrieved via a tokeninfo endpoint)
for a UserToken that can then be internally validated in an 'authorize' authBackend step..<|endoftext|> |
2e5eeeda409a8c07640bed9b353d546016c6b031a8463107ab1f6d2073676277 | def analyze(model, force_warning=False, num_error_samples=1000, pre_equilibrium_approx=False, skip_checks=False, min_time=0.0, max_time=None):
'Perform all the analysis steps at once.'
curdir = os.getcwd()
analysis = Analysis(model, force_warning, num_error_samples)
analysis.extract_data(min_time=min_time, max_time=max_time)
if (not skip_checks):
analysis.check_extraction()
analysis.fill_out_data_samples()
analysis.process_data_samples(pre_equilibrium_approx)
os.chdir(curdir)
return analysis | Perform all the analysis steps at once. | seekr2/analyze.py | analyze | seekrcentral/seekr2 | 1 | python | def analyze(model, force_warning=False, num_error_samples=1000, pre_equilibrium_approx=False, skip_checks=False, min_time=0.0, max_time=None):
curdir = os.getcwd()
analysis = Analysis(model, force_warning, num_error_samples)
analysis.extract_data(min_time=min_time, max_time=max_time)
if (not skip_checks):
analysis.check_extraction()
analysis.fill_out_data_samples()
analysis.process_data_samples(pre_equilibrium_approx)
os.chdir(curdir)
return analysis | def analyze(model, force_warning=False, num_error_samples=1000, pre_equilibrium_approx=False, skip_checks=False, min_time=0.0, max_time=None):
curdir = os.getcwd()
analysis = Analysis(model, force_warning, num_error_samples)
analysis.extract_data(min_time=min_time, max_time=max_time)
if (not skip_checks):
analysis.check_extraction()
analysis.fill_out_data_samples()
analysis.process_data_samples(pre_equilibrium_approx)
os.chdir(curdir)
return analysis<|docstring|>Perform all the analysis steps at once.<|endoftext|> |
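
A usage sketch for the analyze() convenience function in this record, assuming the package layout implied by the path column (seekr2/analyze.py). The model-loading call and the path are assumptions for illustration; the attributes printed at the end are the ones filled in by the Analysis object in the records that follow.

import seekr2.modules.common_base as base   # assumed location of the model loader
import seekr2.analyze as analyze_module

model = base.load_model("/path/to/project/model.xml")   # assumed loader; path is a placeholder
analysis = analyze_module.analyze(model, num_error_samples=100,
                                  pre_equilibrium_approx=False, skip_checks=False)

print("k_off:", analysis.k_off, "+/-", analysis.k_off_error)
for state_pair, mfpt in analysis.MFPTs.items():
    print("MFPT", state_pair, mfpt)
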
45453bce1550f82eb976ab0029c9bbd313e355d215584423b4a873079d37e28b | def __init__(self, model, force_warning=False, num_error_samples=0):
'\n Creates the Analyze() object, which applies transition \n statistics and times, as well as MMVT theory, to compute \n kinetics and thermodynamics quantities.\n '
self.model = model
self.anchor_stats_list = []
self.main_data_sample = None
self.data_sample_list = []
self.pi_alpha = None
self.pi_alpha_error = None
self.p_i = None
self.p_i_error = None
self.free_energy_profile = None
self.free_energy_profile_err = None
self.MFPTs = {}
self.MFPTs_error = {}
self.k_off = None
self.k_off_error = None
self.k_ons = {}
self.k_ons_error = {}
self.force_warning = force_warning
self.num_error_samples = num_error_samples
return | Creates the Analyze() object, which applies transition
statistics and times, as well as MMVT theory, to compute
kinetics and thermodynamics quantities. | seekr2/analyze.py | __init__ | seekrcentral/seekr2 | 1 | python | def __init__(self, model, force_warning=False, num_error_samples=0):
'\n Creates the Analyze() object, which applies transition \n statistics and times, as well as MMVT theory, to compute \n kinetics and thermodynamics quantities.\n '
self.model = model
self.anchor_stats_list = []
self.main_data_sample = None
self.data_sample_list = []
self.pi_alpha = None
self.pi_alpha_error = None
self.p_i = None
self.p_i_error = None
self.free_energy_profile = None
self.free_energy_profile_err = None
self.MFPTs = {}
self.MFPTs_error = {}
self.k_off = None
self.k_off_error = None
self.k_ons = {}
self.k_ons_error = {}
self.force_warning = force_warning
self.num_error_samples = num_error_samples
return | def __init__(self, model, force_warning=False, num_error_samples=0):
'\n Creates the Analyze() object, which applies transition \n statistics and times, as well as MMVT theory, to compute \n kinetics and thermodynamics quantities.\n '
self.model = model
self.anchor_stats_list = []
self.main_data_sample = None
self.data_sample_list = []
self.pi_alpha = None
self.pi_alpha_error = None
self.p_i = None
self.p_i_error = None
self.free_energy_profile = None
self.free_energy_profile_err = None
self.MFPTs = {}
self.MFPTs_error = {}
self.k_off = None
self.k_off_error = None
self.k_ons = {}
self.k_ons_error = {}
self.force_warning = force_warning
self.num_error_samples = num_error_samples
return<|docstring|>Creates the Analyze() object, which applies transition
statistics and times, as well as MMVT theory, to compute
kinetics and thermodynamics quantities.<|endoftext|> |
13b700c2ccf45841ae8a69d7a71e614eba5322a315c47cf8cc053ab7f81b72f4 | def elber_check_anchor_stats(self, silent=False):
'\n Check the anchor statistics to make sure that enough bounces\n have been observed to perform the analysis\n '
anchors_missing_statistics = []
for (i, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
anchor_stats = self.anchor_stats_list[i]
existing_alias_ids = []
existing_alias_transitions = []
for key in anchor_stats.N_i_j:
existing_alias_transitions.append(key)
existing_alias_transitions.append(2)
for milestone in anchor.milestones:
found_problem = False
if (milestone.alias_index not in existing_alias_transitions):
anchors_missing_statistics.append(anchor.index)
break
if found_problem:
break
if (len(anchors_missing_statistics) > 0):
if silent:
return False
else:
error_warning_string = 'Anchor(s) {0} are missing sufficient statistics. Consider running simulations of anchor(s) {0} for longer time scales or readjust anchor locations to make transitions more frequent. You may skip this check with the --skip_checks (-s) option.'.format(anchors_missing_statistics)
if self.force_warning:
warnings.warn(error_warning_string)
else:
raise common_analyze.MissingStatisticsError(error_warning_string)
return True | Check the anchor statistics to make sure that enough bounces
have been observed to perform the analysis | seekr2/analyze.py | elber_check_anchor_stats | seekrcentral/seekr2 | 1 | python | def elber_check_anchor_stats(self, silent=False):
'\n Check the anchor statistics to make sure that enough bounces\n have been observed to perform the analysis\n '
anchors_missing_statistics = []
for (i, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
anchor_stats = self.anchor_stats_list[i]
existing_alias_ids = []
existing_alias_transitions = []
for key in anchor_stats.N_i_j:
existing_alias_transitions.append(key)
existing_alias_transitions.append(2)
for milestone in anchor.milestones:
found_problem = False
if (milestone.alias_index not in existing_alias_transitions):
anchors_missing_statistics.append(anchor.index)
break
if found_problem:
break
if (len(anchors_missing_statistics) > 0):
if silent:
return False
else:
error_warning_string = 'Anchor(s) {0} are missing sufficient statistics. Consider running simulations of anchor(s) {0} for longer time scales or readjust anchor locations to make transitions more frequent. You may skip this check with the --skip_checks (-s) option.'.format(anchors_missing_statistics)
if self.force_warning:
warnings.warn(error_warning_string)
else:
raise common_analyze.MissingStatisticsError(error_warning_string)
return True | def elber_check_anchor_stats(self, silent=False):
'\n Check the anchor statistics to make sure that enough bounces\n have been observed to perform the analysis\n '
anchors_missing_statistics = []
for (i, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
anchor_stats = self.anchor_stats_list[i]
existing_alias_ids = []
existing_alias_transitions = []
for key in anchor_stats.N_i_j:
existing_alias_transitions.append(key)
existing_alias_transitions.append(2)
for milestone in anchor.milestones:
found_problem = False
if (milestone.alias_index not in existing_alias_transitions):
anchors_missing_statistics.append(anchor.index)
break
if found_problem:
break
if (len(anchors_missing_statistics) > 0):
if silent:
return False
else:
error_warning_string = 'Anchor(s) {0} are missing sufficient statistics. Consider running simulations of anchor(s) {0} for longer time scales or readjust anchor locations to make transitions more frequent. You may skip this check with the --skip_checks (-s) option.'.format(anchors_missing_statistics)
if self.force_warning:
warnings.warn(error_warning_string)
else:
raise common_analyze.MissingStatisticsError(error_warning_string)
return True<|docstring|>Check the anchor statistics to make sure that enough bounces
have been observed to perform the analysis<|endoftext|> |
76e92edbad392b7b8b71a992b048720fdbc93f7d160be48cd26a7c1f17d3b96d | def mmvt_check_anchor_stats(self, silent=False):
'\n Check the anchor statistics to make sure that enough transitions\n have been observed to perform the analysis\n '
anchors_missing_statistics = []
for (i, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
anchor_stats = self.anchor_stats_list[i]
existing_alias_ids = []
existing_alias_transitions = []
for key in anchor_stats.k_alpha_beta:
existing_alias_ids.append(key)
assert (anchor_stats.k_alpha_beta[key] >= 0.0), 'negative k_alpha_beta values not allowed.'
for key in anchor_stats.N_i_j_alpha:
existing_alias_transitions.append(key)
for milestone in anchor.milestones:
found_problem = False
if (milestone.alias_index not in existing_alias_ids):
anchors_missing_statistics.append(anchor.index)
break
for milestone2 in anchor.milestones:
if (milestone.alias_index == milestone2.alias_index):
continue
if ((milestone.alias_index, milestone2.alias_index) not in existing_alias_transitions):
anchors_missing_statistics.append(anchor.index)
found_problem = True
break
if found_problem:
break
if (len(anchors_missing_statistics) > 0):
if silent:
return False
else:
error_warning_string = 'Anchor(s) {0} are missing sufficient statistics. Consider running simulations of anchor(s) {0} for longer time scales or readjust anchor locations to make transitions more frequent. You may skip this check with the --skip_checks (-s) option.'.format(anchors_missing_statistics)
if self.force_warning:
warnings.warn(error_warning_string)
else:
raise common_analyze.MissingStatisticsError(error_warning_string)
return True | Check the anchor statistics to make sure that enough transitions
have been observed to perform the analysis | seekr2/analyze.py | mmvt_check_anchor_stats | seekrcentral/seekr2 | 1 | python | def mmvt_check_anchor_stats(self, silent=False):
'\n Check the anchor statistics to make sure that enough transitions\n have been observed to perform the analysis\n '
anchors_missing_statistics = []
for (i, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
anchor_stats = self.anchor_stats_list[i]
existing_alias_ids = []
existing_alias_transitions = []
for key in anchor_stats.k_alpha_beta:
existing_alias_ids.append(key)
assert (anchor_stats.k_alpha_beta[key] >= 0.0), 'negative k_alpha_beta values not allowed.'
for key in anchor_stats.N_i_j_alpha:
existing_alias_transitions.append(key)
for milestone in anchor.milestones:
found_problem = False
if (milestone.alias_index not in existing_alias_ids):
anchors_missing_statistics.append(anchor.index)
break
for milestone2 in anchor.milestones:
if (milestone.alias_index == milestone2.alias_index):
continue
if ((milestone.alias_index, milestone2.alias_index) not in existing_alias_transitions):
anchors_missing_statistics.append(anchor.index)
found_problem = True
break
if found_problem:
break
if (len(anchors_missing_statistics) > 0):
if silent:
return False
else:
error_warning_string = 'Anchor(s) {0} are missing sufficient statistics. Consider running simulations of anchor(s) {0} for longer time scales or readjust anchor locations to make transitions more frequent. You may skip this check with the --skip_checks (-s) option.'.format(anchors_missing_statistics)
if self.force_warning:
warnings.warn(error_warning_string)
else:
raise common_analyze.MissingStatisticsError(error_warning_string)
return True | def mmvt_check_anchor_stats(self, silent=False):
'\n Check the anchor statistics to make sure that enough transitions\n have been observed to perform the analysis\n '
anchors_missing_statistics = []
for (i, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
anchor_stats = self.anchor_stats_list[i]
existing_alias_ids = []
existing_alias_transitions = []
for key in anchor_stats.k_alpha_beta:
existing_alias_ids.append(key)
assert (anchor_stats.k_alpha_beta[key] >= 0.0), 'negative k_alpha_beta values not allowed.'
for key in anchor_stats.N_i_j_alpha:
existing_alias_transitions.append(key)
for milestone in anchor.milestones:
found_problem = False
if (milestone.alias_index not in existing_alias_ids):
anchors_missing_statistics.append(anchor.index)
break
for milestone2 in anchor.milestones:
if (milestone.alias_index == milestone2.alias_index):
continue
if ((milestone.alias_index, milestone2.alias_index) not in existing_alias_transitions):
anchors_missing_statistics.append(anchor.index)
found_problem = True
break
if found_problem:
break
if (len(anchors_missing_statistics) > 0):
if silent:
return False
else:
error_warning_string = 'Anchor(s) {0} are missing sufficient statistics. Consider running simulations of anchor(s) {0} for longer time scales or readjust anchor locations to make transitions more frequent. You may skip this check with the --skip_checks (-s) option.'.format(anchors_missing_statistics)
if self.force_warning:
warnings.warn(error_warning_string)
else:
raise common_analyze.MissingStatisticsError(error_warning_string)
return True<|docstring|>Check the anchor statistics to make sure that enough transitions
have been observed to perform the analysis<|endoftext|> |
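
The check above boils down to: every ordered pair of distinct milestone aliases within an anchor must appear as a key of that anchor's N_i_j_alpha (and every alias as a key of k_alpha_beta). A toy version of the pair test, with invented aliases and counts:

from itertools import permutations

milestone_aliases = [1, 2, 3]                            # aliases defined for one anchor
N_i_j_alpha = {(1, 2): 40, (2, 1): 38, (2, 3): 11, (3, 2): 9}

required = set(permutations(milestone_aliases, 2))
missing = sorted(required - set(N_i_j_alpha))
print("missing transitions:", missing)                   # [(1, 3), (3, 1)] -> insufficient statistics
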
7b8493c4d09a31fc212fd844c2171df2f3c8a99e3ce662266cb1b5fb40e6befc | def extract_data(self, min_time=None, max_time=None, max_step_list=None, silence_errors=True):
'\n Extract the data from simulations used in this analysis.\n '
files_already_read = False
if (len(self.anchor_stats_list) > 0):
files_already_read = True
if (self.model.openmm_settings is not None):
timestep = self.model.openmm_settings.langevin_integrator.timestep
elif (self.model.namd_settings is not None):
timestep = self.model.namd_settings.langevin_integrator.timestep
else:
raise Exception('No OpenMM or NAMD simulation settings in model.')
for (alpha, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
if (max_step_list is not None):
max_time = (max_step_list[alpha] * timestep)
else:
max_time = max_time
if (not files_already_read):
if (self.model.get_type() == 'mmvt'):
anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha)
elif (self.model.get_type() == 'elber'):
anchor_stats = elber_analyze.Elber_anchor_statistics(alpha)
else:
anchor_stats = self.anchor_stats_list[alpha]
if anchor.md:
output_file_glob = os.path.join(self.model.anchor_rootdir, anchor.directory, anchor.production_directory, anchor.md_output_glob)
output_file_list = glob.glob(output_file_glob)
output_file_list = base.order_files_numerically(output_file_list)
if (not silence_errors):
assert (len(output_file_list) > 0), ('Files not found: %s' % output_file_glob)
if (self.model.openmm_settings is not None):
anchor_stats.read_output_file_list('openmm', output_file_list, min_time, max_time, anchor, timestep)
elif (self.model.namd_settings is not None):
anchor_stats.read_output_file_list('namd', output_file_list, min_time, max_time, anchor, timestep)
else:
raise Exception('Both OpenMM and NAMD settings missing. One of these must be present in the model XML.')
else:
pass
if (not files_already_read):
self.anchor_stats_list.append(anchor_stats)
return | Extract the data from simulations used in this analysis. | seekr2/analyze.py | extract_data | seekrcentral/seekr2 | 1 | python | def extract_data(self, min_time=None, max_time=None, max_step_list=None, silence_errors=True):
'\n \n '
files_already_read = False
if (len(self.anchor_stats_list) > 0):
files_already_read = True
if (self.model.openmm_settings is not None):
timestep = self.model.openmm_settings.langevin_integrator.timestep
elif (self.model.namd_settings is not None):
timestep = self.model.namd_settings.langevin_integrator.timestep
else:
raise Exception('No OpenMM or NAMD simulation settings in model.')
for (alpha, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
if (max_step_list is not None):
max_time = (max_step_list[alpha] * timestep)
else:
max_time = max_time
if (not files_already_read):
if (self.model.get_type() == 'mmvt'):
anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha)
elif (self.model.get_type() == 'elber'):
anchor_stats = elber_analyze.Elber_anchor_statistics(alpha)
else:
anchor_stats = self.anchor_stats_list[alpha]
if anchor.md:
output_file_glob = os.path.join(self.model.anchor_rootdir, anchor.directory, anchor.production_directory, anchor.md_output_glob)
output_file_list = glob.glob(output_file_glob)
output_file_list = base.order_files_numerically(output_file_list)
if (not silence_errors):
assert (len(output_file_list) > 0), ('Files not found: %s' % output_file_glob)
if (self.model.openmm_settings is not None):
anchor_stats.read_output_file_list('openmm', output_file_list, min_time, max_time, anchor, timestep)
elif (self.model.namd_settings is not None):
anchor_stats.read_output_file_list('namd', output_file_list, min_time, max_time, anchor, timestep)
else:
raise Exception('Both OpenMM and NAMD settings missing. One of these must be present in the model XML.')
else:
pass
if (not files_already_read):
self.anchor_stats_list.append(anchor_stats)
return | def extract_data(self, min_time=None, max_time=None, max_step_list=None, silence_errors=True):
'\n \n '
files_already_read = False
if (len(self.anchor_stats_list) > 0):
files_already_read = True
if (self.model.openmm_settings is not None):
timestep = self.model.openmm_settings.langevin_integrator.timestep
elif (self.model.namd_settings is not None):
timestep = self.model.namd_settings.langevin_integrator.timestep
else:
raise Exception('No OpenMM or NAMD simulation settings in model.')
for (alpha, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
if (max_step_list is not None):
max_time = (max_step_list[alpha] * timestep)
else:
max_time = max_time
if (not files_already_read):
if (self.model.get_type() == 'mmvt'):
anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha)
elif (self.model.get_type() == 'elber'):
anchor_stats = elber_analyze.Elber_anchor_statistics(alpha)
else:
anchor_stats = self.anchor_stats_list[alpha]
if anchor.md:
output_file_glob = os.path.join(self.model.anchor_rootdir, anchor.directory, anchor.production_directory, anchor.md_output_glob)
output_file_list = glob.glob(output_file_glob)
output_file_list = base.order_files_numerically(output_file_list)
if (not silence_errors):
assert (len(output_file_list) > 0), ('Files not found: %s' % output_file_glob)
if (self.model.openmm_settings is not None):
anchor_stats.read_output_file_list('openmm', output_file_list, min_time, max_time, anchor, timestep)
elif (self.model.namd_settings is not None):
anchor_stats.read_output_file_list('namd', output_file_list, min_time, max_time, anchor, timestep)
else:
raise Exception('Both OpenMM and NAMD settings missing. One of these must be present in the model XML.')
else:
pass
if (not files_already_read):
self.anchor_stats_list.append(anchor_stats)
return<|docstring|>Extract the data from simulations used in this analysis.<|endoftext|> |
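
glob() returns files in arbitrary order, which is why the record sorts them with base.order_files_numerically() before parsing. A stand-in with the same intent (an assumption, not the seekr2 implementation) that sorts on the integers embedded in each file name:

import re

def order_files_numerically(file_list):
    def numeric_key(path):
        return [int(token) for token in re.findall(r"\d+", path)]
    return sorted(file_list, key=numeric_key)

print(order_files_numerically(["mmvt.restart2.out", "mmvt.restart10.out", "mmvt.restart1.out"]))
# -> ['mmvt.restart1.out', 'mmvt.restart2.out', 'mmvt.restart10.out']
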
a0d8afd58160615446cf60049bb2bb637cc640f5d31f54d96895f17aee96e71b | def check_extraction(self, silent=False):
'\n Check whether sufficient and correct anchor statistics can \n be used for analysis.\n '
if (self.model.get_type() == 'mmvt'):
result = self.mmvt_check_anchor_stats(silent)
if (self.model.get_type() == 'elber'):
result = self.elber_check_anchor_stats(silent)
return result | Check whether sufficient and correct anchor statistics can
be used for analysis. | seekr2/analyze.py | check_extraction | seekrcentral/seekr2 | 1 | python | def check_extraction(self, silent=False):
'\n Check whether sufficient and correct anchor statistics can \n be used for analysis.\n '
if (self.model.get_type() == 'mmvt'):
result = self.mmvt_check_anchor_stats(silent)
if (self.model.get_type() == 'elber'):
result = self.elber_check_anchor_stats(silent)
return result | def check_extraction(self, silent=False):
'\n Check whether sufficient and correct anchor statistics can \n be used for analysis.\n '
if (self.model.get_type() == 'mmvt'):
result = self.mmvt_check_anchor_stats(silent)
if (self.model.get_type() == 'elber'):
result = self.elber_check_anchor_stats(silent)
return result<|docstring|>Check whether sufficient and correct anchor statistics can
be used for analysis.<|endoftext|> |
fc5043d35a61bf954f5a3b37d3018ec9fa25c219b63aaf1af148665c65c515da | def fill_out_data_samples_mmvt(self):
'\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using MMVT milestoning.\n '
N_alpha_beta = defaultdict(int)
k_alpha_beta = defaultdict(float)
N_i_j_alpha = []
R_i_alpha_total = []
R_i_alpha_average = []
R_i_alpha_std_dev = []
R_i_alpha_count = []
T_alpha_total = []
T_alpha_average = []
T_alpha_std_dev = []
T_alpha_count = []
for (alpha, anchor1) in enumerate(self.model.anchors):
if anchor1.bulkstate:
continue
anchor_N_alpha_beta = self.anchor_stats_list[alpha].N_alpha_beta
anchor_k_alpha_beta = self.anchor_stats_list[alpha].k_alpha_beta
for (beta, anchor2) in enumerate(self.model.anchors):
if anchor2.bulkstate:
continue
if (alpha == beta):
continue
id_alias = anchor1.alias_from_neighbor_id(anchor2.index)
if (id_alias is None):
continue
if (id_alias in anchor_N_alpha_beta):
N_alpha_beta[(alpha, beta)] = anchor_N_alpha_beta[id_alias]
k_alpha_beta[(alpha, beta)] = anchor_k_alpha_beta[id_alias]
else:
N_alpha_beta[(alpha, beta)] = 0
k_alpha_beta[(alpha, beta)] = 0.0
anchor_N_i_j_alpha = self.anchor_stats_list[alpha].N_i_j_alpha
N_i_j_alpha_element = defaultdict(int)
for key in anchor_N_i_j_alpha:
(alias_id_i, alias_id_j) = key
id_i = anchor1.id_from_alias(alias_id_i)
id_j = anchor1.id_from_alias(alias_id_j)
new_key = (id_i, id_j)
N_i_j_alpha_element[new_key] = anchor_N_i_j_alpha[key]
N_i_j_alpha.append(N_i_j_alpha_element)
anchor_R_i_alpha = self.anchor_stats_list[alpha].R_i_alpha_total
anchor_R_i_alpha_std = self.anchor_stats_list[alpha].R_i_alpha_std_dev
anchor_R_i_alpha_list = self.anchor_stats_list[alpha].R_i_alpha_list
R_i_alpha_element = defaultdict(float)
R_i_alpha_count_element = defaultdict(int)
R_i_alpha_std_element = defaultdict(float)
for key in anchor_R_i_alpha:
alias_id_i = key
id_i = anchor1.id_from_alias(alias_id_i)
R_i_alpha_element[id_i] = anchor_R_i_alpha[key]
R_i_alpha_std_element[id_i] = anchor_R_i_alpha_std[key]
R_i_alpha_count_element[id_i] = len(anchor_R_i_alpha_list[key])
R_i_alpha_total.append(R_i_alpha_element)
R_i_alpha_std_dev.append(R_i_alpha_std_element)
R_i_alpha_count.append(R_i_alpha_count_element)
anchor_T_alpha = self.anchor_stats_list[alpha].T_alpha_total
anchor_T_alpha_std = self.anchor_stats_list[alpha].T_alpha_std_dev
anchor_T_alpha_list = self.anchor_stats_list[alpha].T_alpha_list
T_alpha_total.append(anchor_T_alpha)
T_alpha_std_dev.append(anchor_T_alpha_std)
T_alpha_count.append(len(anchor_T_alpha_list))
self.main_data_sample = mmvt_analyze.MMVT_data_sample(self.model, N_alpha_beta, k_alpha_beta, N_i_j_alpha, R_i_alpha_total, T_alpha_total)
return | Now that the statistics for each anchor have been extracted
from the output files, construct the global transition
statistics objects. Applies to systems using MMVT milestoning. | seekr2/analyze.py | fill_out_data_samples_mmvt | seekrcentral/seekr2 | 1 | python | def fill_out_data_samples_mmvt(self):
'\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using MMVT milestoning.\n '
N_alpha_beta = defaultdict(int)
k_alpha_beta = defaultdict(float)
N_i_j_alpha = []
R_i_alpha_total = []
R_i_alpha_average = []
R_i_alpha_std_dev = []
R_i_alpha_count = []
T_alpha_total = []
T_alpha_average = []
T_alpha_std_dev = []
T_alpha_count = []
for (alpha, anchor1) in enumerate(self.model.anchors):
if anchor1.bulkstate:
continue
anchor_N_alpha_beta = self.anchor_stats_list[alpha].N_alpha_beta
anchor_k_alpha_beta = self.anchor_stats_list[alpha].k_alpha_beta
for (beta, anchor2) in enumerate(self.model.anchors):
if anchor2.bulkstate:
continue
if (alpha == beta):
continue
id_alias = anchor1.alias_from_neighbor_id(anchor2.index)
if (id_alias is None):
continue
if (id_alias in anchor_N_alpha_beta):
N_alpha_beta[(alpha, beta)] = anchor_N_alpha_beta[id_alias]
k_alpha_beta[(alpha, beta)] = anchor_k_alpha_beta[id_alias]
else:
N_alpha_beta[(alpha, beta)] = 0
k_alpha_beta[(alpha, beta)] = 0.0
anchor_N_i_j_alpha = self.anchor_stats_list[alpha].N_i_j_alpha
N_i_j_alpha_element = defaultdict(int)
for key in anchor_N_i_j_alpha:
(alias_id_i, alias_id_j) = key
id_i = anchor1.id_from_alias(alias_id_i)
id_j = anchor1.id_from_alias(alias_id_j)
new_key = (id_i, id_j)
N_i_j_alpha_element[new_key] = anchor_N_i_j_alpha[key]
N_i_j_alpha.append(N_i_j_alpha_element)
anchor_R_i_alpha = self.anchor_stats_list[alpha].R_i_alpha_total
anchor_R_i_alpha_std = self.anchor_stats_list[alpha].R_i_alpha_std_dev
anchor_R_i_alpha_list = self.anchor_stats_list[alpha].R_i_alpha_list
R_i_alpha_element = defaultdict(float)
R_i_alpha_count_element = defaultdict(int)
R_i_alpha_std_element = defaultdict(float)
for key in anchor_R_i_alpha:
alias_id_i = key
id_i = anchor1.id_from_alias(alias_id_i)
R_i_alpha_element[id_i] = anchor_R_i_alpha[key]
R_i_alpha_std_element[id_i] = anchor_R_i_alpha_std[key]
R_i_alpha_count_element[id_i] = len(anchor_R_i_alpha_list[key])
R_i_alpha_total.append(R_i_alpha_element)
R_i_alpha_std_dev.append(R_i_alpha_std_element)
R_i_alpha_count.append(R_i_alpha_count_element)
anchor_T_alpha = self.anchor_stats_list[alpha].T_alpha_total
anchor_T_alpha_std = self.anchor_stats_list[alpha].T_alpha_std_dev
anchor_T_alpha_list = self.anchor_stats_list[alpha].T_alpha_list
T_alpha_total.append(anchor_T_alpha)
T_alpha_std_dev.append(anchor_T_alpha_std)
T_alpha_count.append(len(anchor_T_alpha_list))
self.main_data_sample = mmvt_analyze.MMVT_data_sample(self.model, N_alpha_beta, k_alpha_beta, N_i_j_alpha, R_i_alpha_total, T_alpha_total)
return | def fill_out_data_samples_mmvt(self):
'\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using MMVT milestoning.\n '
N_alpha_beta = defaultdict(int)
k_alpha_beta = defaultdict(float)
N_i_j_alpha = []
R_i_alpha_total = []
R_i_alpha_average = []
R_i_alpha_std_dev = []
R_i_alpha_count = []
T_alpha_total = []
T_alpha_average = []
T_alpha_std_dev = []
T_alpha_count = []
for (alpha, anchor1) in enumerate(self.model.anchors):
if anchor1.bulkstate:
continue
anchor_N_alpha_beta = self.anchor_stats_list[alpha].N_alpha_beta
anchor_k_alpha_beta = self.anchor_stats_list[alpha].k_alpha_beta
for (beta, anchor2) in enumerate(self.model.anchors):
if anchor2.bulkstate:
continue
if (alpha == beta):
continue
id_alias = anchor1.alias_from_neighbor_id(anchor2.index)
if (id_alias is None):
continue
if (id_alias in anchor_N_alpha_beta):
N_alpha_beta[(alpha, beta)] = anchor_N_alpha_beta[id_alias]
k_alpha_beta[(alpha, beta)] = anchor_k_alpha_beta[id_alias]
else:
N_alpha_beta[(alpha, beta)] = 0
k_alpha_beta[(alpha, beta)] = 0.0
anchor_N_i_j_alpha = self.anchor_stats_list[alpha].N_i_j_alpha
N_i_j_alpha_element = defaultdict(int)
for key in anchor_N_i_j_alpha:
(alias_id_i, alias_id_j) = key
id_i = anchor1.id_from_alias(alias_id_i)
id_j = anchor1.id_from_alias(alias_id_j)
new_key = (id_i, id_j)
N_i_j_alpha_element[new_key] = anchor_N_i_j_alpha[key]
N_i_j_alpha.append(N_i_j_alpha_element)
anchor_R_i_alpha = self.anchor_stats_list[alpha].R_i_alpha_total
anchor_R_i_alpha_std = self.anchor_stats_list[alpha].R_i_alpha_std_dev
anchor_R_i_alpha_list = self.anchor_stats_list[alpha].R_i_alpha_list
R_i_alpha_element = defaultdict(float)
R_i_alpha_count_element = defaultdict(int)
R_i_alpha_std_element = defaultdict(float)
for key in anchor_R_i_alpha:
alias_id_i = key
id_i = anchor1.id_from_alias(alias_id_i)
R_i_alpha_element[id_i] = anchor_R_i_alpha[key]
R_i_alpha_std_element[id_i] = anchor_R_i_alpha_std[key]
R_i_alpha_count_element[id_i] = len(anchor_R_i_alpha_list[key])
R_i_alpha_total.append(R_i_alpha_element)
R_i_alpha_std_dev.append(R_i_alpha_std_element)
R_i_alpha_count.append(R_i_alpha_count_element)
anchor_T_alpha = self.anchor_stats_list[alpha].T_alpha_total
anchor_T_alpha_std = self.anchor_stats_list[alpha].T_alpha_std_dev
anchor_T_alpha_list = self.anchor_stats_list[alpha].T_alpha_list
T_alpha_total.append(anchor_T_alpha)
T_alpha_std_dev.append(anchor_T_alpha_std)
T_alpha_count.append(len(anchor_T_alpha_list))
self.main_data_sample = mmvt_analyze.MMVT_data_sample(self.model, N_alpha_beta, k_alpha_beta, N_i_j_alpha, R_i_alpha_total, T_alpha_total)
return<|docstring|>Now that the statistics for each anchor have been extracted
from the output files, construct the global transition
statistics objects. Applies to systems using MMVT milestoning.<|endoftext|> |
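
Most of the bookkeeping above converts dictionaries keyed by an anchor's local alias indices into dictionaries keyed by global milestone indices before they are handed to MMVT_data_sample. A toy version of that remapping, with an invented alias-to-index table:

from collections import defaultdict

alias_to_id = {1: 7, 2: 8}                           # anchor-local alias -> global milestone index
anchor_N_i_j_alpha = {(1, 2): 25, (2, 1): 23}        # counts keyed by alias pairs

N_i_j_alpha_element = defaultdict(int)
for (alias_i, alias_j), count in anchor_N_i_j_alpha.items():
    N_i_j_alpha_element[(alias_to_id[alias_i], alias_to_id[alias_j])] = count

print(dict(N_i_j_alpha_element))                     # {(7, 8): 25, (8, 7): 23}
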
c9ac71397771c10fcb19c6468bac89e794cc743d71b949071aec13b0e3cce755 | def process_data_samples_mmvt(self, pre_equilibrium_approx=False):
'\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. Applies to systems using MMVT milestoning.\n '
self.main_data_sample.calculate_pi_alpha()
self.main_data_sample.fill_out_data_quantities()
if (self.model.k_on_info is not None):
self.main_data_sample.parse_browndye_results()
self.main_data_sample.compute_rate_matrix()
self.main_data_sample.calculate_thermodynamics()
self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)
self.pi_alpha = self.main_data_sample.pi_alpha
self.p_i = self.main_data_sample.p_i
self.free_energy_profile = self.main_data_sample.free_energy_profile
self.MFPTs = self.main_data_sample.MFPTs
self.k_off = self.main_data_sample.k_off
self.k_ons = self.main_data_sample.k_ons
if (self.num_error_samples > 0):
(data_sample_list, p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, k_ons_error) = mmvt_analyze.monte_carlo_milestoning_error(self.main_data_sample, num=self.num_error_samples, pre_equilibrium_approx=pre_equilibrium_approx)
self.data_sample_list = data_sample_list
self.pi_alpha_error = None
self.p_i_error = p_i_error
self.free_energy_profile_err = free_energy_profile_err
self.MFPTs_error = MFPTs_error
self.k_off_error = k_off_error
self.k_ons_error = k_ons_error
return | Since the global, system-side statistics have been gathered,
compute the thermodynamic and kinetic quantities and their
uncertainties. Applies to systems using MMVT milestoning. | seekr2/analyze.py | process_data_samples_mmvt | seekrcentral/seekr2 | 1 | python | def process_data_samples_mmvt(self, pre_equilibrium_approx=False):
'\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. Applies to systems using MMVT milestoning.\n '
self.main_data_sample.calculate_pi_alpha()
self.main_data_sample.fill_out_data_quantities()
if (self.model.k_on_info is not None):
self.main_data_sample.parse_browndye_results()
self.main_data_sample.compute_rate_matrix()
self.main_data_sample.calculate_thermodynamics()
self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)
self.pi_alpha = self.main_data_sample.pi_alpha
self.p_i = self.main_data_sample.p_i
self.free_energy_profile = self.main_data_sample.free_energy_profile
self.MFPTs = self.main_data_sample.MFPTs
self.k_off = self.main_data_sample.k_off
self.k_ons = self.main_data_sample.k_ons
if (self.num_error_samples > 0):
(data_sample_list, p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, k_ons_error) = mmvt_analyze.monte_carlo_milestoning_error(self.main_data_sample, num=self.num_error_samples, pre_equilibrium_approx=pre_equilibrium_approx)
self.data_sample_list = data_sample_list
self.pi_alpha_error = None
self.p_i_error = p_i_error
self.free_energy_profile_err = free_energy_profile_err
self.MFPTs_error = MFPTs_error
self.k_off_error = k_off_error
self.k_ons_error = k_ons_error
return | def process_data_samples_mmvt(self, pre_equilibrium_approx=False):
'\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. Applies to systems using MMVT milestoning.\n '
self.main_data_sample.calculate_pi_alpha()
self.main_data_sample.fill_out_data_quantities()
if (self.model.k_on_info is not None):
self.main_data_sample.parse_browndye_results()
self.main_data_sample.compute_rate_matrix()
self.main_data_sample.calculate_thermodynamics()
self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)
self.pi_alpha = self.main_data_sample.pi_alpha
self.p_i = self.main_data_sample.p_i
self.free_energy_profile = self.main_data_sample.free_energy_profile
self.MFPTs = self.main_data_sample.MFPTs
self.k_off = self.main_data_sample.k_off
self.k_ons = self.main_data_sample.k_ons
if (self.num_error_samples > 0):
(data_sample_list, p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, k_ons_error) = mmvt_analyze.monte_carlo_milestoning_error(self.main_data_sample, num=self.num_error_samples, pre_equilibrium_approx=pre_equilibrium_approx)
self.data_sample_list = data_sample_list
self.pi_alpha_error = None
self.p_i_error = p_i_error
self.free_energy_profile_err = free_energy_profile_err
self.MFPTs_error = MFPTs_error
self.k_off_error = k_off_error
self.k_ons_error = k_ons_error
return<|docstring|>Since the global, system-side statistics have been gathered,
compute the thermodynamic and kinetic quantities and their
uncertainties. Applies to systems using MMVT milestoning.<|endoftext|> |
cbdf63bfc005dcafcf3fae2272dc2c15206591faa99dab590d0c557b7a5e3004 | def fill_out_data_samples_elber(self):
'\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using Elber milestoning.\n '
N_i_j_list = []
R_i_total = []
R_i_average = []
R_i_std_dev = []
R_i_count = []
bulkstate = None
for (i, anchor1) in enumerate(self.model.anchors):
if anchor1.bulkstate:
bulkstate = i
continue
anchor_N_i_j = self.anchor_stats_list[i].N_i_j
N_i_j_element = defaultdict(int)
for key in anchor_N_i_j:
alias_id_j = key
id_j = anchor1.id_from_alias(alias_id_j)
new_key = (i, id_j)
N_i_j_element[new_key] = anchor_N_i_j[key]
N_i_j_list.append(N_i_j_element)
anchor_R_i = self.anchor_stats_list[i].R_i_total
anchor_R_i_std = self.anchor_stats_list[i].R_i_std_dev
anchor_R_i_list = self.anchor_stats_list[i].R_i_list
R_i_element = defaultdict(float)
R_i_count_element = defaultdict(int)
R_i_std_element = defaultdict(float)
R_i_element[i] = anchor_R_i
R_i_std_element[i] = anchor_R_i_std
R_i_count_element[i] = len(anchor_R_i_list)
R_i_total.append(R_i_element)
R_i_std_dev.append(R_i_std_element)
R_i_count.append(R_i_count_element)
self.main_data_sample = elber_analyze.Elber_data_sample(self.model, N_i_j_list, R_i_total)
self.main_data_sample.fill_out_data_quantities()
error_sample = elber_analyze.Elber_data_sample(self.model, N_i_j_list, R_i_total)
error_sample.fill_out_data_quantities()
self.data_sample_list.append(error_sample)
return | Now that the statistics for each anchor have been extracted
from the output files, construct the global transition
statistics objects. Applies to systems using Elber milestoning. | seekr2/analyze.py | fill_out_data_samples_elber | seekrcentral/seekr2 | 1 | python | def fill_out_data_samples_elber(self):
'\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using Elber milestoning.\n '
N_i_j_list = []
R_i_total = []
R_i_average = []
R_i_std_dev = []
R_i_count = []
bulkstate = None
for (i, anchor1) in enumerate(self.model.anchors):
if anchor1.bulkstate:
bulkstate = i
continue
anchor_N_i_j = self.anchor_stats_list[i].N_i_j
N_i_j_element = defaultdict(int)
for key in anchor_N_i_j:
alias_id_j = key
id_j = anchor1.id_from_alias(alias_id_j)
new_key = (i, id_j)
N_i_j_element[new_key] = anchor_N_i_j[key]
N_i_j_list.append(N_i_j_element)
anchor_R_i = self.anchor_stats_list[i].R_i_total
anchor_R_i_std = self.anchor_stats_list[i].R_i_std_dev
anchor_R_i_list = self.anchor_stats_list[i].R_i_list
R_i_element = defaultdict(float)
R_i_count_element = defaultdict(int)
R_i_std_element = defaultdict(float)
R_i_element[i] = anchor_R_i
R_i_std_element[i] = anchor_R_i_std
R_i_count_element[i] = len(anchor_R_i_list)
R_i_total.append(R_i_element)
R_i_std_dev.append(R_i_std_element)
R_i_count.append(R_i_count_element)
self.main_data_sample = elber_analyze.Elber_data_sample(self.model, N_i_j_list, R_i_total)
self.main_data_sample.fill_out_data_quantities()
error_sample = elber_analyze.Elber_data_sample(self.model, N_i_j_list, R_i_total)
error_sample.fill_out_data_quantities()
self.data_sample_list.append(error_sample)
return | def fill_out_data_samples_elber(self):
'\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using Elber milestoning.\n '
N_i_j_list = []
R_i_total = []
R_i_average = []
R_i_std_dev = []
R_i_count = []
bulkstate = None
for (i, anchor1) in enumerate(self.model.anchors):
if anchor1.bulkstate:
bulkstate = i
continue
anchor_N_i_j = self.anchor_stats_list[i].N_i_j
N_i_j_element = defaultdict(int)
for key in anchor_N_i_j:
alias_id_j = key
id_j = anchor1.id_from_alias(alias_id_j)
new_key = (i, id_j)
N_i_j_element[new_key] = anchor_N_i_j[key]
N_i_j_list.append(N_i_j_element)
anchor_R_i = self.anchor_stats_list[i].R_i_total
anchor_R_i_std = self.anchor_stats_list[i].R_i_std_dev
anchor_R_i_list = self.anchor_stats_list[i].R_i_list
R_i_element = defaultdict(float)
R_i_count_element = defaultdict(int)
R_i_std_element = defaultdict(float)
R_i_element[i] = anchor_R_i
R_i_std_element[i] = anchor_R_i_std
R_i_count_element[i] = len(anchor_R_i_list)
R_i_total.append(R_i_element)
R_i_std_dev.append(R_i_std_element)
R_i_count.append(R_i_count_element)
self.main_data_sample = elber_analyze.Elber_data_sample(self.model, N_i_j_list, R_i_total)
self.main_data_sample.fill_out_data_quantities()
error_sample = elber_analyze.Elber_data_sample(self.model, N_i_j_list, R_i_total)
error_sample.fill_out_data_quantities()
self.data_sample_list.append(error_sample)
return<|docstring|>Now that the statistics for each anchor have been extracted
from the output files, construct the global transition
statistics objects. Applies to systems using Elber milestoning.<|endoftext|> |
ab71766e0d9de83ad5210f11a21b36a4873d67ac8e163cc6c7a4619a81fe935d | def process_data_samples_elber(self, pre_equilibrium_approx=False):
'\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. Applies to systems using Elber milestoning.\n '
if (self.model.k_on_info is not None):
self.main_data_sample.parse_browndye_results()
self.main_data_sample.compute_rate_matrix()
self.main_data_sample.calculate_thermodynamics()
self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)
self.p_i = self.main_data_sample.p_i
self.free_energy_profile = self.main_data_sample.free_energy_profile
self.MFPTs = self.main_data_sample.MFPTs
self.k_off = self.main_data_sample.k_off
self.k_ons = self.main_data_sample.k_ons
if (self.num_error_samples > 0):
(data_sample_list, p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, k_ons_error) = elber_analyze.monte_carlo_milestoning_error(self.main_data_sample, num=self.num_error_samples, pre_equilibrium_approx=pre_equilibrium_approx)
self.data_sample_list = data_sample_list
self.p_i_error = p_i_error
self.free_energy_profile_err = free_energy_profile_err
self.MFPTs_error = MFPTs_error
self.k_off_error = k_off_error
self.k_ons_error = k_ons_error
return | Since the global, system-side statistics have been gathered,
compute the thermodynamic and kinetic quantities and their
uncertainties. Applies to systems using Elber milestoning. | seekr2/analyze.py | process_data_samples_elber | seekrcentral/seekr2 | 1 | python | def process_data_samples_elber(self, pre_equilibrium_approx=False):
'\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. Applies to systems using Elber milestoning.\n '
if (self.model.k_on_info is not None):
self.main_data_sample.parse_browndye_results()
self.main_data_sample.compute_rate_matrix()
self.main_data_sample.calculate_thermodynamics()
self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)
self.p_i = self.main_data_sample.p_i
self.free_energy_profile = self.main_data_sample.free_energy_profile
self.MFPTs = self.main_data_sample.MFPTs
self.k_off = self.main_data_sample.k_off
self.k_ons = self.main_data_sample.k_ons
if (self.num_error_samples > 0):
(data_sample_list, p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, k_ons_error) = elber_analyze.monte_carlo_milestoning_error(self.main_data_sample, num=self.num_error_samples, pre_equilibrium_approx=pre_equilibrium_approx)
self.data_sample_list = data_sample_list
self.p_i_error = p_i_error
self.free_energy_profile_err = free_energy_profile_err
self.MFPTs_error = MFPTs_error
self.k_off_error = k_off_error
self.k_ons_error = k_ons_error
return | def process_data_samples_elber(self, pre_equilibrium_approx=False):
'\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. Applies to systems using Elber milestoning.\n '
if (self.model.k_on_info is not None):
self.main_data_sample.parse_browndye_results()
self.main_data_sample.compute_rate_matrix()
self.main_data_sample.calculate_thermodynamics()
self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)
self.p_i = self.main_data_sample.p_i
self.free_energy_profile = self.main_data_sample.free_energy_profile
self.MFPTs = self.main_data_sample.MFPTs
self.k_off = self.main_data_sample.k_off
self.k_ons = self.main_data_sample.k_ons
if (self.num_error_samples > 0):
(data_sample_list, p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, k_ons_error) = elber_analyze.monte_carlo_milestoning_error(self.main_data_sample, num=self.num_error_samples, pre_equilibrium_approx=pre_equilibrium_approx)
self.data_sample_list = data_sample_list
self.p_i_error = p_i_error
self.free_energy_profile_err = free_energy_profile_err
self.MFPTs_error = MFPTs_error
self.k_off_error = k_off_error
self.k_ons_error = k_ons_error
return<|docstring|>Since the global, system-side statistics have been gathered,
compute the thermodynamic and kinetic quantities and their
uncertainties. Applies to systems using Elber milestoning.<|endoftext|> |
cfe765b37af9e0ecb84861318a9b0d223974c7971440b14228a568cfaac1d167 | def fill_out_data_samples(self):
'\n Based on the type of milestoning, construct the data samples\n and fill out their statistics.\n '
if (self.model.get_type() == 'mmvt'):
self.fill_out_data_samples_mmvt()
elif (self.model.get_type() == 'elber'):
self.fill_out_data_samples_elber()
return | Based on the type of milestoning, construct the data samples
and fill out their statistics. | seekr2/analyze.py | fill_out_data_samples | seekrcentral/seekr2 | 1 | python | def fill_out_data_samples(self):
'\n Based on the type of milestoning, construct the data samples\n and fill out their statistics.\n '
if (self.model.get_type() == 'mmvt'):
self.fill_out_data_samples_mmvt()
elif (self.model.get_type() == 'elber'):
self.fill_out_data_samples_elber()
return | def fill_out_data_samples(self):
'\n Based on the type of milestoning, construct the data samples\n and fill out their statistics.\n '
if (self.model.get_type() == 'mmvt'):
self.fill_out_data_samples_mmvt()
elif (self.model.get_type() == 'elber'):
self.fill_out_data_samples_elber()
return<|docstring|>Based on the type of milestoning, construct the data samples
and fill out their statistics.<|endoftext|> |
4b709d52f970f13a9c96e2b2aff392d7cce490d45ba6cc7687db069798a30865 | def process_data_samples(self, pre_equilibrium_approx=False):
'\n Based on the type of milestoning, use the data samples to \n compute thermo and kinetics quantities and their uncertainties.\n '
if (self.model.get_type() == 'mmvt'):
self.process_data_samples_mmvt(pre_equilibrium_approx)
elif (self.model.get_type() == 'elber'):
self.process_data_samples_elber(pre_equilibrium_approx)
return | Based on the type of milestoning, use the data samples to
compute thermo and kinetics quantities and their uncertainties. | seekr2/analyze.py | process_data_samples | seekrcentral/seekr2 | 1 | python | def process_data_samples(self, pre_equilibrium_approx=False):
'\n Based on the type of milestoning, use the data samples to \n compute thermo and kinetics quantities and their uncertainties.\n '
if (self.model.get_type() == 'mmvt'):
self.process_data_samples_mmvt(pre_equilibrium_approx)
elif (self.model.get_type() == 'elber'):
self.process_data_samples_elber(pre_equilibrium_approx)
return | def process_data_samples(self, pre_equilibrium_approx=False):
'\n Based on the type of milestoning, use the data samples to \n compute thermo and kinetics quantities and their uncertainties.\n '
if (self.model.get_type() == 'mmvt'):
self.process_data_samples_mmvt(pre_equilibrium_approx)
elif (self.model.get_type() == 'elber'):
self.process_data_samples_elber(pre_equilibrium_approx)
return<|docstring|>Based on the type of milestoning, use the data samples to
compute thermo and kinetics quantities and their uncertainties.<|endoftext|> |
376aa3de636f99068f01b66254b97e6a1ed1b31b583177dedd9112de41d5e582 | def resample_k_N_R_T(self, N_alpha_beta, N_i_j_alpha, R_i_alpha_total, R_i_alpha_average, R_i_alpha_std_dev, R_i_alpha_count, T_alpha_total, T_alpha_average, T_alpha_std_dev, T_alpha_count):
'\n Create data samples from a distribution for computing the\n uncertainties of the thermo and kinetics.\n '
sampled_k_alpha_beta = {}
sampled_T_alpha_total = []
sampled_R_i_alpha_total = []
for (alpha, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
element_R_i_alpha_total = {}
for key in R_i_alpha_total[alpha]:
n_R_i = R_i_alpha_count[alpha][key]
if (n_R_i != 0):
R_i_total_std_dev = (R_i_alpha_std_dev[alpha][key] * np.sqrt(n_R_i))
R_fluctuation = np.random.normal(scale=R_i_total_std_dev)
else:
R_fluctuation = 0.0
element_R_i_alpha_total[key] = abs((R_i_alpha_total[alpha][key] + R_fluctuation))
n_T = T_alpha_count[alpha]
if (n_T != 0):
T_total_std_dev = (T_alpha_std_dev[alpha] * np.sqrt(n_T))
T_fluctuation = np.random.normal(scale=T_total_std_dev)
else:
T_fluctuation = 0.0
element_T_alpha_total = abs((T_alpha_total[alpha] + T_fluctuation))
sampled_T_alpha_total.append(np.sum(list(element_R_i_alpha_total.values())))
sampled_R_i_alpha_total.append(element_R_i_alpha_total)
for (beta, anchor2) in enumerate(self.model.anchors):
key = (alpha, beta)
sampled_k_alpha_beta[key] = (N_alpha_beta[key] / sampled_T_alpha_total[alpha])
sampled_N_alpha_beta = deepcopy(N_alpha_beta)
sampled_N_i_j_alpha = deepcopy(N_i_j_alpha)
return (sampled_k_alpha_beta, sampled_N_i_j_alpha, sampled_R_i_alpha_total, sampled_T_alpha_total) | Create data samples from a distribution for computing the
uncertainties of the thermo and kinetics. | seekr2/analyze.py | resample_k_N_R_T | seekrcentral/seekr2 | 1 | python | def resample_k_N_R_T(self, N_alpha_beta, N_i_j_alpha, R_i_alpha_total, R_i_alpha_average, R_i_alpha_std_dev, R_i_alpha_count, T_alpha_total, T_alpha_average, T_alpha_std_dev, T_alpha_count):
'\n Create data samples from a distribution for computing the\n uncertainties of the thermo and kinetics.\n '
sampled_k_alpha_beta = {}
sampled_T_alpha_total = []
sampled_R_i_alpha_total = []
for (alpha, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
element_R_i_alpha_total = {}
for key in R_i_alpha_total[alpha]:
n_R_i = R_i_alpha_count[alpha][key]
if (n_R_i != 0):
R_i_total_std_dev = (R_i_alpha_std_dev[alpha][key] * np.sqrt(n_R_i))
R_fluctuation = np.random.normal(scale=R_i_total_std_dev)
else:
R_fluctuation = 0.0
element_R_i_alpha_total[key] = abs((R_i_alpha_total[alpha][key] + R_fluctuation))
n_T = T_alpha_count[alpha]
if (n_T != 0):
T_total_std_dev = (T_alpha_std_dev[alpha] * np.sqrt(n_T))
T_fluctuation = np.random.normal(scale=T_total_std_dev)
else:
T_fluctuation = 0.0
element_T_alpha_total = abs((T_alpha_total[alpha] + T_fluctuation))
sampled_T_alpha_total.append(np.sum(list(element_R_i_alpha_total.values())))
sampled_R_i_alpha_total.append(element_R_i_alpha_total)
for (beta, anchor2) in enumerate(self.model.anchors):
key = (alpha, beta)
sampled_k_alpha_beta[key] = (N_alpha_beta[key] / sampled_T_alpha_total[alpha])
sampled_N_alpha_beta = deepcopy(N_alpha_beta)
sampled_N_i_j_alpha = deepcopy(N_i_j_alpha)
return (sampled_k_alpha_beta, sampled_N_i_j_alpha, sampled_R_i_alpha_total, sampled_T_alpha_total) | def resample_k_N_R_T(self, N_alpha_beta, N_i_j_alpha, R_i_alpha_total, R_i_alpha_average, R_i_alpha_std_dev, R_i_alpha_count, T_alpha_total, T_alpha_average, T_alpha_std_dev, T_alpha_count):
'\n Create data samples from a distribution for computing the\n uncertainties of the thermo and kinetics.\n '
sampled_k_alpha_beta = {}
sampled_T_alpha_total = []
sampled_R_i_alpha_total = []
for (alpha, anchor) in enumerate(self.model.anchors):
if anchor.bulkstate:
continue
element_R_i_alpha_total = {}
for key in R_i_alpha_total[alpha]:
n_R_i = R_i_alpha_count[alpha][key]
if (n_R_i != 0):
R_i_total_std_dev = (R_i_alpha_std_dev[alpha][key] * np.sqrt(n_R_i))
R_fluctuation = np.random.normal(scale=R_i_total_std_dev)
else:
R_fluctuation = 0.0
element_R_i_alpha_total[key] = abs((R_i_alpha_total[alpha][key] + R_fluctuation))
n_T = T_alpha_count[alpha]
if (n_T != 0):
T_total_std_dev = (T_alpha_std_dev[alpha] * np.sqrt(n_T))
T_fluctuation = np.random.normal(scale=T_total_std_dev)
else:
T_fluctuation = 0.0
element_T_alpha_total = abs((T_alpha_total[alpha] + T_fluctuation))
sampled_T_alpha_total.append(np.sum(list(element_R_i_alpha_total.values())))
sampled_R_i_alpha_total.append(element_R_i_alpha_total)
for (beta, anchor2) in enumerate(self.model.anchors):
key = (alpha, beta)
sampled_k_alpha_beta[key] = (N_alpha_beta[key] / sampled_T_alpha_total[alpha])
sampled_N_alpha_beta = deepcopy(N_alpha_beta)
sampled_N_i_j_alpha = deepcopy(N_i_j_alpha)
return (sampled_k_alpha_beta, sampled_N_i_j_alpha, sampled_R_i_alpha_total, sampled_T_alpha_total)<|docstring|>Create data samples from a distribution for computing the
uncertainties of the thermo and kinetics.<|endoftext|> |
154c86fd1fe266a4ba04a727c84350e93099ad8fa85eff12e03dd80260a14483 | def print_results(self):
'Print all results of the analysis calculation.'
print('Printing results from MMVT SEEKR calculation')
print('k_off (1/s):', common_analyze.pretty_string_value_error(self.k_off, self.k_off_error))
print('k_ons :')
for key in self.k_ons:
k_on = float(self.k_ons[key])
diss_constant = (self.k_off / k_on)
delta_G = ((common_analyze.GAS_CONSTANT * self.model.temperature) * math.log(diss_constant))
if (key in self.k_ons_error):
k_on_err = float(self.k_ons_error[key])
print(' k_on (1/s * 1/M) to state', key, ':', common_analyze.pretty_string_value_error(k_on, k_on_err))
if ((k_on > 0.0) and (self.k_off > 0.0)):
diss_constant_err = (diss_constant * common_analyze.quadriture((k_on_err / k_on), (self.k_off_error / self.k_off)))
else:
diss_constant_err = None
delta_G_err = (((diss_constant_err * common_analyze.GAS_CONSTANT) * self.model.temperature) / diss_constant)
else:
print(' k_on (1/s * 1/M) to state', key, ':', common_analyze.pretty_string_value_error(k_on, None))
diss_constant_err = None
print(' Dissociation constant (M) to state', key, ':', common_analyze.pretty_string_value_error(diss_constant, diss_constant_err))
if (key in self.k_ons_error):
print(' ΔG (kcal/mol) to state', key, ':', common_analyze.pretty_string_value_error(delta_G, delta_G_err))
print('Mean first passage times (s):')
for key in self.MFPTs:
state1 = key[0]
state2 = key[1]
if (key in self.MFPTs_error):
print(' MFPT from state', state1, 'to state', state2, ':', common_analyze.pretty_string_value_error(float((self.MFPTs[key] * 1e-12)), float((self.MFPTs_error[key] * 1e-12))))
else:
print(' MFPT from state', state1, 'to state', state2, ':', common_analyze.pretty_string_value_error(float((self.MFPTs[key] * 1e-12)), None))
return | Print all results of the analysis calculation. | seekr2/analyze.py | print_results | seekrcentral/seekr2 | 1 | python | def print_results(self):
print('Printing results from MMVT SEEKR calculation')
print('k_off (1/s):', common_analyze.pretty_string_value_error(self.k_off, self.k_off_error))
print('k_ons :')
for key in self.k_ons:
k_on = float(self.k_ons[key])
diss_constant = (self.k_off / k_on)
delta_G = ((common_analyze.GAS_CONSTANT * self.model.temperature) * math.log(diss_constant))
if (key in self.k_ons_error):
k_on_err = float(self.k_ons_error[key])
print(' k_on (1/s * 1/M) to state', key, ':', common_analyze.pretty_string_value_error(k_on, k_on_err))
if ((k_on > 0.0) and (self.k_off > 0.0)):
diss_constant_err = (diss_constant * common_analyze.quadriture((k_on_err / k_on), (self.k_off_error / self.k_off)))
else:
diss_constant_err = None
delta_G_err = (((diss_constant_err * common_analyze.GAS_CONSTANT) * self.model.temperature) / diss_constant)
else:
print(' k_on (1/s * 1/M) to state', key, ':', common_analyze.pretty_string_value_error(k_on, None))
diss_constant_err = None
print(' Dissociation constant (M) to state', key, ':', common_analyze.pretty_string_value_error(diss_constant, diss_constant_err))
if (key in self.k_ons_error):
print(' ΔG (kcal/mol) to state', key, ':', common_analyze.pretty_string_value_error(delta_G, delta_G_err))
print('Mean first passage times (s):')
for key in self.MFPTs:
state1 = key[0]
state2 = key[1]
if (key in self.MFPTs_error):
print(' MFPT from state', state1, 'to state', state2, ':', common_analyze.pretty_string_value_error(float((self.MFPTs[key] * 1e-12)), float((self.MFPTs_error[key] * 1e-12))))
else:
print(' MFPT from state', state1, 'to state', state2, ':', common_analyze.pretty_string_value_error(float((self.MFPTs[key] * 1e-12)), None))
return | def print_results(self):
print('Printing results from MMVT SEEKR calculation')
print('k_off (1/s):', common_analyze.pretty_string_value_error(self.k_off, self.k_off_error))
print('k_ons :')
for key in self.k_ons:
k_on = float(self.k_ons[key])
diss_constant = (self.k_off / k_on)
delta_G = ((common_analyze.GAS_CONSTANT * self.model.temperature) * math.log(diss_constant))
if (key in self.k_ons_error):
k_on_err = float(self.k_ons_error[key])
print(' k_on (1/s * 1/M) to state', key, ':', common_analyze.pretty_string_value_error(k_on, k_on_err))
if ((k_on > 0.0) and (self.k_off > 0.0)):
diss_constant_err = (diss_constant * common_analyze.quadriture((k_on_err / k_on), (self.k_off_error / self.k_off)))
else:
diss_constant_err = None
delta_G_err = (((diss_constant_err * common_analyze.GAS_CONSTANT) * self.model.temperature) / diss_constant)
else:
print(' k_on (1/s * 1/M) to state', key, ':', common_analyze.pretty_string_value_error(k_on, None))
diss_constant_err = None
print(' Dissociation constant (M) to state', key, ':', common_analyze.pretty_string_value_error(diss_constant, diss_constant_err))
if (key in self.k_ons_error):
print(' ΔG (kcal/mol) to state', key, ':', common_analyze.pretty_string_value_error(delta_G, delta_G_err))
print('Mean first passage times (s):')
for key in self.MFPTs:
state1 = key[0]
state2 = key[1]
if (key in self.MFPTs_error):
print(' MFPT from state', state1, 'to state', state2, ':', common_analyze.pretty_string_value_error(float((self.MFPTs[key] * 1e-12)), float((self.MFPTs_error[key] * 1e-12))))
else:
print(' MFPT from state', state1, 'to state', state2, ':', common_analyze.pretty_string_value_error(float((self.MFPTs[key] * 1e-12)), None))
return<|docstring|>Print all results of the analysis calculation.<|endoftext|> |
b94b22d3bb36da4e3005f7ee3d0d0b7750d7a188865e706f1e135ba8aff697b6 | def save_plots(self, image_directory):
'\n Save a potentially useful series of plots of some quantities\n obtained during the analysis.\n \n TODO: interact with model, because the way these plots are saved\n depends on the structure of the CVs.\n '
anchor_indices = np.zeros(len(self.model.anchors), dtype=np.int8)
for (i, anchor) in enumerate(self.model.anchors):
anchor_indices[i] = anchor.index
milestone_indices = np.zeros(self.p_i.shape[0], dtype=np.int8)
for i in range(self.p_i.shape[0]):
milestone_indices[i] = i
if (self.model.get_type() == 'mmvt'):
(pi_fig, ax) = plt.subplots()
plt.errorbar(anchor_indices, self.pi_alpha.flatten(), yerr=self.pi_alpha_error, ecolor='k', capsize=2)
plt.ylabel('π_α')
plt.xlabel('anchors')
pi_fig.savefig(os.path.join(image_directory, 'pi_alpha.png'))
(pi_fig, ax) = plt.subplots()
plt.errorbar(milestone_indices, self.p_i, yerr=self.p_i_error, ecolor='k', capsize=2)
plt.ylabel('p_i')
plt.xlabel('milestones')
pi_fig.savefig(os.path.join(image_directory, 'p_i.png'))
(pi_fig, ax) = plt.subplots()
plt.errorbar(milestone_indices, self.free_energy_profile, yerr=self.free_energy_profile_err, ecolor='k', capsize=2)
plt.ylabel('ΔG(milestone) (kcal/mol)')
plt.xlabel('milestones')
pi_fig.savefig(os.path.join(image_directory, 'free_energy_profile.png'))
return | Save a potentially useful series of plots of some quantities
obtained during the analysis.
TODO: interact with model, because the way these plots are saved
depends on the structure of the CVs. | seekr2/analyze.py | save_plots | seekrcentral/seekr2 | 1 | python | def save_plots(self, image_directory):
'\n Save a potentially useful series of plots of some quantities\n obtained during the analysis.\n \n TODO: interact with model, because the way these plots are saved\n depends on the structure of the CVs.\n '
anchor_indices = np.zeros(len(self.model.anchors), dtype=np.int8)
for (i, anchor) in enumerate(self.model.anchors):
anchor_indices[i] = anchor.index
milestone_indices = np.zeros(self.p_i.shape[0], dtype=np.int8)
for i in range(self.p_i.shape[0]):
milestone_indices[i] = i
if (self.model.get_type() == 'mmvt'):
(pi_fig, ax) = plt.subplots()
plt.errorbar(anchor_indices, self.pi_alpha.flatten(), yerr=self.pi_alpha_error, ecolor='k', capsize=2)
plt.ylabel('π_α')
plt.xlabel('anchors')
pi_fig.savefig(os.path.join(image_directory, 'pi_alpha.png'))
(pi_fig, ax) = plt.subplots()
plt.errorbar(milestone_indices, self.p_i, yerr=self.p_i_error, ecolor='k', capsize=2)
plt.ylabel('p_i')
plt.xlabel('milestones')
pi_fig.savefig(os.path.join(image_directory, 'p_i.png'))
(pi_fig, ax) = plt.subplots()
plt.errorbar(milestone_indices, self.free_energy_profile, yerr=self.free_energy_profile_err, ecolor='k', capsize=2)
plt.ylabel('ΔG(milestone) (kcal/mol)')
plt.xlabel('milestones')
pi_fig.savefig(os.path.join(image_directory, 'free_energy_profile.png'))
return | def save_plots(self, image_directory):
'\n Save a potentially useful series of plots of some quantities\n obtained during the analysis.\n \n TODO: interact with model, because the way these plots are saved\n depends on the structure of the CVs.\n '
anchor_indices = np.zeros(len(self.model.anchors), dtype=np.int8)
for (i, anchor) in enumerate(self.model.anchors):
anchor_indices[i] = anchor.index
milestone_indices = np.zeros(self.p_i.shape[0], dtype=np.int8)
for i in range(self.p_i.shape[0]):
milestone_indices[i] = i
if (self.model.get_type() == 'mmvt'):
(pi_fig, ax) = plt.subplots()
plt.errorbar(anchor_indices, self.pi_alpha.flatten(), yerr=self.pi_alpha_error, ecolor='k', capsize=2)
plt.ylabel('π_α')
plt.xlabel('anchors')
pi_fig.savefig(os.path.join(image_directory, 'pi_alpha.png'))
(pi_fig, ax) = plt.subplots()
plt.errorbar(milestone_indices, self.p_i, yerr=self.p_i_error, ecolor='k', capsize=2)
plt.ylabel('p_i')
plt.xlabel('milestones')
pi_fig.savefig(os.path.join(image_directory, 'p_i.png'))
(pi_fig, ax) = plt.subplots()
plt.errorbar(milestone_indices, self.free_energy_profile, yerr=self.free_energy_profile_err, ecolor='k', capsize=2)
plt.ylabel('ΔG(milestone) (kcal/mol)')
plt.xlabel('milestones')
pi_fig.savefig(os.path.join(image_directory, 'free_energy_profile.png'))
return<|docstring|>Save a potentially useful series of plots of some quantities
obtained during the analysis.
TODO: interact with model, because the way these plots are saved
depends on the structure of the CVs.<|endoftext|> |
6f3922edcd56da93c12ba7594f0cf18764fc9dbce02b1be41e11505a9be9e680 | def lowestCommonAncestor(self, root, p, q):
'\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n '
self.pre_order(root, p, q)
return self.ret | :type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode | leetcode/python/lca_bt.py | lowestCommonAncestor | haonancool/OnlineJudge | 0 | python | def lowestCommonAncestor(self, root, p, q):
'\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n '
self.pre_order(root, p, q)
return self.ret | def lowestCommonAncestor(self, root, p, q):
'\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n '
self.pre_order(root, p, q)
return self.ret<|docstring|>:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode<|endoftext|> |
10c3f85824158a27db612162fca14242efd4600c6bce5e582b8b12cbe31acf37 | def load_calib():
'\n 读取内参矩阵\n '
def intrinsics(date):
calib = open((('dataloaders/' + str(date)) + '.txt'), 'r')
lines = calib.readlines()
P_rect_line = lines[25]
Proj_str = P_rect_line.split(':')[1].split(' ')[1:]
Proj = np.reshape(np.array([float(p) for p in Proj_str]), (3, 4)).astype(np.float32)
K = Proj[(:3, :3)]
K[(0, 2)] = (K[(0, 2)] - 13)
K[(1, 2)] = (K[(1, 2)] - 11.5)
return K
date_set = ['2011_09_26', '2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
K_set = {i: intrinsics(i) for i in date_set}
return K_set | 读取内参矩阵 | dataloaders/kitti_loader.py | load_calib | Hansry/Semi-supervised-depth-estimation | 0 | python | def load_calib():
'\n \n '
def intrinsics(date):
calib = open((('dataloaders/' + str(date)) + '.txt'), 'r')
lines = calib.readlines()
P_rect_line = lines[25]
Proj_str = P_rect_line.split(':')[1].split(' ')[1:]
Proj = np.reshape(np.array([float(p) for p in Proj_str]), (3, 4)).astype(np.float32)
K = Proj[(:3, :3)]
K[(0, 2)] = (K[(0, 2)] - 13)
K[(1, 2)] = (K[(1, 2)] - 11.5)
return K
date_set = ['2011_09_26', '2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
K_set = {i: intrinsics(i) for i in date_set}
return K_set | def load_calib():
'\n \n '
def intrinsics(date):
calib = open((('dataloaders/' + str(date)) + '.txt'), 'r')
lines = calib.readlines()
P_rect_line = lines[25]
Proj_str = P_rect_line.split(':')[1].split(' ')[1:]
Proj = np.reshape(np.array([float(p) for p in Proj_str]), (3, 4)).astype(np.float32)
K = Proj[(:3, :3)]
K[(0, 2)] = (K[(0, 2)] - 13)
K[(1, 2)] = (K[(1, 2)] - 11.5)
return K
date_set = ['2011_09_26', '2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
K_set = {i: intrinsics(i) for i in date_set}
return K_set<|docstring|>读取内参矩阵<|endoftext|> |
4d10cf9f23350c1b65804207ae46e70314ad4c96f253b28b1bb6afe7891019e3 | def load_transfrom():
'\n 读取左右相机的转换关系\n '
def load_R_t(date):
calib_t = open((('dataloaders/' + str(date)) + '.txt'), 'r')
transforms = calib_t.readlines()
lines_R_0to2 = transforms[21]
R_0to2_str = lines_R_0to2.split(':')[1].split(' ')[1:]
R_0to2 = np.reshape(np.array([float(p) for p in R_0to2_str]), (3, 3)).astype(np.float32)
lines_t_0to2 = transforms[22]
t_0to2_str = lines_t_0to2.split(':')[1].split(' ')[1:]
t_0to2 = np.reshape(np.array([float(p) for p in t_0to2_str]), (3, 1)).astype(np.float32)
lines_R_0to3 = transforms[29]
R_0to3_str = lines_R_0to3.split(':')[1].split(' ')[1:]
R_0to3 = np.reshape(np.array([float(p) for p in R_0to3_str]), (3, 3)).astype(np.float32)
lines_t_0to3 = transforms[30]
t_0to3_str = lines_t_0to3.split(':')[1].split(' ')[1:]
t_0to3 = np.reshape(np.array([float(p) for p in t_0to3_str]), (3, 1)).astype(np.float32)
R_0to2_inv = np.linalg.inv(R_0to2)
R_0to3_times_R_0to2_inv = np.matmul(R_0to3, R_0to2_inv)
R_0to3_inv = np.linalg.inv(R_0to3)
R_0to2_times_R_0to3_inv = np.matmul(R_0to2, R_0to3_inv)
return (R_0to3_times_R_0to2_inv, t_0to2, t_0to3, R_0to2_times_R_0to3_inv)
date_set = ['2011_09_26', '2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
R_0to3_times_R_0to2_inv_set = {i: load_R_t(i)[0] for i in date_set}
t_0to2_set = {j: load_R_t(j)[1] for j in date_set}
t_0to3_set = {k: load_R_t(k)[2] for k in date_set}
R_0to2_times_R_0to3_inv_set = {h: load_R_t(h)[3] for h in date_set}
return (R_0to3_times_R_0to2_inv_set, t_0to2_set, t_0to3_set, R_0to2_times_R_0to3_inv_set) | 读取左右相机的转换关系 | dataloaders/kitti_loader.py | load_transfrom | Hansry/Semi-supervised-depth-estimation | 0 | python | def load_transfrom():
'\n \n '
def load_R_t(date):
calib_t = open((('dataloaders/' + str(date)) + '.txt'), 'r')
transforms = calib_t.readlines()
lines_R_0to2 = transforms[21]
R_0to2_str = lines_R_0to2.split(':')[1].split(' ')[1:]
R_0to2 = np.reshape(np.array([float(p) for p in R_0to2_str]), (3, 3)).astype(np.float32)
lines_t_0to2 = transforms[22]
t_0to2_str = lines_t_0to2.split(':')[1].split(' ')[1:]
t_0to2 = np.reshape(np.array([float(p) for p in t_0to2_str]), (3, 1)).astype(np.float32)
lines_R_0to3 = transforms[29]
R_0to3_str = lines_R_0to3.split(':')[1].split(' ')[1:]
R_0to3 = np.reshape(np.array([float(p) for p in R_0to3_str]), (3, 3)).astype(np.float32)
lines_t_0to3 = transforms[30]
t_0to3_str = lines_t_0to3.split(':')[1].split(' ')[1:]
t_0to3 = np.reshape(np.array([float(p) for p in t_0to3_str]), (3, 1)).astype(np.float32)
R_0to2_inv = np.linalg.inv(R_0to2)
R_0to3_times_R_0to2_inv = np.matmul(R_0to3, R_0to2_inv)
R_0to3_inv = np.linalg.inv(R_0to3)
R_0to2_times_R_0to3_inv = np.matmul(R_0to2, R_0to3_inv)
return (R_0to3_times_R_0to2_inv, t_0to2, t_0to3, R_0to2_times_R_0to3_inv)
date_set = ['2011_09_26', '2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
R_0to3_times_R_0to2_inv_set = {i: load_R_t(i)[0] for i in date_set}
t_0to2_set = {j: load_R_t(j)[1] for j in date_set}
t_0to3_set = {k: load_R_t(k)[2] for k in date_set}
R_0to2_times_R_0to3_inv_set = {h: load_R_t(h)[3] for h in date_set}
return (R_0to3_times_R_0to2_inv_set, t_0to2_set, t_0to3_set, R_0to2_times_R_0to3_inv_set) | def load_transfrom():
'\n \n '
def load_R_t(date):
calib_t = open((('dataloaders/' + str(date)) + '.txt'), 'r')
transforms = calib_t.readlines()
lines_R_0to2 = transforms[21]
R_0to2_str = lines_R_0to2.split(':')[1].split(' ')[1:]
R_0to2 = np.reshape(np.array([float(p) for p in R_0to2_str]), (3, 3)).astype(np.float32)
lines_t_0to2 = transforms[22]
t_0to2_str = lines_t_0to2.split(':')[1].split(' ')[1:]
t_0to2 = np.reshape(np.array([float(p) for p in t_0to2_str]), (3, 1)).astype(np.float32)
lines_R_0to3 = transforms[29]
R_0to3_str = lines_R_0to3.split(':')[1].split(' ')[1:]
R_0to3 = np.reshape(np.array([float(p) for p in R_0to3_str]), (3, 3)).astype(np.float32)
lines_t_0to3 = transforms[30]
t_0to3_str = lines_t_0to3.split(':')[1].split(' ')[1:]
t_0to3 = np.reshape(np.array([float(p) for p in t_0to3_str]), (3, 1)).astype(np.float32)
R_0to2_inv = np.linalg.inv(R_0to2)
R_0to3_times_R_0to2_inv = np.matmul(R_0to3, R_0to2_inv)
R_0to3_inv = np.linalg.inv(R_0to3)
R_0to2_times_R_0to3_inv = np.matmul(R_0to2, R_0to3_inv)
return (R_0to3_times_R_0to2_inv, t_0to2, t_0to3, R_0to2_times_R_0to3_inv)
date_set = ['2011_09_26', '2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
R_0to3_times_R_0to2_inv_set = {i: load_R_t(i)[0] for i in date_set}
t_0to2_set = {j: load_R_t(j)[1] for j in date_set}
t_0to3_set = {k: load_R_t(k)[2] for k in date_set}
R_0to2_times_R_0to3_inv_set = {h: load_R_t(h)[3] for h in date_set}
return (R_0to3_times_R_0to2_inv_set, t_0to2_set, t_0to3_set, R_0to2_times_R_0to3_inv_set)<|docstring|>读取左右相机的转换关系<|endoftext|> |
fef4bd5e660cc670bdb3b81896592f31ee4f766bbe594c92a38116e26a9386d7 | def sortby(tree, col, descending):
'sort tree contents when a column header is clicked on'
data = [(tree.set(child, col), child) for child in tree.get_children('')]
data.sort(reverse=descending)
for (ix, item) in enumerate(data):
tree.move(item[1], '', ix)
tree.heading(col, command=(lambda col=col: sortby(tree, col, int((not descending))))) | sort tree contents when a column header is clicked on | tkinter/basic/test/test10.py | sortby | sdyz5210/python | 0 | python | def sortby(tree, col, descending):
data = [(tree.set(child, col), child) for child in tree.get_children()]
data.sort(reverse=descending)
for (ix, item) in enumerate(data):
tree.move(item[1], , ix)
tree.heading(col, command=(lambda col=col: sortby(tree, col, int((not descending))))) | def sortby(tree, col, descending):
data = [(tree.set(child, col), child) for child in tree.get_children()]
data.sort(reverse=descending)
for (ix, item) in enumerate(data):
tree.move(item[1], , ix)
tree.heading(col, command=(lambda col=col: sortby(tree, col, int((not descending)))))<|docstring|>sort tree contents when a column header is clicked on<|endoftext|> |
26fc7d8b413b547a0df20a5b9d3140c8888378460af497819b761dbce504ec54 | def create_lat_lon_features(constant_maps):
'\n create latitude and longitude as additional feature for data\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including latitude and longitude\n\n Returns\n ----------\n latitude_arr\n longitude_arr\n '
(londata, latdata) = np.meshgrid(constant_maps.lon, constant_maps.lat)
latitude_arr = (('latitude', 'longitude'), latdata)
longitude_arr = (('latitude', 'longitude'), londata)
return (latitude_arr, longitude_arr) | create latitude and longitude as additional feature for data
Parameters
----------
data: xarray dataarray, with dimensions including latitude and longitude
Returns
----------
latitude_arr
longitude_arr | climfill/feature_engineering.py | create_lat_lon_features | climachine/climfill | 10 | python | def create_lat_lon_features(constant_maps):
'\n create latitude and longitude as additional feature for data\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including latitude and longitude\n\n Returns\n ----------\n latitude_arr\n longitude_arr\n '
(londata, latdata) = np.meshgrid(constant_maps.lon, constant_maps.lat)
latitude_arr = (('latitude', 'longitude'), latdata)
longitude_arr = (('latitude', 'longitude'), londata)
return (latitude_arr, longitude_arr) | def create_lat_lon_features(constant_maps):
'\n create latitude and longitude as additional feature for data\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including latitude and longitude\n\n Returns\n ----------\n latitude_arr\n longitude_arr\n '
(londata, latdata) = np.meshgrid(constant_maps.lon, constant_maps.lat)
latitude_arr = (('latitude', 'longitude'), latdata)
longitude_arr = (('latitude', 'longitude'), londata)
return (latitude_arr, longitude_arr)<|docstring|>create latitude and longitude as additional feature for data
Parameters
----------
data: xarray dataarray, with dimensions including latitude and longitude
Returns
----------
latitude_arr
longitude_arr<|endoftext|> |
e3cd6388b76f6893be8a2b6430b20a570051e030518fa110112a0c9d3c884e8e | def create_time_feature(data):
'\n create timestep as additional feature for data\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including landpoints, time\n\n Returns\n ----------\n time_arr: xarray with same dimensions as one feature in array describing\n time step\n '
(_, ntimesteps, nlandpts) = data.shape
timedat = np.arange(ntimesteps)
timedat = np.tile(timedat, nlandpts).reshape(nlandpts, *timedat.shape).T
time_arr = (('time', 'landpoints'), timedat)
return time_arr | create timestep as additional feature for data
Parameters
----------
data: xarray dataarray, with dimensions including landpoints, time
Returns
----------
time_arr: xarray with same dimensions as one feature in array describing
time step | climfill/feature_engineering.py | create_time_feature | climachine/climfill | 10 | python | def create_time_feature(data):
'\n create timestep as additional feature for data\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including landpoints, time\n\n Returns\n ----------\n time_arr: xarray with same dimensions as one feature in array describing\n time step\n '
(_, ntimesteps, nlandpts) = data.shape
timedat = np.arange(ntimesteps)
timedat = np.tile(timedat, nlandpts).reshape(nlandpts, *timedat.shape).T
time_arr = (('time', 'landpoints'), timedat)
return time_arr | def create_time_feature(data):
'\n create timestep as additional feature for data\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including landpoints, time\n\n Returns\n ----------\n time_arr: xarray with same dimensions as one feature in array describing\n time step\n '
(_, ntimesteps, nlandpts) = data.shape
timedat = np.arange(ntimesteps)
timedat = np.tile(timedat, nlandpts).reshape(nlandpts, *timedat.shape).T
time_arr = (('time', 'landpoints'), timedat)
return time_arr<|docstring|>create timestep as additional feature for data
Parameters
----------
data: xarray dataarray, with dimensions including landpoints, time
Returns
----------
time_arr: xarray with same dimensions as one feature in array describing
time step<|endoftext|> |
cc878b9b12c28276a295d84da5b7dea0fb4e14e1c490625735bb7e34caca01ec | def create_embedded_feature(data, start=(- 7), end=0, name='lag_7b'):
"\n create moving window mean along time axis from day 'start' until\n day 'end' relative to current day using xr.DataArray.rolling\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including variable, time\n\n start: int, start of moving average in days from current day\n\n end: int, end of moving average in days from current day\n\n name: name of the resulting variable in the returned data\n\n Returns\n ----------\n feature: embedded features of variables to be added to data\n "
varnames = data.coords['variable'].values
length = np.abs((start - end))
offset = max((start * (- 1)), (end * (- 1)))
feature = data.rolling(time=length, center=False, min_periods=1).mean()
feature = feature.assign_coords(time=[(time + np.timedelta64(offset, 'D')) for time in feature.coords['time'].values])
feature = feature.assign_coords(variable=[f'{var}{name}' for var in varnames])
return feature | create moving window mean along time axis from day 'start' until
day 'end' relative to current day using xr.DataArray.rolling
Parameters
----------
data: xarray dataarray, with dimensions including variable, time
start: int, start of moving average in days from current day
end: int, end of moving average in days from current day
name: name of the resulting variable in the returned data
Returns
----------
feature: embedded features of variables to be added to data | climfill/feature_engineering.py | create_embedded_feature | climachine/climfill | 10 | python | def create_embedded_feature(data, start=(- 7), end=0, name='lag_7b'):
"\n create moving window mean along time axis from day 'start' until\n day 'end' relative to current day using xr.DataArray.rolling\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including variable, time\n\n start: int, start of moving average in days from current day\n\n end: int, end of moving average in days from current day\n\n name: name of the resulting variable in the returned data\n\n Returns\n ----------\n feature: embedded features of variables to be added to data\n "
varnames = data.coords['variable'].values
length = np.abs((start - end))
offset = max((start * (- 1)), (end * (- 1)))
feature = data.rolling(time=length, center=False, min_periods=1).mean()
feature = feature.assign_coords(time=[(time + np.timedelta64(offset, 'D')) for time in feature.coords['time'].values])
feature = feature.assign_coords(variable=[f'{var}{name}' for var in varnames])
return feature | def create_embedded_feature(data, start=(- 7), end=0, name='lag_7b'):
"\n create moving window mean along time axis from day 'start' until\n day 'end' relative to current day using xr.DataArray.rolling\n\n Parameters\n ----------\n data: xarray dataarray, with dimensions including variable, time\n\n start: int, start of moving average in days from current day\n\n end: int, end of moving average in days from current day\n\n name: name of the resulting variable in the returned data\n\n Returns\n ----------\n feature: embedded features of variables to be added to data\n "
varnames = data.coords['variable'].values
length = np.abs((start - end))
offset = max((start * (- 1)), (end * (- 1)))
feature = data.rolling(time=length, center=False, min_periods=1).mean()
feature = feature.assign_coords(time=[(time + np.timedelta64(offset, 'D')) for time in feature.coords['time'].values])
feature = feature.assign_coords(variable=[f'{var}{name}' for var in varnames])
return feature<|docstring|>create moving window mean along time axis from day 'start' until
day 'end' relative to current day using xr.DataArray.rolling
Parameters
----------
data: xarray dataarray, with dimensions including variable, time
start: int, start of moving average in days from current day
end: int, end of moving average in days from current day
name: name of the resulting variable in the returned data
Returns
----------
feature: embedded features of variables to be added to data<|endoftext|> |
6e08697447a51412842041aa6ec8313623645ce4c5b2e3406fd02d4c95b8bca6 | def format_submitter_id(node, args):
'\n Generates "submitter_id" for node with additional identificator values.\n Resulting "submitter_id" only contains lowercase letters, digits, underscore and dash.\n\n Args:\n node (str): node name for "submitter_id"\n args (dict): additional arguments to add to "submitter_id"\n\n Returns:\n str: generated "submitter_id"\n '
submitter_id = node
for v in args.values():
if v:
submitter_id += '_{}'.format(v)
submitter_id = submitter_id.lower()
submitter_id = re.sub('[^a-z0-9-_]+', '-', submitter_id)
return submitter_id.strip('-') | Generates "submitter_id" for node with additional identificator values.
Resulting "submitter_id" only contains lowercase letters, digits, underscore and dash.
Args:
node (str): node name for "submitter_id"
args (dict): additional arguments to add to "submitter_id"
Returns:
str: generated "submitter_id" | covid19-etl/utils/format_helper.py | format_submitter_id | uc-cdis/covid19-tools | 2 | python | def format_submitter_id(node, args):
'\n Generates "submitter_id" for node with additional identificator values.\n Resulting "submitter_id" only contains lowercase letters, digits, underscore and dash.\n\n Args:\n node (str): node name for "submitter_id"\n args (dict): additional arguments to add to "submitter_id"\n\n Returns:\n str: generated "submitter_id"\n '
submitter_id = node
for v in args.values():
if v:
submitter_id += '_{}'.format(v)
submitter_id = submitter_id.lower()
submitter_id = re.sub('[^a-z0-9-_]+', '-', submitter_id)
return submitter_id.strip('-') | def format_submitter_id(node, args):
'\n Generates "submitter_id" for node with additional identificator values.\n Resulting "submitter_id" only contains lowercase letters, digits, underscore and dash.\n\n Args:\n node (str): node name for "submitter_id"\n args (dict): additional arguments to add to "submitter_id"\n\n Returns:\n str: generated "submitter_id"\n '
submitter_id = node
for v in args.values():
if v:
submitter_id += '_{}'.format(v)
submitter_id = submitter_id.lower()
submitter_id = re.sub('[^a-z0-9-_]+', '-', submitter_id)
return submitter_id.strip('-')<|docstring|>Generates "submitter_id" for node with additional identificator values.
Resulting "submitter_id" only contains lowercase letters, digits, underscore and dash.
Args:
node (str): node name for "submitter_id"
args (dict): additional arguments to add to "submitter_id"
Returns:
str: generated "submitter_id"<|endoftext|> |
ef1a92ed7359445f07c31a6ae92d62174fa0cd788cfac41ed6d733ab675eb2c1 | def derived_submitter_id(submitter_id, original_node, derived_node, args):
'\n Derive "submitter_id" for other node.\n\n Args:\n submitter_id (str): "submitter_id" to derive from\n original_node (str): name of original node\n derived_node (str): name of derived node\n args (dict): additional arguments to add to "derived_submitter_id"\n\n Returns:\n str: generated "derived_submitter_id"\n '
derived_submitter_id = submitter_id.replace(original_node, derived_node)
for v in args.values():
derived_submitter_id += '_{}'.format(v)
return derived_submitter_id | Derive "submitter_id" for other node.
Args:
submitter_id (str): "submitter_id" to derive from
original_node (str): name of original node
derived_node (str): name of derived node
args (dict): additional arguments to add to "derived_submitter_id"
Returns:
str: generated "derived_submitter_id" | covid19-etl/utils/format_helper.py | derived_submitter_id | uc-cdis/covid19-tools | 2 | python | def derived_submitter_id(submitter_id, original_node, derived_node, args):
'\n Derive "submitter_id" for other node.\n\n Args:\n submitter_id (str): "submitter_id" to derive from\n original_node (str): name of original node\n derived_node (str): name of derived node\n args (dict): additional arguments to add to "derived_submitter_id"\n\n Returns:\n str: generated "derived_submitter_id"\n '
derived_submitter_id = submitter_id.replace(original_node, derived_node)
for v in args.values():
derived_submitter_id += '_{}'.format(v)
return derived_submitter_id | def derived_submitter_id(submitter_id, original_node, derived_node, args):
'\n Derive "submitter_id" for other node.\n\n Args:\n submitter_id (str): "submitter_id" to derive from\n original_node (str): name of original node\n derived_node (str): name of derived node\n args (dict): additional arguments to add to "derived_submitter_id"\n\n Returns:\n str: generated "derived_submitter_id"\n '
derived_submitter_id = submitter_id.replace(original_node, derived_node)
for v in args.values():
derived_submitter_id += '_{}'.format(v)
return derived_submitter_id<|docstring|>Derive "submitter_id" for other node.
Args:
submitter_id (str): "submitter_id" to derive from
original_node (str): name of original node
derived_node (str): name of derived node
args (dict): additional arguments to add to "derived_submitter_id"
Returns:
str: generated "derived_submitter_id"<|endoftext|> |
a6d018b0d2a770c6b832faedbbd942a0811cb62102a35538e662da26c9dbbbd5 | def idph_get_date(date_json):
'\n Get date from IDPH JSON\n\n Args:\n date_json (dict): JSON date with "year", "month", "date" fields\n\n Returns:\n str: datetime in "%Y-%m-%d" format\n '
date = datetime.date(**date_json)
return date.strftime('%Y-%m-%d') | Get date from IDPH JSON
Args:
date_json (dict): JSON date with "year", "month", "date" fields
Returns:
str: datetime in "%Y-%m-%d" format | covid19-etl/utils/format_helper.py | idph_get_date | uc-cdis/covid19-tools | 2 | python | def idph_get_date(date_json):
'\n Get date from IDPH JSON\n\n Args:\n date_json (dict): JSON date with "year", "month", "date" fields\n\n Returns:\n str: datetime in "%Y-%m-%d" format\n '
date = datetime.date(**date_json)
return date.strftime('%Y-%m-%d') | def idph_get_date(date_json):
'\n Get date from IDPH JSON\n\n Args:\n date_json (dict): JSON date with "year", "month", "date" fields\n\n Returns:\n str: datetime in "%Y-%m-%d" format\n '
date = datetime.date(**date_json)
return date.strftime('%Y-%m-%d')<|docstring|>Get date from IDPH JSON
Args:
date_json (dict): JSON date with "year", "month", "date" fields
Returns:
str: datetime in "%Y-%m-%d" format<|endoftext|> |
f6d0fcd870212350a665d8680ffe7cc7aea4e0769051c27258e2e2d62fe74146 | def idph_last_reported_date(utilization_records):
'\n Fetches the "ReportDate" value from the last record of the utilization array\n\n Args:\n utilization_records (list) : List of all historical hospital utilization records\n\n Returns:\n str: last reported date of the data in "%Y-%m-%d" format\n '
return remove_time_from_date_time(utilization_records[(- 1)]['ReportDate']) | Fetches the "ReportDate" value from the last record of the utilization array
Args:
utilization_records (list) : List of all historical hospital utilization records
Returns:
str: last reported date of the data in "%Y-%m-%d" format | covid19-etl/utils/format_helper.py | idph_last_reported_date | uc-cdis/covid19-tools | 2 | python | def idph_last_reported_date(utilization_records):
'\n Fetches the "ReportDate" value from the last record of the utilization array\n\n Args:\n utilization_records (list) : List of all historical hospital utilization records\n\n Returns:\n str: last reported date of the data in "%Y-%m-%d" format\n '
return remove_time_from_date_time(utilization_records[(- 1)]['ReportDate']) | def idph_last_reported_date(utilization_records):
'\n Fetches the "ReportDate" value from the last record of the utilization array\n\n Args:\n utilization_records (list) : List of all historical hospital utilization records\n\n Returns:\n str: last reported date of the data in "%Y-%m-%d" format\n '
return remove_time_from_date_time(utilization_records[(- 1)]['ReportDate'])<|docstring|>Fetches the "ReportDate" value from the last record of the utilization array
Args:
utilization_records (list) : List of all historical hospital utilization records
Returns:
str: last reported date of the data in "%Y-%m-%d" format<|endoftext|> |
c81961d68d820dd5a5944488f1ac56955d7204b5e5d2a86dce14370930e59aad | def get_date_from_str(date_str):
"\n Receives a date string in %Y-%m-%d format and returns a 'datetime.date' object\n "
return datetime.datetime.strptime(remove_time_from_date_time(date_str), '%Y-%m-%d').date() | Receives a date string in %Y-%m-%d format and returns a 'datetime.date' object | covid19-etl/utils/format_helper.py | get_date_from_str | uc-cdis/covid19-tools | 2 | python | def get_date_from_str(date_str):
"\n \n "
return datetime.datetime.strptime(remove_time_from_date_time(date_str), '%Y-%m-%d').date() | def get_date_from_str(date_str):
"\n \n "
return datetime.datetime.strptime(remove_time_from_date_time(date_str), '%Y-%m-%d').date()<|docstring|>Receives a date string in %Y-%m-%d format and returns a 'datetime.date' object<|endoftext|> |
5b6b7fd834d5fb369ab7ffaed3d6efdd04fddfb4507e13cc66169cb22affb1b0 | def __init__(self, file_name='data.csv', transport=True):
'Computes the aircraft center of gravity at operational empty weight'
self.data = load_data(file_name)
if transport:
self.factors = {'wing': 12.2, 'fuselage': 6.8, 'horizontal_tail': 9.8, 'vertical_tail': 9.8, 'nose_gear': 0.009, 'main_gear': 0.048, 'power_plant': 1.4, 'systems': 0.1}
else:
self.factors = {'wing': 49, 'fuselage': 24, 'horizontal_tail': 27, 'vertical_tail': 27, 'nose_gear': 0.006, 'main_gear': 0.037, 'power_plant': 1.3, 'systems': 0.17}
self.get_cr()
self.areas = self.get_areas()
self.cgs = self.components_cg()
self.mass = self.components_mass()
self.cg = self.aircraft_cg() | Computes the aircraft center of gravity at operational empty weight | aircraft/cg_calculation.py | __init__ | iamlucassantos/tutorial-systems-engineering | 1 | python | def __init__(self, file_name='data.csv', transport=True):
self.data = load_data(file_name)
if transport:
self.factors = {'wing': 12.2, 'fuselage': 6.8, 'horizontal_tail': 9.8, 'vertical_tail': 9.8, 'nose_gear': 0.009, 'main_gear': 0.048, 'power_plant': 1.4, 'systems': 0.1}
else:
self.factors = {'wing': 49, 'fuselage': 24, 'horizontal_tail': 27, 'vertical_tail': 27, 'nose_gear': 0.006, 'main_gear': 0.037, 'power_plant': 1.3, 'systems': 0.17}
self.get_cr()
self.areas = self.get_areas()
self.cgs = self.components_cg()
self.mass = self.components_mass()
self.cg = self.aircraft_cg() | def __init__(self, file_name='data.csv', transport=True):
self.data = load_data(file_name)
if transport:
self.factors = {'wing': 12.2, 'fuselage': 6.8, 'horizontal_tail': 9.8, 'vertical_tail': 9.8, 'nose_gear': 0.009, 'main_gear': 0.048, 'power_plant': 1.4, 'systems': 0.1}
else:
self.factors = {'wing': 49, 'fuselage': 24, 'horizontal_tail': 27, 'vertical_tail': 27, 'nose_gear': 0.006, 'main_gear': 0.037, 'power_plant': 1.3, 'systems': 0.17}
self.get_cr()
self.areas = self.get_areas()
self.cgs = self.components_cg()
self.mass = self.components_mass()
self.cg = self.aircraft_cg()<|docstring|>Computes the aircraft center of gravity at operational empty weight<|endoftext|> |
5403d4307d2846cbb20192013b25797aa958b581ad655286da2a818b169cd3ad | def get_cr(self):
'Computes the chord length at the rooot for wing, vertica tail and horizontal tail'
self.cr = ((2 * self.data['S']) / ((self.data['taper'] + 1) * self.data['b']))
self.cr_h = ((2 * self.data['S_h']) / ((self.data['taper_h'] + 1) * self.data['b_h']))
self.cr_v = ((2 * self.data['S_v']) / (((self.data['taper_v'] + 1) * self.data['b_half_v']) * 2)) | Computes the chord length at the rooot for wing, vertica tail and horizontal tail | aircraft/cg_calculation.py | get_cr | iamlucassantos/tutorial-systems-engineering | 1 | python | def get_cr(self):
self.cr = ((2 * self.data['S']) / ((self.data['taper'] + 1) * self.data['b']))
self.cr_h = ((2 * self.data['S_h']) / ((self.data['taper_h'] + 1) * self.data['b_h']))
self.cr_v = ((2 * self.data['S_v']) / (((self.data['taper_v'] + 1) * self.data['b_half_v']) * 2)) | def get_cr(self):
self.cr = ((2 * self.data['S']) / ((self.data['taper'] + 1) * self.data['b']))
self.cr_h = ((2 * self.data['S_h']) / ((self.data['taper_h'] + 1) * self.data['b_h']))
self.cr_v = ((2 * self.data['S_v']) / (((self.data['taper_v'] + 1) * self.data['b_half_v']) * 2))<|docstring|>Computes the chord length at the rooot for wing, vertica tail and horizontal tail<|endoftext|> |
e9ec2c67176623b221a25fdd91732c785427c4276cb124a60e1cdb4fe0c1c37c | def get_areas(self):
'Returns the areas of each group to estimate their mass'
areas = {}
S = self.data['S']
d_fus = self.data['l_h']
b = self.data['b']
(chord_fuselage, _) = self.chord_at_pctg((d_fus / b), surface='w')
area_w = (S - (d_fus * chord_fuselage))
areas['wing'] = area_w
area_v = self.data['S_v']
areas['vertical_tail'] = area_v
area_h = self.data['S_h']
areas['horizontal_tail'] = area_h
l_fus = self.data['l_f']
d_fus = self.data['l_h']
area_f = ((np.pi * d_fus) * l_fus)
areas['fuselage'] = area_f
areas['power_plant'] = self.data['ME']
areas['systems'] = self.data['MTOW']
return areas | Returns the areas of each group to estimate their mass | aircraft/cg_calculation.py | get_areas | iamlucassantos/tutorial-systems-engineering | 1 | python | def get_areas(self):
areas = {}
S = self.data['S']
d_fus = self.data['l_h']
b = self.data['b']
(chord_fuselage, _) = self.chord_at_pctg((d_fus / b), surface='w')
area_w = (S - (d_fus * chord_fuselage))
areas['wing'] = area_w
area_v = self.data['S_v']
areas['vertical_tail'] = area_v
area_h = self.data['S_h']
areas['horizontal_tail'] = area_h
l_fus = self.data['l_f']
d_fus = self.data['l_h']
area_f = ((np.pi * d_fus) * l_fus)
areas['fuselage'] = area_f
areas['power_plant'] = self.data['ME']
areas['systems'] = self.data['MTOW']
return areas | def get_areas(self):
areas = {}
S = self.data['S']
d_fus = self.data['l_h']
b = self.data['b']
(chord_fuselage, _) = self.chord_at_pctg((d_fus / b), surface='w')
area_w = (S - (d_fus * chord_fuselage))
areas['wing'] = area_w
area_v = self.data['S_v']
areas['vertical_tail'] = area_v
area_h = self.data['S_h']
areas['horizontal_tail'] = area_h
l_fus = self.data['l_f']
d_fus = self.data['l_h']
area_f = ((np.pi * d_fus) * l_fus)
areas['fuselage'] = area_f
areas['power_plant'] = self.data['ME']
areas['systems'] = self.data['MTOW']
return areas<|docstring|>Returns the areas of each group to estimate their mass<|endoftext|> |