Dataset columns (field: value type, min-max):
- repository_name: string, length 5-67
- func_path_in_repository: string, length 4-234
- func_name: string, length 0-314
- whole_func_string: string, length 52-3.87M
- language: string, 6 classes
- func_code_string: string, length 39-1.84M
- func_code_tokens: list, length 15-672k
- func_documentation_string: string, length 1-47.2k
- func_documentation_tokens: list, length 1-3.92k
- split_name: string, 1 class
- func_code_url: string, length 85-339
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
section_term_radial_distances
def section_term_radial_distances(neurites, neurite_type=NeuriteType.all, origin=None):
    '''Get the radial distances of the termination sections for a collection of neurites'''
    return section_radial_distances(neurites, neurite_type=neurite_type, origin=origin,
                                    iterator_type=Tree.ileaf)
python
def section_term_radial_distances(neurites, neurite_type=NeuriteType.all, origin=None): return section_radial_distances(neurites, neurite_type=neurite_type, origin=origin, iterator_type=Tree.ileaf)
[ "def", "section_term_radial_distances", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "origin", "=", "None", ")", ":", "return", "section_radial_distances", "(", "neurites", ",", "neurite_type", "=", "neurite_type", ",", "origin", "=", "origin", ",", "iterator_type", "=", "Tree", ".", "ileaf", ")" ]
Get the radial distances of the termination sections for a collection of neurites
[ "Get", "the", "radial", "distances", "of", "the", "termination", "sections", "for", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L336-L339
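The function above reports, for each termination (leaf) section, its radial distance from an origin. A minimal self-contained sketch of the same quantity, using NumPy directly rather than NeuroM's API (the terminal points and origin below are made-up illustration data):

import numpy as np

# Hypothetical terminal points of a neurite (x, y, z) and an origin.
terminal_points = np.array([[10.0, 0.0, 0.0],
                            [0.0, 5.0, 0.0],
                            [3.0, 4.0, 0.0]])
origin = np.zeros(3)

# Radial distance = Euclidean distance of each terminal point from the origin.
radial_distances = np.linalg.norm(terminal_points - origin, axis=1)
print(radial_distances)  # [10.  5.  5.]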
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
section_bif_radial_distances
def section_bif_radial_distances(neurites, neurite_type=NeuriteType.all, origin=None):
    '''Get the radial distances of the bifurcation sections for a collection of neurites'''
    return section_radial_distances(neurites, neurite_type=neurite_type, origin=origin,
                                    iterator_type=Tree.ibifurcation_point)
python
def section_bif_radial_distances(neurites, neurite_type=NeuriteType.all, origin=None): return section_radial_distances(neurites, neurite_type=neurite_type, origin=origin, iterator_type=Tree.ibifurcation_point)
[ "def", "section_bif_radial_distances", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "origin", "=", "None", ")", ":", "return", "section_radial_distances", "(", "neurites", ",", "neurite_type", "=", "neurite_type", ",", "origin", "=", "origin", ",", "iterator_type", "=", "Tree", ".", "ibifurcation_point", ")" ]
Get the radial distances of the bifurcation sections for a collection of neurites
[ "Get", "the", "radial", "distances", "of", "the", "bifurcation", "sections", "for", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L342-L345
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
number_of_sections_per_neurite
def number_of_sections_per_neurite(neurites, neurite_type=NeuriteType.all):
    '''Get the number of sections per neurite in a collection of neurites'''
    return list(sum(1 for _ in n.iter_sections())
                for n in iter_neurites(neurites, filt=is_type(neurite_type)))
python
def number_of_sections_per_neurite(neurites, neurite_type=NeuriteType.all): return list(sum(1 for _ in n.iter_sections()) for n in iter_neurites(neurites, filt=is_type(neurite_type)))
[ "def", "number_of_sections_per_neurite", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "list", "(", "sum", "(", "1", "for", "_", "in", "n", ".", "iter_sections", "(", ")", ")", "for", "n", "in", "iter_neurites", "(", "neurites", ",", "filt", "=", "is_type", "(", "neurite_type", ")", ")", ")" ]
Get the number of sections per neurite in a collection of neurites
[ "Get", "the", "number", "of", "sections", "per", "neurite", "in", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L348-L351
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
total_length_per_neurite
def total_length_per_neurite(neurites, neurite_type=NeuriteType.all):
    '''Get the path length per neurite in a collection'''
    return list(sum(s.length for s in n.iter_sections())
                for n in iter_neurites(neurites, filt=is_type(neurite_type)))
python
def total_length_per_neurite(neurites, neurite_type=NeuriteType.all): return list(sum(s.length for s in n.iter_sections()) for n in iter_neurites(neurites, filt=is_type(neurite_type)))
[ "def", "total_length_per_neurite", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "list", "(", "sum", "(", "s", ".", "length", "for", "s", "in", "n", ".", "iter_sections", "(", ")", ")", "for", "n", "in", "iter_neurites", "(", "neurites", ",", "filt", "=", "is_type", "(", "neurite_type", ")", ")", ")" ]
Get the path length per neurite in a collection
[ "Get", "the", "path", "length", "per", "neurite", "in", "a", "collection" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L354-L357
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
terminal_path_lengths_per_neurite
def terminal_path_lengths_per_neurite(neurites, neurite_type=NeuriteType.all):
    '''Get the path lengths to each terminal point per neurite in a collection'''
    return list(sectionfunc.section_path_length(s)
                for n in iter_neurites(neurites, filt=is_type(neurite_type))
                for s in iter_sections(n, iterator_type=Tree.ileaf))
python
def terminal_path_lengths_per_neurite(neurites, neurite_type=NeuriteType.all): return list(sectionfunc.section_path_length(s) for n in iter_neurites(neurites, filt=is_type(neurite_type)) for s in iter_sections(n, iterator_type=Tree.ileaf))
[ "def", "terminal_path_lengths_per_neurite", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "list", "(", "sectionfunc", ".", "section_path_length", "(", "s", ")", "for", "n", "in", "iter_neurites", "(", "neurites", ",", "filt", "=", "is_type", "(", "neurite_type", ")", ")", "for", "s", "in", "iter_sections", "(", "n", ",", "iterator_type", "=", "Tree", ".", "ileaf", ")", ")" ]
Get the path lengths to each terminal point per neurite in a collection
[ "Get", "the", "path", "lengths", "to", "each", "terminal", "point", "per", "neurite", "in", "a", "collection" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L360-L364
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
total_volume_per_neurite
def total_volume_per_neurite(neurites, neurite_type=NeuriteType.all):
    '''Get the volume per neurite in a collection'''
    return list(sum(s.volume for s in n.iter_sections())
                for n in iter_neurites(neurites, filt=is_type(neurite_type)))
python
def total_volume_per_neurite(neurites, neurite_type=NeuriteType.all): return list(sum(s.volume for s in n.iter_sections()) for n in iter_neurites(neurites, filt=is_type(neurite_type)))
[ "def", "total_volume_per_neurite", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "list", "(", "sum", "(", "s", ".", "volume", "for", "s", "in", "n", ".", "iter_sections", "(", ")", ")", "for", "n", "in", "iter_neurites", "(", "neurites", ",", "filt", "=", "is_type", "(", "neurite_type", ")", ")", ")" ]
Get the volume per neurite in a collection
[ "Get", "the", "volume", "per", "neurite", "in", "a", "collection" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L367-L370
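total_volume_per_neurite sums per-section volumes; a section's volume is in turn accumulated from its segments, which are commonly modelled as truncated cones (frusta). A worked sketch of that per-segment formula with made-up radii and lengths (illustrative only, not a call into NeuroM's sectionfunc):

import math

def frustum_volume(r1, r2, h):
    # Volume of a truncated cone with end radii r1, r2 and height h.
    return math.pi * h * (r1 * r1 + r1 * r2 + r2 * r2) / 3.0

# A two-segment section described as (start_radius, end_radius, length) tuples.
segments = [(2.0, 1.5, 10.0), (1.5, 1.0, 8.0)]
section_volume = sum(frustum_volume(r1, r2, h) for r1, r2, h in segments)
print(round(section_volume, 2))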
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
neurite_volume_density
def neurite_volume_density(neurites, neurite_type=NeuriteType.all):
    '''Get the volume density per neurite

    The volume density is defined as the ratio of the neurite volume and
    the volume of the neurite's enclosing convex hull
    '''
    def vol_density(neurite):
        '''volume density of a single neurite'''
        return neurite.volume / convex_hull(neurite).volume

    return list(vol_density(n)
                for n in iter_neurites(neurites, filt=is_type(neurite_type)))
python
def neurite_volume_density(neurites, neurite_type=NeuriteType.all): def vol_density(neurite): return neurite.volume / convex_hull(neurite).volume return list(vol_density(n) for n in iter_neurites(neurites, filt=is_type(neurite_type)))
[ "def", "neurite_volume_density", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "def", "vol_density", "(", "neurite", ")", ":", "'''volume density of a single neurite'''", "return", "neurite", ".", "volume", "/", "convex_hull", "(", "neurite", ")", ".", "volume", "return", "list", "(", "vol_density", "(", "n", ")", "for", "n", "in", "iter_neurites", "(", "neurites", ",", "filt", "=", "is_type", "(", "neurite_type", ")", ")", ")" ]
Get the volume density per neurite The volume density is defined as the ratio of the neurite volume and the volume of the neurite's enclosing convex hull
[ "Get", "the", "volume", "density", "per", "neurite" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L373-L384
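The docstring defines volume density as the neurite volume divided by the volume of the enclosing convex hull. A standalone sketch of that ratio using scipy.spatial.ConvexHull on a made-up point cloud (the neurite volume below is an assumed number, not one computed from a morphology):

import numpy as np
from scipy.spatial import ConvexHull

# Made-up neurite point cloud (x, y, z) and a made-up total neurite volume.
points = np.random.default_rng(0).uniform(0.0, 100.0, size=(50, 3))
neurite_volume = 1500.0

# Volume density = neurite volume / volume of the enclosing convex hull.
hull = ConvexHull(points)
print(neurite_volume / hull.volume)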
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
section_volumes
def section_volumes(neurites, neurite_type=NeuriteType.all):
    '''section volumes in a collection of neurites'''
    return map_sections(sectionfunc.section_volume, neurites, neurite_type=neurite_type)
python
def section_volumes(neurites, neurite_type=NeuriteType.all): return map_sections(sectionfunc.section_volume, neurites, neurite_type=neurite_type)
[ "def", "section_volumes", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "map_sections", "(", "sectionfunc", ".", "section_volume", ",", "neurites", ",", "neurite_type", "=", "neurite_type", ")" ]
section volumes in a collection of neurites
[ "section", "volumes", "in", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L387-L389
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
section_areas
def section_areas(neurites, neurite_type=NeuriteType.all):
    '''section areas in a collection of neurites'''
    return map_sections(sectionfunc.section_area, neurites, neurite_type=neurite_type)
python
def section_areas(neurites, neurite_type=NeuriteType.all): return map_sections(sectionfunc.section_area, neurites, neurite_type=neurite_type)
[ "def", "section_areas", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "map_sections", "(", "sectionfunc", ".", "section_area", ",", "neurites", ",", "neurite_type", "=", "neurite_type", ")" ]
section areas in a collection of neurites
[ "section", "areas", "in", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L392-L394
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
section_tortuosity
def section_tortuosity(neurites, neurite_type=NeuriteType.all):
    '''section tortuosities in a collection of neurites'''
    return map_sections(sectionfunc.section_tortuosity, neurites, neurite_type=neurite_type)
python
def section_tortuosity(neurites, neurite_type=NeuriteType.all): return map_sections(sectionfunc.section_tortuosity, neurites, neurite_type=neurite_type)
[ "def", "section_tortuosity", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "map_sections", "(", "sectionfunc", ".", "section_tortuosity", ",", "neurites", ",", "neurite_type", "=", "neurite_type", ")" ]
section tortuosities in a collection of neurites
[ "section", "tortuosities", "in", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L397-L399
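Section tortuosity is conventionally the ratio of a section's path length to the straight-line distance between its endpoints; assuming sectionfunc.section_tortuosity follows that definition, here is a NumPy sketch on a made-up polyline:

import numpy as np

def tortuosity(points):
    # Path length along the polyline divided by the end-to-end distance.
    path_length = np.sum(np.linalg.norm(np.diff(points, axis=0), axis=1))
    end_to_end = np.linalg.norm(points[-1] - points[0])
    return path_length / end_to_end

section = np.array([[0.0, 0.0, 0.0],
                    [1.0, 1.0, 0.0],
                    [2.0, 0.0, 0.0]])
print(tortuosity(section))  # ~1.414 for this zig-zag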
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
section_end_distances
def section_end_distances(neurites, neurite_type=NeuriteType.all):
    '''section end to end distances in a collection of neurites'''
    return map_sections(sectionfunc.section_end_distance, neurites, neurite_type=neurite_type)
python
def section_end_distances(neurites, neurite_type=NeuriteType.all): return map_sections(sectionfunc.section_end_distance, neurites, neurite_type=neurite_type)
[ "def", "section_end_distances", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "map_sections", "(", "sectionfunc", ".", "section_end_distance", ",", "neurites", ",", "neurite_type", "=", "neurite_type", ")" ]
section end to end distances in a collection of neurites
[ "section", "end", "to", "end", "distances", "in", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L402-L404
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
principal_direction_extents
def principal_direction_extents(neurites, neurite_type=NeuriteType.all, direction=0):
    '''Principal direction extent of neurites in neurons'''
    def _pde(neurite):
        '''Get the PDE of a single neurite'''
        # Get the X, Y,Z coordinates of the points in each section
        points = neurite.points[:, :3]
        return morphmath.principal_direction_extent(points)[direction]

    return map(_pde, iter_neurites(neurites, filt=is_type(neurite_type)))
python
def principal_direction_extents(neurites, neurite_type=NeuriteType.all, direction=0): def _pde(neurite): points = neurite.points[:, :3] return morphmath.principal_direction_extent(points)[direction] return map(_pde, iter_neurites(neurites, filt=is_type(neurite_type)))
[ "def", "principal_direction_extents", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "direction", "=", "0", ")", ":", "def", "_pde", "(", "neurite", ")", ":", "'''Get the PDE of a single neurite'''", "# Get the X, Y,Z coordinates of the points in each section", "points", "=", "neurite", ".", "points", "[", ":", ",", ":", "3", "]", "return", "morphmath", ".", "principal_direction_extent", "(", "points", ")", "[", "direction", "]", "return", "map", "(", "_pde", ",", "iter_neurites", "(", "neurites", ",", "filt", "=", "is_type", "(", "neurite_type", ")", ")", ")" ]
Principal direction extent of neurites in neurons
[ "Principal", "direction", "extent", "of", "neurites", "in", "neurons" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L407-L415
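The extent along a principal direction is the spread of the points once they are projected onto the eigenvectors of their covariance matrix. The NumPy sketch below approximates what morphmath.principal_direction_extent is expected to compute; the point set is made up and the descending sort order is an assumption:

import numpy as np

def principal_direction_extent(points):
    # Project centered points onto the eigenvectors of the covariance matrix
    # and measure the spread (max - min) along each principal direction.
    centered = points - points.mean(axis=0)
    _, eigenvectors = np.linalg.eigh(np.cov(centered.T))
    projections = centered.dot(eigenvectors)
    extents = projections.max(axis=0) - projections.min(axis=0)
    # Return extents sorted from largest to smallest.
    return np.sort(extents)[::-1]

points = np.array([[0.0, 0.0, 0.0],
                   [10.0, 1.0, 0.0],
                   [20.0, -1.0, 0.5],
                   [30.0, 0.5, -0.5]])
print(principal_direction_extent(points))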
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
section_strahler_orders
def section_strahler_orders(neurites, neurite_type=NeuriteType.all):
    '''Inter-segment opening angles in a section'''
    return map_sections(sectionfunc.strahler_order, neurites, neurite_type)
python
def section_strahler_orders(neurites, neurite_type=NeuriteType.all): return map_sections(sectionfunc.strahler_order, neurites, neurite_type)
[ "def", "section_strahler_orders", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "map_sections", "(", "sectionfunc", ".", "strahler_order", ",", "neurites", ",", "neurite_type", ")" ]
Inter-segment opening angles in a section
[ "Inter", "-", "segment", "opening", "angles", "in", "a", "section" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L418-L420
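The mapped helper, sectionfunc.strahler_order, points at the classic Strahler branching order (the record's docstring appears to describe a different feature). A generic recursive sketch on a nested-list tree, not NeuroM's Section objects:

def strahler_order(children):
    # children: list of subtrees, each itself a list of subtrees (a leaf is []).
    if not children:
        return 1
    orders = sorted((strahler_order(c) for c in children), reverse=True)
    if len(orders) > 1 and orders[0] == orders[1]:
        return orders[0] + 1
    return orders[0]

# A small tree: one child is itself a bifurcation of two leaves, the other is a leaf.
tree = [[[], []], []]
print(strahler_order(tree))  # 2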
BlueBrain/NeuroM
neurom/view/view.py
_plane2col
def _plane2col(plane):
    '''take a string like 'xy', and return the indices from COLS.*'''
    planes = ('xy', 'yx', 'xz', 'zx', 'yz', 'zy')
    assert plane in planes, 'No such plane found! Please select one of: ' + str(planes)
    return (getattr(COLS, plane[0].capitalize()),
            getattr(COLS, plane[1].capitalize()), )
python
def _plane2col(plane): planes = ('xy', 'yx', 'xz', 'zx', 'yz', 'zy') assert plane in planes, 'No such plane found! Please select one of: ' + str(planes) return (getattr(COLS, plane[0].capitalize()), getattr(COLS, plane[1].capitalize()), )
[ "def", "_plane2col", "(", "plane", ")", ":", "planes", "=", "(", "'xy'", ",", "'yx'", ",", "'xz'", ",", "'zx'", ",", "'yz'", ",", "'zy'", ")", "assert", "plane", "in", "planes", ",", "'No such plane found! Please select one of: '", "+", "str", "(", "planes", ")", "return", "(", "getattr", "(", "COLS", ",", "plane", "[", "0", "]", ".", "capitalize", "(", ")", ")", ",", "getattr", "(", "COLS", ",", "plane", "[", "1", "]", ".", "capitalize", "(", ")", ")", ",", ")" ]
take a string like 'xy', and return the indices from COLS.*
[ "take", "a", "string", "like", "xy", "and", "return", "the", "indices", "from", "COLS", ".", "*" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L57-L62
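A tiny sketch of the same mapping with an assumed COLS-like namespace (x, y, z stored in columns 0, 1, 2), showing how a plane string selects column indices:

from types import SimpleNamespace

# Assumed column layout for a point row: (x, y, z, radius, ...).
COLS = SimpleNamespace(X=0, Y=1, Z=2)

def plane2col(plane):
    planes = ('xy', 'yx', 'xz', 'zx', 'yz', 'zy')
    assert plane in planes, 'No such plane found! Please select one of: ' + str(planes)
    # For single characters, capitalize() and upper() are equivalent.
    return (getattr(COLS, plane[0].upper()), getattr(COLS, plane[1].upper()))

print(plane2col('xz'))  # (0, 2)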
BlueBrain/NeuroM
neurom/view/view.py
_get_linewidth
def _get_linewidth(tree, linewidth, diameter_scale):
    '''calculate the desired linewidth based on tree contents

    If diameter_scale exists, it is used to scale the diameter of each of the segments
    in the tree
    If diameter_scale is None, the linewidth is used.
    '''
    if diameter_scale is not None and tree:
        linewidth = [2 * segment_radius(s) * diameter_scale
                     for s in iter_segments(tree)]
    return linewidth
python
def _get_linewidth(tree, linewidth, diameter_scale): if diameter_scale is not None and tree: linewidth = [2 * segment_radius(s) * diameter_scale for s in iter_segments(tree)] return linewidth
[ "def", "_get_linewidth", "(", "tree", ",", "linewidth", ",", "diameter_scale", ")", ":", "if", "diameter_scale", "is", "not", "None", "and", "tree", ":", "linewidth", "=", "[", "2", "*", "segment_radius", "(", "s", ")", "*", "diameter_scale", "for", "s", "in", "iter_segments", "(", "tree", ")", "]", "return", "linewidth" ]
calculate the desired linewidth based on tree contents If diameter_scale exists, it is used to scale the diameter of each of the segments in the tree If diameter_scale is None, the linewidth is used.
[ "calculate", "the", "desired", "linewidth", "based", "on", "tree", "contents" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L65-L75
BlueBrain/NeuroM
neurom/view/view.py
plot_tree
def plot_tree(ax, tree, plane='xy',
              diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
              color=None, alpha=_ALPHA):
    '''Plots a 2d figure of the tree's segments

    Args:
        ax(matplotlib axes): on what to plot
        tree(neurom.core.Tree or neurom.core.Neurite): plotted tree
        plane(str): Any pair of 'xyz'
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values

    Note:
        If the tree contains one single point the plot will be empty
        since no segments can be constructed.
    '''
    plane0, plane1 = _plane2col(plane)
    segs = [((s[0][plane0], s[0][plane1]),
             (s[1][plane0], s[1][plane1]))
            for s in iter_segments(tree)]

    linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)
    color = _get_color(color, tree.type)

    collection = LineCollection(segs, color=color, linewidth=linewidth, alpha=alpha)
    ax.add_collection(collection)
python
def plot_tree(ax, tree, plane='xy', diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): plane0, plane1 = _plane2col(plane) segs = [((s[0][plane0], s[0][plane1]), (s[1][plane0], s[1][plane1])) for s in iter_segments(tree)] linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth) color = _get_color(color, tree.type) collection = LineCollection(segs, color=color, linewidth=linewidth, alpha=alpha) ax.add_collection(collection)
[ "def", "plot_tree", "(", "ax", ",", "tree", ",", "plane", "=", "'xy'", ",", "diameter_scale", "=", "_DIAMETER_SCALE", ",", "linewidth", "=", "_LINEWIDTH", ",", "color", "=", "None", ",", "alpha", "=", "_ALPHA", ")", ":", "plane0", ",", "plane1", "=", "_plane2col", "(", "plane", ")", "segs", "=", "[", "(", "(", "s", "[", "0", "]", "[", "plane0", "]", ",", "s", "[", "0", "]", "[", "plane1", "]", ")", ",", "(", "s", "[", "1", "]", "[", "plane0", "]", ",", "s", "[", "1", "]", "[", "plane1", "]", ")", ")", "for", "s", "in", "iter_segments", "(", "tree", ")", "]", "linewidth", "=", "_get_linewidth", "(", "tree", ",", "diameter_scale", "=", "diameter_scale", ",", "linewidth", "=", "linewidth", ")", "color", "=", "_get_color", "(", "color", ",", "tree", ".", "type", ")", "collection", "=", "LineCollection", "(", "segs", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "alpha", "=", "alpha", ")", "ax", ".", "add_collection", "(", "collection", ")" ]
Plots a 2d figure of the tree's segments Args: ax(matplotlib axes): on what to plot tree(neurom.core.Tree or neurom.core.Neurite): plotted tree plane(str): Any pair of 'xyz' diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values Note: If the tree contains one single point the plot will be empty since no segments can be constructed.
[ "Plots", "a", "2d", "figure", "of", "the", "tree", "s", "segments" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L85-L112
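plot_tree boils down to turning segment endpoints into a matplotlib LineCollection and adding it to the axes. A standalone sketch of that pattern with made-up 2D segments and per-segment linewidths (no NeuroM objects involved):

import matplotlib
matplotlib.use('Agg')  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

# Made-up 2D segments: each segment is ((x0, y0), (x1, y1)).
segs = [((0.0, 0.0), (1.0, 2.0)),
        ((1.0, 2.0), (2.0, 1.5)),
        ((1.0, 2.0), (1.5, 3.0))]
linewidths = [2.0, 1.5, 1.0]  # e.g. widths derived from segment diameters

fig, ax = plt.subplots()
ax.add_collection(LineCollection(segs, color='C0', linewidth=linewidths, alpha=0.8))
ax.autoscale()  # collections do not update the view limits automatically
fig.savefig('tree2d.png')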
BlueBrain/NeuroM
neurom/view/view.py
plot_soma
def plot_soma(ax, soma, plane='xy',
              soma_outline=True,
              linewidth=_LINEWIDTH,
              color=None, alpha=_ALPHA):
    '''Generates a 2d figure of the soma.

    Args:
        ax(matplotlib axes): on what to plot
        soma(neurom.core.Soma): plotted soma
        plane(str): Any pair of 'xyz'
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    '''
    plane0, plane1 = _plane2col(plane)
    color = _get_color(color, tree_type=NeuriteType.soma)

    if isinstance(soma, SomaCylinders):
        plane0, plane1 = _plane2col(plane)
        for start, end in zip(soma.points, soma.points[1:]):
            common.project_cylinder_onto_2d(ax, (plane0, plane1),
                                            start=start[COLS.XYZ], end=end[COLS.XYZ],
                                            start_radius=start[COLS.R], end_radius=end[COLS.R],
                                            color=color, alpha=alpha)
    else:
        if soma_outline:
            ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius,
                                 color=color, alpha=alpha))
        else:
            plane0, plane1 = _plane2col(plane)
            points = [(p[plane0], p[plane1]) for p in soma.iter()]
            if points:
                points.append(points[0])  # close the loop
            ax.plot(points, color=color, alpha=alpha, linewidth=linewidth)

    ax.set_xlabel(plane[0])
    ax.set_ylabel(plane[1])

    bounding_box = geom.bounding_box(soma)
    ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]],
                                              [bounding_box[1][plane0], bounding_box[1][plane1]])),
                                   ignore=False)
python
def plot_soma(ax, soma, plane='xy', soma_outline=True, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): plane0, plane1 = _plane2col(plane) color = _get_color(color, tree_type=NeuriteType.soma) if isinstance(soma, SomaCylinders): plane0, plane1 = _plane2col(plane) for start, end in zip(soma.points, soma.points[1:]): common.project_cylinder_onto_2d(ax, (plane0, plane1), start=start[COLS.XYZ], end=end[COLS.XYZ], start_radius=start[COLS.R], end_radius=end[COLS.R], color=color, alpha=alpha) else: if soma_outline: ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius, color=color, alpha=alpha)) else: plane0, plane1 = _plane2col(plane) points = [(p[plane0], p[plane1]) for p in soma.iter()] if points: points.append(points[0]) ax.plot(points, color=color, alpha=alpha, linewidth=linewidth) ax.set_xlabel(plane[0]) ax.set_ylabel(plane[1]) bounding_box = geom.bounding_box(soma) ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]], [bounding_box[1][plane0], bounding_box[1][plane1]])), ignore=False)
[ "def", "plot_soma", "(", "ax", ",", "soma", ",", "plane", "=", "'xy'", ",", "soma_outline", "=", "True", ",", "linewidth", "=", "_LINEWIDTH", ",", "color", "=", "None", ",", "alpha", "=", "_ALPHA", ")", ":", "plane0", ",", "plane1", "=", "_plane2col", "(", "plane", ")", "color", "=", "_get_color", "(", "color", ",", "tree_type", "=", "NeuriteType", ".", "soma", ")", "if", "isinstance", "(", "soma", ",", "SomaCylinders", ")", ":", "plane0", ",", "plane1", "=", "_plane2col", "(", "plane", ")", "for", "start", ",", "end", "in", "zip", "(", "soma", ".", "points", ",", "soma", ".", "points", "[", "1", ":", "]", ")", ":", "common", ".", "project_cylinder_onto_2d", "(", "ax", ",", "(", "plane0", ",", "plane1", ")", ",", "start", "=", "start", "[", "COLS", ".", "XYZ", "]", ",", "end", "=", "end", "[", "COLS", ".", "XYZ", "]", ",", "start_radius", "=", "start", "[", "COLS", ".", "R", "]", ",", "end_radius", "=", "end", "[", "COLS", ".", "R", "]", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "else", ":", "if", "soma_outline", ":", "ax", ".", "add_artist", "(", "Circle", "(", "soma", ".", "center", "[", "[", "plane0", ",", "plane1", "]", "]", ",", "soma", ".", "radius", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", ")", "else", ":", "plane0", ",", "plane1", "=", "_plane2col", "(", "plane", ")", "points", "=", "[", "(", "p", "[", "plane0", "]", ",", "p", "[", "plane1", "]", ")", "for", "p", "in", "soma", ".", "iter", "(", ")", "]", "if", "points", ":", "points", ".", "append", "(", "points", "[", "0", "]", ")", "# close the loop", "ax", ".", "plot", "(", "points", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ",", "linewidth", "=", "linewidth", ")", "ax", ".", "set_xlabel", "(", "plane", "[", "0", "]", ")", "ax", ".", "set_ylabel", "(", "plane", "[", "1", "]", ")", "bounding_box", "=", "geom", ".", "bounding_box", "(", "soma", ")", "ax", ".", "dataLim", ".", "update_from_data_xy", "(", "np", ".", "vstack", "(", "(", "[", "bounding_box", "[", "0", "]", "[", "plane0", "]", ",", "bounding_box", "[", "0", "]", "[", "plane1", "]", "]", ",", "[", "bounding_box", "[", "1", "]", "[", "plane0", "]", ",", "bounding_box", "[", "1", "]", "[", "plane1", "]", "]", ")", ")", ",", "ignore", "=", "False", ")" ]
Generates a 2d figure of the soma. Args: ax(matplotlib axes): on what to plot soma(neurom.core.Soma): plotted soma plane(str): Any pair of 'xyz' diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
[ "Generates", "a", "2d", "figure", "of", "the", "soma", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L115-L158
BlueBrain/NeuroM
neurom/view/view.py
plot_neuron
def plot_neuron(ax, nrn,
                neurite_type=NeuriteType.all,
                plane='xy',
                soma_outline=True,
                diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                color=None, alpha=_ALPHA):
    '''Plots a 2D figure of the neuron, that contains a soma and the neurites

    Args:
        ax(matplotlib axes): on what to plot
        neurite_type(NeuriteType): an optional filter on the neurite type
        nrn(neuron): neuron to be plotted
        soma_outline(bool): should the soma be drawn as an outline
        plane(str): Any pair of 'xyz'
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    '''
    plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline,
              linewidth=linewidth, color=color, alpha=alpha)

    for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):
        plot_tree(ax, neurite, plane=plane,
                  diameter_scale=diameter_scale, linewidth=linewidth,
                  color=color, alpha=alpha)

    ax.set_title(nrn.name)
    ax.set_xlabel(plane[0])
    ax.set_ylabel(plane[1])
python
def plot_neuron(ax, nrn, neurite_type=NeuriteType.all, plane='xy', soma_outline=True, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline, linewidth=linewidth, color=color, alpha=alpha) for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)): plot_tree(ax, neurite, plane=plane, diameter_scale=diameter_scale, linewidth=linewidth, color=color, alpha=alpha) ax.set_title(nrn.name) ax.set_xlabel(plane[0]) ax.set_ylabel(plane[1])
[ "def", "plot_neuron", "(", "ax", ",", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "plane", "=", "'xy'", ",", "soma_outline", "=", "True", ",", "diameter_scale", "=", "_DIAMETER_SCALE", ",", "linewidth", "=", "_LINEWIDTH", ",", "color", "=", "None", ",", "alpha", "=", "_ALPHA", ")", ":", "plot_soma", "(", "ax", ",", "nrn", ".", "soma", ",", "plane", "=", "plane", ",", "soma_outline", "=", "soma_outline", ",", "linewidth", "=", "linewidth", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "for", "neurite", "in", "iter_neurites", "(", "nrn", ",", "filt", "=", "tree_type_checker", "(", "neurite_type", ")", ")", ":", "plot_tree", "(", "ax", ",", "neurite", ",", "plane", "=", "plane", ",", "diameter_scale", "=", "diameter_scale", ",", "linewidth", "=", "linewidth", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "ax", ".", "set_title", "(", "nrn", ".", "name", ")", "ax", ".", "set_xlabel", "(", "plane", "[", "0", "]", ")", "ax", ".", "set_ylabel", "(", "plane", "[", "1", "]", ")" ]
Plots a 2D figure of the neuron, that contains a soma and the neurites Args: ax(matplotlib axes): on what to plot neurite_type(NeuriteType): an optional filter on the neurite type nrn(neuron): neuron to be plotted soma_outline(bool): should the soma be drawn as an outline plane(str): Any pair of 'xyz' diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
[ "Plots", "a", "2D", "figure", "of", "the", "neuron", "that", "contains", "a", "soma", "and", "the", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L162-L191
BlueBrain/NeuroM
neurom/view/view.py
_update_3d_datalim
def _update_3d_datalim(ax, obj):
    '''unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually'''
    min_bounding_box, max_bounding_box = geom.bounding_box(obj)
    xy_bounds = np.vstack((min_bounding_box[:COLS.Z],
                           max_bounding_box[:COLS.Z]))
    ax.xy_dataLim.update_from_data_xy(xy_bounds, ignore=False)

    z_bounds = np.vstack(((min_bounding_box[COLS.Z], min_bounding_box[COLS.Z]),
                          (max_bounding_box[COLS.Z], max_bounding_box[COLS.Z])))
    ax.zz_dataLim.update_from_data_xy(z_bounds, ignore=False)
python
def _update_3d_datalim(ax, obj): min_bounding_box, max_bounding_box = geom.bounding_box(obj) xy_bounds = np.vstack((min_bounding_box[:COLS.Z], max_bounding_box[:COLS.Z])) ax.xy_dataLim.update_from_data_xy(xy_bounds, ignore=False) z_bounds = np.vstack(((min_bounding_box[COLS.Z], min_bounding_box[COLS.Z]), (max_bounding_box[COLS.Z], max_bounding_box[COLS.Z]))) ax.zz_dataLim.update_from_data_xy(z_bounds, ignore=False)
[ "def", "_update_3d_datalim", "(", "ax", ",", "obj", ")", ":", "min_bounding_box", ",", "max_bounding_box", "=", "geom", ".", "bounding_box", "(", "obj", ")", "xy_bounds", "=", "np", ".", "vstack", "(", "(", "min_bounding_box", "[", ":", "COLS", ".", "Z", "]", ",", "max_bounding_box", "[", ":", "COLS", ".", "Z", "]", ")", ")", "ax", ".", "xy_dataLim", ".", "update_from_data_xy", "(", "xy_bounds", ",", "ignore", "=", "False", ")", "z_bounds", "=", "np", ".", "vstack", "(", "(", "(", "min_bounding_box", "[", "COLS", ".", "Z", "]", ",", "min_bounding_box", "[", "COLS", ".", "Z", "]", ")", ",", "(", "max_bounding_box", "[", "COLS", ".", "Z", "]", ",", "max_bounding_box", "[", "COLS", ".", "Z", "]", ")", ")", ")", "ax", ".", "zz_dataLim", ".", "update_from_data_xy", "(", "z_bounds", ",", "ignore", "=", "False", ")" ]
unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually
[ "unlike", "w", "/", "2d", "Axes", "the", "dataLim", "isn", "t", "set", "by", "collections", "so", "it", "has", "to", "be", "updated", "manually" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L194-L203
BlueBrain/NeuroM
neurom/view/view.py
plot_tree3d
def plot_tree3d(ax, tree,
                diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                color=None, alpha=_ALPHA):
    '''Generates a figure of the tree in 3d.

    If the tree contains one single point the plot will be empty \
    since no segments can be constructed.

    Args:
        ax(matplotlib axes): on what to plot
        tree(neurom.core.Tree or neurom.core.Neurite): plotted tree
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    '''
    segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in iter_segments(tree)]

    linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)
    color = _get_color(color, tree.type)

    collection = Line3DCollection(segs, color=color, linewidth=linewidth, alpha=alpha)
    ax.add_collection3d(collection)

    _update_3d_datalim(ax, tree)
python
def plot_tree3d(ax, tree, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in iter_segments(tree)] linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth) color = _get_color(color, tree.type) collection = Line3DCollection(segs, color=color, linewidth=linewidth, alpha=alpha) ax.add_collection3d(collection) _update_3d_datalim(ax, tree)
[ "def", "plot_tree3d", "(", "ax", ",", "tree", ",", "diameter_scale", "=", "_DIAMETER_SCALE", ",", "linewidth", "=", "_LINEWIDTH", ",", "color", "=", "None", ",", "alpha", "=", "_ALPHA", ")", ":", "segs", "=", "[", "(", "s", "[", "0", "]", "[", "COLS", ".", "XYZ", "]", ",", "s", "[", "1", "]", "[", "COLS", ".", "XYZ", "]", ")", "for", "s", "in", "iter_segments", "(", "tree", ")", "]", "linewidth", "=", "_get_linewidth", "(", "tree", ",", "diameter_scale", "=", "diameter_scale", ",", "linewidth", "=", "linewidth", ")", "color", "=", "_get_color", "(", "color", ",", "tree", ".", "type", ")", "collection", "=", "Line3DCollection", "(", "segs", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "alpha", "=", "alpha", ")", "ax", ".", "add_collection3d", "(", "collection", ")", "_update_3d_datalim", "(", "ax", ",", "tree", ")" ]
Generates a figure of the tree in 3d. If the tree contains one single point the plot will be empty \ since no segments can be constructed. Args: ax(matplotlib axes): on what to plot tree(neurom.core.Tree or neurom.core.Neurite): plotted tree diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
[ "Generates", "a", "figure", "of", "the", "tree", "in", "3d", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L206-L230
BlueBrain/NeuroM
neurom/view/view.py
plot_soma3d
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):
    '''Generates a 3d figure of the soma.

    Args:
        ax(matplotlib axes): on what to plot
        soma(neurom.core.Soma): plotted soma
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    '''
    color = _get_color(color, tree_type=NeuriteType.soma)

    if isinstance(soma, SomaCylinders):
        for start, end in zip(soma.points, soma.points[1:]):
            common.plot_cylinder(ax,
                                 start=start[COLS.XYZ], end=end[COLS.XYZ],
                                 start_radius=start[COLS.R], end_radius=end[COLS.R],
                                 color=color, alpha=alpha)
    else:
        common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius,
                           color=color, alpha=alpha)

    # unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually
    _update_3d_datalim(ax, soma)
python
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA): color = _get_color(color, tree_type=NeuriteType.soma) if isinstance(soma, SomaCylinders): for start, end in zip(soma.points, soma.points[1:]): common.plot_cylinder(ax, start=start[COLS.XYZ], end=end[COLS.XYZ], start_radius=start[COLS.R], end_radius=end[COLS.R], color=color, alpha=alpha) else: common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius, color=color, alpha=alpha) _update_3d_datalim(ax, soma)
[ "def", "plot_soma3d", "(", "ax", ",", "soma", ",", "color", "=", "None", ",", "alpha", "=", "_ALPHA", ")", ":", "color", "=", "_get_color", "(", "color", ",", "tree_type", "=", "NeuriteType", ".", "soma", ")", "if", "isinstance", "(", "soma", ",", "SomaCylinders", ")", ":", "for", "start", ",", "end", "in", "zip", "(", "soma", ".", "points", ",", "soma", ".", "points", "[", "1", ":", "]", ")", ":", "common", ".", "plot_cylinder", "(", "ax", ",", "start", "=", "start", "[", "COLS", ".", "XYZ", "]", ",", "end", "=", "end", "[", "COLS", ".", "XYZ", "]", ",", "start_radius", "=", "start", "[", "COLS", ".", "R", "]", ",", "end_radius", "=", "end", "[", "COLS", ".", "R", "]", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "else", ":", "common", ".", "plot_sphere", "(", "ax", ",", "center", "=", "soma", ".", "center", "[", "COLS", ".", "XYZ", "]", ",", "radius", "=", "soma", ".", "radius", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "# unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually", "_update_3d_datalim", "(", "ax", ",", "soma", ")" ]
Generates a 3d figure of the soma. Args: ax(matplotlib axes): on what to plot soma(neurom.core.Soma): plotted soma color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
[ "Generates", "a", "3d", "figure", "of", "the", "soma", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L233-L255
BlueBrain/NeuroM
neurom/view/view.py
plot_neuron3d
def plot_neuron3d(ax, nrn, neurite_type=NeuriteType.all,
                  diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                  color=None, alpha=_ALPHA):
    '''
    Generates a figure of the neuron,
    that contains a soma and a list of trees.

    Args:
        ax(matplotlib axes): on what to plot
        nrn(neuron): neuron to be plotted
        neurite_type(NeuriteType): an optional filter on the neurite type
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    '''
    plot_soma3d(ax, nrn.soma, color=color, alpha=alpha)

    for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):
        plot_tree3d(ax, neurite,
                    diameter_scale=diameter_scale, linewidth=linewidth,
                    color=color, alpha=alpha)

    ax.set_title(nrn.name)
python
def plot_neuron3d(ax, nrn, neurite_type=NeuriteType.all, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): plot_soma3d(ax, nrn.soma, color=color, alpha=alpha) for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)): plot_tree3d(ax, neurite, diameter_scale=diameter_scale, linewidth=linewidth, color=color, alpha=alpha) ax.set_title(nrn.name)
[ "def", "plot_neuron3d", "(", "ax", ",", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "diameter_scale", "=", "_DIAMETER_SCALE", ",", "linewidth", "=", "_LINEWIDTH", ",", "color", "=", "None", ",", "alpha", "=", "_ALPHA", ")", ":", "plot_soma3d", "(", "ax", ",", "nrn", ".", "soma", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "for", "neurite", "in", "iter_neurites", "(", "nrn", ",", "filt", "=", "tree_type_checker", "(", "neurite_type", ")", ")", ":", "plot_tree3d", "(", "ax", ",", "neurite", ",", "diameter_scale", "=", "diameter_scale", ",", "linewidth", "=", "linewidth", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "ax", ".", "set_title", "(", "nrn", ".", "name", ")" ]
Generates a figure of the neuron, that contains a soma and a list of trees. Args: ax(matplotlib axes): on what to plot nrn(neuron): neuron to be plotted neurite_type(NeuriteType): an optional filter on the neurite type diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
[ "Generates", "a", "figure", "of", "the", "neuron", "that", "contains", "a", "soma", "and", "a", "list", "of", "trees", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L258-L281
BlueBrain/NeuroM
neurom/view/view.py
_generate_collection
def _generate_collection(group, ax, ctype, colors):
    '''Render rectangle collection'''
    color = TREE_COLOR[ctype]

    # generate segment collection
    collection = PolyCollection(group, closed=False, antialiaseds=True,
                                edgecolors='face', facecolors=color)

    # add it to the axes
    ax.add_collection(collection)

    # dummy plot for the legend
    if color not in colors:
        label = str(ctype).replace('NeuriteType.', '').replace('_', ' ').capitalize()
        ax.plot((0., 0.), (0., 0.), c=color, label=label)
        colors.add(color)
python
def _generate_collection(group, ax, ctype, colors): color = TREE_COLOR[ctype] collection = PolyCollection(group, closed=False, antialiaseds=True, edgecolors='face', facecolors=color) ax.add_collection(collection) if color not in colors: label = str(ctype).replace('NeuriteType.', '').replace('_', ' ').capitalize() ax.plot((0., 0.), (0., 0.), c=color, label=label) colors.add(color)
[ "def", "_generate_collection", "(", "group", ",", "ax", ",", "ctype", ",", "colors", ")", ":", "color", "=", "TREE_COLOR", "[", "ctype", "]", "# generate segment collection", "collection", "=", "PolyCollection", "(", "group", ",", "closed", "=", "False", ",", "antialiaseds", "=", "True", ",", "edgecolors", "=", "'face'", ",", "facecolors", "=", "color", ")", "# add it to the axes", "ax", ".", "add_collection", "(", "collection", ")", "# dummy plot for the legend", "if", "color", "not", "in", "colors", ":", "label", "=", "str", "(", "ctype", ")", ".", "replace", "(", "'NeuriteType.'", ",", "''", ")", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "capitalize", "(", ")", "ax", ".", "plot", "(", "(", "0.", ",", "0.", ")", ",", "(", "0.", ",", "0.", ")", ",", "c", "=", "color", ",", "label", "=", "label", ")", "colors", ".", "add", "(", "color", ")" ]
Render rectangle collection
[ "Render", "rectangle", "collection" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L284-L299
BlueBrain/NeuroM
neurom/view/view.py
_render_dendrogram
def _render_dendrogram(dnd, ax, displacement):
    '''Renders dendrogram'''
    # set of unique colors that reflect the set of types of the neurites
    colors = set()

    for n, (indices, ctype) in enumerate(zip(dnd.groups, dnd.types)):
        # slice rectangles array for the current neurite
        group = dnd.data[indices[0]:indices[1]]

        if n > 0:
            # displace the neurites by half of their maximum x dimension
            # plus half of the previous neurite's maxmimum x dimension
            displacement += 0.5 * (dnd.dims[n - 1][0] + dnd.dims[n][0])

        # arrange the trees without overlapping with each other
        group += (displacement, 0.)

        # create the polygonal collection of the dendrogram
        # segments
        _generate_collection(group, ax, ctype, colors)

    soma_square = dnd.soma

    if soma_square is not None:
        _generate_collection((soma_square + (displacement / 2., 0.),), ax, NeuriteType.soma, colors)

        ax.plot((displacement / 2., displacement), (0., 0.), color='k')
        ax.plot((0., displacement / 2.), (0., 0.), color='k')

    return displacement
python
def _render_dendrogram(dnd, ax, displacement): colors = set() for n, (indices, ctype) in enumerate(zip(dnd.groups, dnd.types)): group = dnd.data[indices[0]:indices[1]] if n > 0: displacement += 0.5 * (dnd.dims[n - 1][0] + dnd.dims[n][0]) group += (displacement, 0.) _generate_collection(group, ax, ctype, colors) soma_square = dnd.soma if soma_square is not None: _generate_collection((soma_square + (displacement / 2., 0.),), ax, NeuriteType.soma, colors) ax.plot((displacement / 2., displacement), (0., 0.), color='k') ax.plot((0., displacement / 2.), (0., 0.), color='k') return displacement
[ "def", "_render_dendrogram", "(", "dnd", ",", "ax", ",", "displacement", ")", ":", "# set of unique colors that reflect the set of types of the neurites", "colors", "=", "set", "(", ")", "for", "n", ",", "(", "indices", ",", "ctype", ")", "in", "enumerate", "(", "zip", "(", "dnd", ".", "groups", ",", "dnd", ".", "types", ")", ")", ":", "# slice rectangles array for the current neurite", "group", "=", "dnd", ".", "data", "[", "indices", "[", "0", "]", ":", "indices", "[", "1", "]", "]", "if", "n", ">", "0", ":", "# displace the neurites by half of their maximum x dimension", "# plus half of the previous neurite's maxmimum x dimension", "displacement", "+=", "0.5", "*", "(", "dnd", ".", "dims", "[", "n", "-", "1", "]", "[", "0", "]", "+", "dnd", ".", "dims", "[", "n", "]", "[", "0", "]", ")", "# arrange the trees without overlapping with each other", "group", "+=", "(", "displacement", ",", "0.", ")", "# create the polygonal collection of the dendrogram", "# segments", "_generate_collection", "(", "group", ",", "ax", ",", "ctype", ",", "colors", ")", "soma_square", "=", "dnd", ".", "soma", "if", "soma_square", "is", "not", "None", ":", "_generate_collection", "(", "(", "soma_square", "+", "(", "displacement", "/", "2.", ",", "0.", ")", ",", ")", ",", "ax", ",", "NeuriteType", ".", "soma", ",", "colors", ")", "ax", ".", "plot", "(", "(", "displacement", "/", "2.", ",", "displacement", ")", ",", "(", "0.", ",", "0.", ")", ",", "color", "=", "'k'", ")", "ax", ".", "plot", "(", "(", "0.", ",", "displacement", "/", "2.", ")", ",", "(", "0.", ",", "0.", ")", ",", "color", "=", "'k'", ")", "return", "displacement" ]
Renders dendrogram
[ "Renders", "dendrogram" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L302-L332
BlueBrain/NeuroM
neurom/view/view.py
plot_dendrogram
def plot_dendrogram(ax, obj, show_diameters=True):
    '''Dendrogram of `obj`

    Args:
        obj: Neuron or tree \
        neurom.Neuron, neurom.Tree
        show_diameters : boolean \
            Determines if node diameters will \
            be show or not.
    '''
    # create dendrogram and generate rectangle collection
    dnd = Dendrogram(obj, show_diameters=show_diameters)
    dnd.generate()

    # render dendrogram and take into account neurite displacement which
    # starts as zero. It is important to avoid overlapping of neurites
    # and to determine tha limits of the figure.
    _render_dendrogram(dnd, ax, 0.)

    ax.set_title('Morphology Dendrogram')
    ax.set_xlabel('micrometers (um)')
    ax.set_ylabel('micrometers (um)')

    ax.set_aspect('auto')
    ax.legend()
python
def plot_dendrogram(ax, obj, show_diameters=True): dnd = Dendrogram(obj, show_diameters=show_diameters) dnd.generate() _render_dendrogram(dnd, ax, 0.) ax.set_title('Morphology Dendrogram') ax.set_xlabel('micrometers (um)') ax.set_ylabel('micrometers (um)') ax.set_aspect('auto') ax.legend()
[ "def", "plot_dendrogram", "(", "ax", ",", "obj", ",", "show_diameters", "=", "True", ")", ":", "# create dendrogram and generate rectangle collection", "dnd", "=", "Dendrogram", "(", "obj", ",", "show_diameters", "=", "show_diameters", ")", "dnd", ".", "generate", "(", ")", "# render dendrogram and take into account neurite displacement which", "# starts as zero. It is important to avoid overlapping of neurites", "# and to determine tha limits of the figure.", "_render_dendrogram", "(", "dnd", ",", "ax", ",", "0.", ")", "ax", ".", "set_title", "(", "'Morphology Dendrogram'", ")", "ax", ".", "set_xlabel", "(", "'micrometers (um)'", ")", "ax", ".", "set_ylabel", "(", "'micrometers (um)'", ")", "ax", ".", "set_aspect", "(", "'auto'", ")", "ax", ".", "legend", "(", ")" ]
Dendrogram of `obj` Args: obj: Neuron or tree \ neurom.Neuron, neurom.Tree show_diameters : boolean \ Determines if node diameters will \ be show or not.
[ "Dendrogram", "of", "obj" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L335-L360
BlueBrain/NeuroM
neurom/fst/_core.py
make_neurites
def make_neurites(rdw):
    '''Build neurite trees from a raw data wrapper'''
    post_action = _NEURITE_ACTION[rdw.fmt]
    trunks = rdw.neurite_root_section_ids()
    if not trunks:
        return [], []

    # One pass over sections to build nodes
    nodes = tuple(Section(section_id=i,
                          points=rdw.data_block[sec.ids],
                          section_type=_TREE_TYPES[sec.ntype])
                  for i, sec in enumerate(rdw.sections))

    # One pass over nodes to connect children to parents
    for i, node in enumerate(nodes):
        parent_id = rdw.sections[i].pid
        parent_type = nodes[parent_id].type
        # only connect neurites
        if parent_id != ROOT_ID and parent_type != NeuriteType.soma:
            nodes[parent_id].add_child(node)

    neurites = tuple(Neurite(nodes[i]) for i in trunks)

    if post_action is not None:
        for n in neurites:
            post_action(n.root_node)

    return neurites, nodes
python
def make_neurites(rdw): post_action = _NEURITE_ACTION[rdw.fmt] trunks = rdw.neurite_root_section_ids() if not trunks: return [], [] nodes = tuple(Section(section_id=i, points=rdw.data_block[sec.ids], section_type=_TREE_TYPES[sec.ntype]) for i, sec in enumerate(rdw.sections)) for i, node in enumerate(nodes): parent_id = rdw.sections[i].pid parent_type = nodes[parent_id].type if parent_id != ROOT_ID and parent_type != NeuriteType.soma: nodes[parent_id].add_child(node) neurites = tuple(Neurite(nodes[i]) for i in trunks) if post_action is not None: for n in neurites: post_action(n.root_node) return neurites, nodes
[ "def", "make_neurites", "(", "rdw", ")", ":", "post_action", "=", "_NEURITE_ACTION", "[", "rdw", ".", "fmt", "]", "trunks", "=", "rdw", ".", "neurite_root_section_ids", "(", ")", "if", "not", "trunks", ":", "return", "[", "]", ",", "[", "]", "# One pass over sections to build nodes", "nodes", "=", "tuple", "(", "Section", "(", "section_id", "=", "i", ",", "points", "=", "rdw", ".", "data_block", "[", "sec", ".", "ids", "]", ",", "section_type", "=", "_TREE_TYPES", "[", "sec", ".", "ntype", "]", ")", "for", "i", ",", "sec", "in", "enumerate", "(", "rdw", ".", "sections", ")", ")", "# One pass over nodes to connect children to parents", "for", "i", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "parent_id", "=", "rdw", ".", "sections", "[", "i", "]", ".", "pid", "parent_type", "=", "nodes", "[", "parent_id", "]", ".", "type", "# only connect neurites", "if", "parent_id", "!=", "ROOT_ID", "and", "parent_type", "!=", "NeuriteType", ".", "soma", ":", "nodes", "[", "parent_id", "]", ".", "add_child", "(", "node", ")", "neurites", "=", "tuple", "(", "Neurite", "(", "nodes", "[", "i", "]", ")", "for", "i", "in", "trunks", ")", "if", "post_action", "is", "not", "None", ":", "for", "n", "in", "neurites", ":", "post_action", "(", "n", ".", "root_node", ")", "return", "neurites", ",", "nodes" ]
Build neurite trees from a raw data wrapper
[ "Build", "neurite", "trees", "from", "a", "raw", "data", "wrapper" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_core.py#L78-L105
BlueBrain/NeuroM
neurom/fst/_core.py
_remove_soma_initial_point
def _remove_soma_initial_point(tree):
    '''Remove tree's initial point if soma'''
    if tree.points[0][COLS.TYPE] == POINT_TYPE.SOMA:
        tree.points = tree.points[1:]
python
def _remove_soma_initial_point(tree): if tree.points[0][COLS.TYPE] == POINT_TYPE.SOMA: tree.points = tree.points[1:]
[ "def", "_remove_soma_initial_point", "(", "tree", ")", ":", "if", "tree", ".", "points", "[", "0", "]", "[", "COLS", ".", "TYPE", "]", "==", "POINT_TYPE", ".", "SOMA", ":", "tree", ".", "points", "=", "tree", ".", "points", "[", "1", ":", "]" ]
Remove tree's initial point if soma
[ "Remove", "tree", "s", "initial", "point", "if", "soma" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_core.py#L108-L111
BlueBrain/NeuroM
neurom/fst/_core.py
_check_soma_topology_swc
def _check_soma_topology_swc(points):
    '''check if points form valid soma

    Currently checks if there are bifurcations within a soma
    with more than three points.
    '''
    if len(points) == 3:
        return

    parents = tuple(p[COLS.P] for p in points if p[COLS.P] != ROOT_ID)
    if len(parents) > len(set(parents)):
        raise SomaError("Bifurcating soma")
python
def _check_soma_topology_swc(points): if len(points) == 3: return parents = tuple(p[COLS.P] for p in points if p[COLS.P] != ROOT_ID) if len(parents) > len(set(parents)): raise SomaError("Bifurcating soma")
[ "def", "_check_soma_topology_swc", "(", "points", ")", ":", "if", "len", "(", "points", ")", "==", "3", ":", "return", "parents", "=", "tuple", "(", "p", "[", "COLS", ".", "P", "]", "for", "p", "in", "points", "if", "p", "[", "COLS", ".", "P", "]", "!=", "ROOT_ID", ")", "if", "len", "(", "parents", ")", ">", "len", "(", "set", "(", "parents", ")", ")", ":", "raise", "SomaError", "(", "\"Bifurcating soma\"", ")" ]
check if points form valid soma Currently checks if there are bifurcations within a soma with more than three points.
[ "check", "if", "points", "form", "valid", "soma" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_core.py#L114-L125
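The soma check flags a bifurcation by looking for duplicate parent ids among the soma points. The same test on made-up SWC-style (point_id, parent_id) rows:

# Made-up SWC-style soma rows as (point_id, parent_id); -1 marks the root.
soma_rows = [(1, -1), (2, 1), (3, 1), (4, 1)]

parents = [pid for _, pid in soma_rows if pid != -1]
if len(parents) > len(set(parents)):
    print('Bifurcating soma')  # two or more points share the same parent
else:
    print('Soma topology looks valid')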
BlueBrain/NeuroM
neurom/fst/_core.py
FstNeuron.points
def points(self):
    '''Return unordered array with all the points in this neuron'''
    if self._points is None:
        _points = self.soma.points.tolist()
        for n in self.neurites:
            _points.extend(n.points.tolist())
        self._points = np.array(_points)

    return self._points
python
def points(self): if self._points is None: _points = self.soma.points.tolist() for n in self.neurites: _points.extend(n.points.tolist()) self._points = np.array(_points) return self._points
[ "def", "points", "(", "self", ")", ":", "if", "self", ".", "_points", "is", "None", ":", "_points", "=", "self", ".", "soma", ".", "points", ".", "tolist", "(", ")", "for", "n", "in", "self", ".", "neurites", ":", "_points", ".", "extend", "(", "n", ".", "points", ".", "tolist", "(", ")", ")", "self", ".", "_points", "=", "np", ".", "array", "(", "_points", ")", "return", "self", ".", "_points" ]
Return unordered array with all the points in this neuron
[ "Return", "unordered", "array", "with", "all", "the", "points", "in", "this", "neuron" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_core.py#L52-L60
BlueBrain/NeuroM
neurom/fst/_core.py
FstNeuron.transform
def transform(self, trans):
    '''Return a copy of this neuron with a 3D transformation applied'''
    _data = deepcopy(self._data)
    _data.data_block[:, 0:3] = trans(_data.data_block[:, 0:3])
    return FstNeuron(_data, self.name)
python
def transform(self, trans): _data = deepcopy(self._data) _data.data_block[:, 0:3] = trans(_data.data_block[:, 0:3]) return FstNeuron(_data, self.name)
[ "def", "transform", "(", "self", ",", "trans", ")", ":", "_data", "=", "deepcopy", "(", "self", ".", "_data", ")", "_data", ".", "data_block", "[", ":", ",", "0", ":", "3", "]", "=", "trans", "(", "_data", ".", "data_block", "[", ":", ",", "0", ":", "3", "]", ")", "return", "FstNeuron", "(", "_data", ",", "self", ".", "name", ")" ]
Return a copy of this neuron with a 3D transformation applied
[ "Return", "a", "copy", "of", "this", "neuron", "with", "a", "3D", "transformation", "applied" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_core.py#L62-L66
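transform expects trans to be any callable that maps an N x 3 coordinate block to a transformed N x 3 block. A sketch of such a callable (rotation about Z followed by a translation), applied to made-up points rather than a real FstNeuron:

import numpy as np

def make_transform(angle_rad, translation):
    # Return a callable that rotates points about the Z axis and then translates them.
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    rotation = np.array([[c, -s, 0.0],
                         [s, c, 0.0],
                         [0.0, 0.0, 1.0]])
    return lambda xyz: xyz.dot(rotation.T) + translation

points = np.array([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])
trans = make_transform(np.pi / 2, np.array([10.0, 0.0, 0.0]))
print(trans(points))  # [[10.  1.  0.], [ 9.  0.  0.]]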
BlueBrain/NeuroM
neurom/morphmath.py
vector
def vector(p1, p2):
    '''compute vector between two 3D points

    Args:
        p1, p2: indexable objects with
        indices 0, 1, 2 corresponding to 3D cartesian coordinates.

    Returns:
        3-vector from p1 - p2
    '''
    return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])
python
def vector(p1, p2): return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])
[ "def", "vector", "(", "p1", ",", "p2", ")", ":", "return", "np", ".", "subtract", "(", "p1", "[", "COLS", ".", "XYZ", "]", ",", "p2", "[", "COLS", ".", "XYZ", "]", ")" ]
compute vector between two 3D points Args: p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates. Returns: 3-vector from p1 - p2
[ "compute", "vector", "between", "two", "3D", "points" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L38-L48
BlueBrain/NeuroM
neurom/morphmath.py
linear_interpolate
def linear_interpolate(p1, p2, fraction): '''Returns the point p satisfying: p1 + fraction * (p2 - p1)''' return np.array((p1[0] + fraction * (p2[0] - p1[0]), p1[1] + fraction * (p2[1] - p1[1]), p1[2] + fraction * (p2[2] - p1[2])))
python
def linear_interpolate(p1, p2, fraction): return np.array((p1[0] + fraction * (p2[0] - p1[0]), p1[1] + fraction * (p2[1] - p1[1]), p1[2] + fraction * (p2[2] - p1[2])))
[ "def", "linear_interpolate", "(", "p1", ",", "p2", ",", "fraction", ")", ":", "return", "np", ".", "array", "(", "(", "p1", "[", "0", "]", "+", "fraction", "*", "(", "p2", "[", "0", "]", "-", "p1", "[", "0", "]", ")", ",", "p1", "[", "1", "]", "+", "fraction", "*", "(", "p2", "[", "1", "]", "-", "p1", "[", "1", "]", ")", ",", "p1", "[", "2", "]", "+", "fraction", "*", "(", "p2", "[", "2", "]", "-", "p1", "[", "2", "]", ")", ")", ")" ]
Returns the point p satisfying: p1 + fraction * (p2 - p1)
[ "Returns", "the", "point", "p", "satisfying", ":", "p1", "+", "fraction", "*", "(", "p2", "-", "p1", ")" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L51-L55
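A quick numeric check of the interpolation formula above; this is a standalone sketch that re-implements the arithmetic with numpy rather than importing NeuroM (the helper name lerp3 is ours, not the library's):

import numpy as np

def lerp3(p1, p2, fraction):
    # p1 + fraction * (p2 - p1), evaluated component-wise on the first 3 coordinates
    return np.array([p1[i] + fraction * (p2[i] - p1[i]) for i in range(3)])

# halfway between the origin and (2, 4, 6) is (1, 2, 3)
print(lerp3((0., 0., 0.), (2., 4., 6.), 0.5))  # [1. 2. 3.]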
BlueBrain/NeuroM
neurom/morphmath.py
interpolate_radius
def interpolate_radius(r1, r2, fraction): '''Calculate the radius that corresponds to a point P that lies at a fraction of the length of a cut cone P1P2 where P1, P2 are the centers of the circles that bound the shape with radii r1 and r2 respectively. Args: r1: float Radius of the first node of the segment. r2: float Radius of the second node of the segment fraction: float The fraction at which the interpolated radius is calculated. Returns: float The interpolated radius. Note: The fraction is assumed from point P1, not from point P2. ''' def f(a, b, c): ''' Returns the length of the interpolated radius calculated using similar triangles. ''' return a + c * (b - a) return f(r2, r1, 1. - fraction) if r1 > r2 else f(r1, r2, fraction)
python
def interpolate_radius(r1, r2, fraction): def f(a, b, c): return a + c * (b - a) return f(r2, r1, 1. - fraction) if r1 > r2 else f(r1, r2, fraction)
[ "def", "interpolate_radius", "(", "r1", ",", "r2", ",", "fraction", ")", ":", "def", "f", "(", "a", ",", "b", ",", "c", ")", ":", "''' Returns the length of the interpolated radius calculated\n using similar triangles.\n '''", "return", "a", "+", "c", "*", "(", "b", "-", "a", ")", "return", "f", "(", "r2", ",", "r1", ",", "1.", "-", "fraction", ")", "if", "r1", ">", "r2", "else", "f", "(", "r1", ",", "r2", ",", "fraction", ")" ]
Calculate the radius that corresponds to a point P that lies at a fraction of the length of a cut cone P1P2 where P1, P2 are the centers of the circles that bound the shape with radii r1 and r2 respectively. Args: r1: float Radius of the first node of the segment. r2: float Radius of the second node of the segment fraction: float The fraction at which the interpolated radius is calculated. Returns: float The interpolated radius. Note: The fraction is assumed from point P1, not from point P2.
[ "Calculate", "the", "radius", "that", "corresponds", "to", "a", "point", "P", "that", "lies", "at", "a", "fraction", "of", "the", "length", "of", "a", "cut", "cone", "P1P2", "where", "P1", "P2", "are", "the", "centers", "of", "the", "circles", "that", "bound", "the", "shape", "with", "radii", "r1", "and", "r2", "respectively", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L58-L81
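As a sanity check on the similar-triangles logic, here is a standalone sketch (plain Python, no NeuroM import; the name frustum_radius_at is ours) that mirrors the recorded function and evaluates it at a few fractions measured from the r1 end:

def frustum_radius_at(r1, r2, fraction):
    # interpolate from the larger radius toward the smaller one, as in the record above
    def f(a, b, c):
        return a + c * (b - a)
    return f(r2, r1, 1. - fraction) if r1 > r2 else f(r1, r2, fraction)

print(frustum_radius_at(2.0, 1.0, 0.0))  # 2.0 -- at the r1 end of the segment
print(frustum_radius_at(2.0, 1.0, 0.5))  # 1.5 -- midpoint of the cut cone
print(frustum_radius_at(2.0, 1.0, 1.0))  # 1.0 -- at the r2 end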
BlueBrain/NeuroM
neurom/morphmath.py
path_fraction_id_offset
def path_fraction_id_offset(points, fraction, relative_offset=False): '''Find the segment which corresponds to the fraction of the path length along the piecewise linear curve which is constructed from the set of points. Args: points: an iterable of indexable objects with indices 0, 1, 2 correspoding to 3D cartesian coordinates fraction: path length fraction (0.0 <= fraction <= 1.0) relative_offset: return absolute or relative segment distance Returns: (segment ID, segment offset) pair. ''' if not (0. <= fraction <= 1.0): raise ValueError("Invalid fraction: %.3f" % fraction) pts = np.array(points)[:, COLS.XYZ] lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1) cum_lengths = np.cumsum(lengths) offset = cum_lengths[-1] * fraction seg_id = np.argmin(cum_lengths < offset) if seg_id > 0: offset -= cum_lengths[seg_id - 1] if relative_offset: offset /= lengths[seg_id] return seg_id, offset
python
def path_fraction_id_offset(points, fraction, relative_offset=False): if not (0. <= fraction <= 1.0): raise ValueError("Invalid fraction: %.3f" % fraction) pts = np.array(points)[:, COLS.XYZ] lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1) cum_lengths = np.cumsum(lengths) offset = cum_lengths[-1] * fraction seg_id = np.argmin(cum_lengths < offset) if seg_id > 0: offset -= cum_lengths[seg_id - 1] if relative_offset: offset /= lengths[seg_id] return seg_id, offset
[ "def", "path_fraction_id_offset", "(", "points", ",", "fraction", ",", "relative_offset", "=", "False", ")", ":", "if", "not", "(", "0.", "<=", "fraction", "<=", "1.0", ")", ":", "raise", "ValueError", "(", "\"Invalid fraction: %.3f\"", "%", "fraction", ")", "pts", "=", "np", ".", "array", "(", "points", ")", "[", ":", ",", "COLS", ".", "XYZ", "]", "lengths", "=", "np", ".", "linalg", ".", "norm", "(", "np", ".", "diff", "(", "pts", ",", "axis", "=", "0", ")", ",", "axis", "=", "1", ")", "cum_lengths", "=", "np", ".", "cumsum", "(", "lengths", ")", "offset", "=", "cum_lengths", "[", "-", "1", "]", "*", "fraction", "seg_id", "=", "np", ".", "argmin", "(", "cum_lengths", "<", "offset", ")", "if", "seg_id", ">", "0", ":", "offset", "-=", "cum_lengths", "[", "seg_id", "-", "1", "]", "if", "relative_offset", ":", "offset", "/=", "lengths", "[", "seg_id", "]", "return", "seg_id", ",", "offset" ]
Find the segment which corresponds to the fraction of the path length along the piecewise linear curve which is constructed from the set of points. Args: points: an iterable of indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates fraction: path length fraction (0.0 <= fraction <= 1.0) relative_offset: return absolute or relative segment distance Returns: (segment ID, segment offset) pair.
[ "Find", "the", "segment", "which", "corresponds", "to", "the", "fraction", "of", "the", "path", "length", "along", "the", "piecewise", "linear", "curve", "which", "is", "constructed", "from", "the", "set", "of", "points", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L84-L109
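A hedged usage sketch: it assumes NeuroM is installed so that neurom.morphmath is importable, as the record's path suggests. It locates the point three quarters of the way along an L-shaped polyline of total length 2:

from neurom import morphmath as mm

points = [(0., 0., 0., 1.), (1., 0., 0., 1.), (1., 1., 0., 1.)]  # x, y, z, radius
seg_id, offset = mm.path_fraction_id_offset(points, 0.75)
print(seg_id, offset)  # (1, 0.5): half way into the second segment, as an absolute distance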
BlueBrain/NeuroM
neurom/morphmath.py
path_fraction_point
def path_fraction_point(points, fraction): '''Computes the point which corresponds to the fraction of the path length along the piecewise linear curve which is constructed from the set of points. Args: points: an iterable of indexable objects with indices 0, 1, 2 correspoding to 3D cartesian coordinates fraction: path length fraction (0 <= fraction <= 1) Returns: The 3D coordinates of the aforementioned point ''' seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True) return linear_interpolate(points[seg_id], points[seg_id + 1], offset)
python
def path_fraction_point(points, fraction): seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True) return linear_interpolate(points[seg_id], points[seg_id + 1], offset)
[ "def", "path_fraction_point", "(", "points", ",", "fraction", ")", ":", "seg_id", ",", "offset", "=", "path_fraction_id_offset", "(", "points", ",", "fraction", ",", "relative_offset", "=", "True", ")", "return", "linear_interpolate", "(", "points", "[", "seg_id", "]", ",", "points", "[", "seg_id", "+", "1", "]", ",", "offset", ")" ]
Computes the point which corresponds to the fraction of the path length along the piecewise linear curve which is constructed from the set of points. Args: points: an iterable of indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates fraction: path length fraction (0 <= fraction <= 1) Returns: The 3D coordinates of the aforementioned point
[ "Computes", "the", "point", "which", "corresponds", "to", "the", "fraction", "of", "the", "path", "length", "along", "the", "piecewise", "linear", "curve", "which", "is", "constructed", "from", "the", "set", "of", "points", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L112-L126
BlueBrain/NeuroM
neurom/morphmath.py
scalar_projection
def scalar_projection(v1, v2): '''compute the scalar projection of v1 upon v2 Args: v1, v2: iterable indices 0, 1, 2 corresponding to cartesian coordinates Returns: 3-vector of the projection of point p onto the direction of v ''' return np.dot(v1, v2) / np.linalg.norm(v2)
python
def scalar_projection(v1, v2): return np.dot(v1, v2) / np.linalg.norm(v2)
[ "def", "scalar_projection", "(", "v1", ",", "v2", ")", ":", "return", "np", ".", "dot", "(", "v1", ",", "v2", ")", "/", "np", ".", "linalg", ".", "norm", "(", "v2", ")" ]
compute the scalar projection of v1 upon v2 Args: v1, v2: iterable indices 0, 1, 2 corresponding to cartesian coordinates Returns: The scalar projection of v1 onto the direction of v2
[ "compute", "the", "scalar", "projection", "of", "v1", "upon", "v2" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L129-L139
BlueBrain/NeuroM
neurom/morphmath.py
vector_projection
def vector_projection(v1, v2): '''compute the vector projection of v1 upon v2 Args: v1, v2: iterable indices 0, 1, 2 corresponding to cartesian coordinates Returns: 3-vector of the projection of point p onto the direction of v ''' return scalar_projection(v1, v2) * v2 / np.linalg.norm(v2)
python
def vector_projection(v1, v2): return scalar_projection(v1, v2) * v2 / np.linalg.norm(v2)
[ "def", "vector_projection", "(", "v1", ",", "v2", ")", ":", "return", "scalar_projection", "(", "v1", ",", "v2", ")", "*", "v2", "/", "np", ".", "linalg", ".", "norm", "(", "v2", ")" ]
compute the vector projection of v1 upon v2 Args: v1, v2: iterable indices 0, 1, 2 corresponding to cartesian coordinates Returns: 3-vector giving the projection of v1 onto the direction of v2
[ "compute", "the", "vector", "projection", "of", "v1", "upon", "v2" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L142-L152
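To make the relation between the two projections concrete, a minimal numpy sketch independent of NeuroM that reproduces both definitions:

import numpy as np

v1 = np.array([3.0, 4.0, 0.0])
v2 = np.array([1.0, 0.0, 0.0])

scalar_proj = np.dot(v1, v2) / np.linalg.norm(v2)    # length of v1 along v2 -> 3.0
vector_proj = scalar_proj * v2 / np.linalg.norm(v2)  # that length times the unit vector of v2
print(scalar_proj)   # 3.0
print(vector_proj)   # [3. 0. 0.]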
BlueBrain/NeuroM
neurom/morphmath.py
dist_point_line
def dist_point_line(p, l1, l2): '''compute the orthogonal distance between from the line that goes through the points l1, l2 and the point p Args: p, l1, l2 : iterable point indices 0, 1, 2 corresponding to cartesian coordinates ''' cross_prod = np.cross(l2 - l1, p - l1) return np.linalg.norm(cross_prod) / np.linalg.norm(l2 - l1)
python
def dist_point_line(p, l1, l2): cross_prod = np.cross(l2 - l1, p - l1) return np.linalg.norm(cross_prod) / np.linalg.norm(l2 - l1)
[ "def", "dist_point_line", "(", "p", ",", "l1", ",", "l2", ")", ":", "cross_prod", "=", "np", ".", "cross", "(", "l2", "-", "l1", ",", "p", "-", "l1", ")", "return", "np", ".", "linalg", ".", "norm", "(", "cross_prod", ")", "/", "np", ".", "linalg", ".", "norm", "(", "l2", "-", "l1", ")" ]
compute the orthogonal distance between the point p and the line that goes through the points l1, l2 Args: p, l1, l2 : iterable point indices 0, 1, 2 corresponding to cartesian coordinates
[ "compute", "the", "orthogonal", "distance", "between", "from", "the", "line", "that", "goes", "through", "the", "points", "l1", "l2", "and", "the", "point", "p" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L155-L165
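The cross-product formula can be checked against an easy case, the distance from (0, 1, 0) to the x-axis; standalone numpy, no NeuroM import:

import numpy as np

p = np.array([0.0, 1.0, 0.0])
l1 = np.array([0.0, 0.0, 0.0])
l2 = np.array([1.0, 0.0, 0.0])

d = np.linalg.norm(np.cross(l2 - l1, p - l1)) / np.linalg.norm(l2 - l1)
print(d)  # 1.0 -- the point sits one unit away from the line through l1 and l2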
BlueBrain/NeuroM
neurom/morphmath.py
point_dist2
def point_dist2(p1, p2): '''compute the square of the euclidian distance between two 3D points Args: p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates. Returns: The square of the euclidian distance between the points. ''' v = vector(p1, p2) return np.dot(v, v)
python
def point_dist2(p1, p2): v = vector(p1, p2) return np.dot(v, v)
[ "def", "point_dist2", "(", "p1", ",", "p2", ")", ":", "v", "=", "vector", "(", "p1", ",", "p2", ")", "return", "np", ".", "dot", "(", "v", ",", "v", ")" ]
compute the square of the euclidean distance between two 3D points Args: p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates. Returns: The square of the euclidean distance between the points.
[ "compute", "the", "square", "of", "the", "euclidian", "distance", "between", "two", "3D", "points" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L168-L178
BlueBrain/NeuroM
neurom/morphmath.py
angle_3points
def angle_3points(p0, p1, p2): ''' compute the angle in radians between three 3D points Calculated as the angle between p1-p0 and p2-p0. Args: p0, p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates. Returns: Angle in radians between (p1-p0) and (p2-p0). 0.0 if p0==p1 or p0==p2. ''' vec1 = vector(p1, p0) vec2 = vector(p2, p0) return math.atan2(np.linalg.norm(np.cross(vec1, vec2)), np.dot(vec1, vec2))
python
def angle_3points(p0, p1, p2): vec1 = vector(p1, p0) vec2 = vector(p2, p0) return math.atan2(np.linalg.norm(np.cross(vec1, vec2)), np.dot(vec1, vec2))
[ "def", "angle_3points", "(", "p0", ",", "p1", ",", "p2", ")", ":", "vec1", "=", "vector", "(", "p1", ",", "p0", ")", "vec2", "=", "vector", "(", "p2", ",", "p0", ")", "return", "math", ".", "atan2", "(", "np", ".", "linalg", ".", "norm", "(", "np", ".", "cross", "(", "vec1", ",", "vec2", ")", ")", ",", "np", ".", "dot", "(", "vec1", ",", "vec2", ")", ")" ]
compute the angle in radians between three 3D points Calculated as the angle between p1-p0 and p2-p0. Args: p0, p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates. Returns: Angle in radians between (p1-p0) and (p2-p0). 0.0 if p0==p1 or p0==p2.
[ "compute", "the", "angle", "in", "radians", "between", "three", "3D", "points" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L193-L209
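For reference, the atan2 form used above agrees with the obvious right-angle case; a standalone check:

import math
import numpy as np

p0, p1, p2 = np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 1., 0.])
vec1, vec2 = p1 - p0, p2 - p0
angle = math.atan2(np.linalg.norm(np.cross(vec1, vec2)), np.dot(vec1, vec2))
print(angle, math.pi / 2)  # both ~1.5707963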
BlueBrain/NeuroM
neurom/morphmath.py
angle_between_vectors
def angle_between_vectors(p1, p2): """ Computes the angle in radians between vectors 'p1' and 'p2' Normalizes the input vectors and computes the relative angle between them. >>> angle_between((1, 0), (0, 1)) 1.5707963267948966 >>> angle_between((1, 0), (1, 0)) 0.0 >>> angle_between((1, 0), (-1, 0)) 3.141592653589793 """ v1 = p1 / np.linalg.norm(p1) v2 = p2 / np.linalg.norm(p2) return np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))
python
def angle_between_vectors(p1, p2): v1 = p1 / np.linalg.norm(p1) v2 = p2 / np.linalg.norm(p2) return np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))
[ "def", "angle_between_vectors", "(", "p1", ",", "p2", ")", ":", "v1", "=", "p1", "/", "np", ".", "linalg", ".", "norm", "(", "p1", ")", "v2", "=", "p2", "/", "np", ".", "linalg", ".", "norm", "(", "p2", ")", "return", "np", ".", "arccos", "(", "np", ".", "clip", "(", "np", ".", "dot", "(", "v1", ",", "v2", ")", ",", "-", "1.0", ",", "1.0", ")", ")" ]
Computes the angle in radians between vectors 'p1' and 'p2' Normalizes the input vectors and computes the relative angle between them. >>> angle_between_vectors((1, 0), (0, 1)) 1.5707963267948966 >>> angle_between_vectors((1, 0), (1, 0)) 0.0 >>> angle_between_vectors((1, 0), (-1, 0)) 3.141592653589793
[ "Computes", "the", "angle", "in", "radians", "between", "vectors", "p1", "and", "p2", "Normalizes", "the", "input", "vectors", "and", "computes", "the", "relative", "angle", "between", "them", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L212-L226
BlueBrain/NeuroM
neurom/morphmath.py
polygon_diameter
def polygon_diameter(points): ''' Compute the maximun euclidian distance between any two points in a list of points ''' return max(point_dist(p0, p1) for (p0, p1) in combinations(points, 2))
python
def polygon_diameter(points): return max(point_dist(p0, p1) for (p0, p1) in combinations(points, 2))
[ "def", "polygon_diameter", "(", "points", ")", ":", "return", "max", "(", "point_dist", "(", "p0", ",", "p1", ")", "for", "(", "p0", ",", "p1", ")", "in", "combinations", "(", "points", ",", "2", ")", ")" ]
Compute the maximum euclidean distance between any two points in a list of points
[ "Compute", "the", "maximun", "euclidian", "distance", "between", "any", "two", "points", "in", "a", "list", "of", "points" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L229-L233
BlueBrain/NeuroM
neurom/morphmath.py
average_points_dist
def average_points_dist(p0, p_list): """ Computes the average distance between a list of points and a given point p0. """ return np.mean(list(point_dist(p0, p1) for p1 in p_list))
python
def average_points_dist(p0, p_list): return np.mean(list(point_dist(p0, p1) for p1 in p_list))
[ "def", "average_points_dist", "(", "p0", ",", "p_list", ")", ":", "return", "np", ".", "mean", "(", "list", "(", "point_dist", "(", "p0", ",", "p1", ")", "for", "p1", "in", "p_list", ")", ")" ]
Computes the average distance between a list of points and a given point p0.
[ "Computes", "the", "average", "distance", "between", "a", "list", "of", "points", "and", "a", "given", "point", "p0", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L236-L241
BlueBrain/NeuroM
neurom/morphmath.py
path_distance
def path_distance(points): """ Compute the path distance from given set of points """ vecs = np.diff(points, axis=0)[:, :3] d2 = [np.dot(p, p) for p in vecs] return np.sum(np.sqrt(d2))
python
def path_distance(points): vecs = np.diff(points, axis=0)[:, :3] d2 = [np.dot(p, p) for p in vecs] return np.sum(np.sqrt(d2))
[ "def", "path_distance", "(", "points", ")", ":", "vecs", "=", "np", ".", "diff", "(", "points", ",", "axis", "=", "0", ")", "[", ":", ",", ":", "3", "]", "d2", "=", "[", "np", ".", "dot", "(", "p", ",", "p", ")", "for", "p", "in", "vecs", "]", "return", "np", ".", "sum", "(", "np", ".", "sqrt", "(", "d2", ")", ")" ]
Compute the path distance from given set of points
[ "Compute", "the", "path", "distance", "from", "given", "set", "of", "points" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L244-L250
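The path distance is the sum of consecutive segment lengths; a small standalone numpy check on a 3-point polyline with legs of length 3 and 4:

import numpy as np

points = np.array([[0., 0., 0.], [3., 0., 0.], [3., 4., 0.]])
vecs = np.diff(points, axis=0)[:, :3]
print(np.sum(np.sqrt([np.dot(v, v) for v in vecs])))  # 7.0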
BlueBrain/NeuroM
neurom/morphmath.py
segment_radial_dist
def segment_radial_dist(seg, pos): '''Return the radial distance of a tree segment to a given point The radial distance is the euclidian distance between the mid-point of the segment and the point in question. Parameters: seg: tree segment pos: origin to which distances are measured. It must have at lease 3 components. The first 3 components are (x, y, z). ''' return point_dist(pos, np.divide(np.add(seg[0], seg[1]), 2.0))
python
def segment_radial_dist(seg, pos): return point_dist(pos, np.divide(np.add(seg[0], seg[1]), 2.0))
[ "def", "segment_radial_dist", "(", "seg", ",", "pos", ")", ":", "return", "point_dist", "(", "pos", ",", "np", ".", "divide", "(", "np", ".", "add", "(", "seg", "[", "0", "]", ",", "seg", "[", "1", "]", ")", ",", "2.0", ")", ")" ]
Return the radial distance of a tree segment to a given point The radial distance is the euclidean distance between the mid-point of the segment and the point in question. Parameters: seg: tree segment pos: origin to which distances are measured. It must have at least 3 components. The first 3 components are (x, y, z).
[ "Return", "the", "radial", "distance", "of", "a", "tree", "segment", "to", "a", "given", "point" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L301-L313
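The radial distance is simply the distance from the reference position to the segment midpoint; a standalone numpy check:

import numpy as np

seg = (np.array([0., 0., 0.]), np.array([2., 0., 0.]))  # segment endpoints
pos = np.array([1., 1., 0.])                            # reference position
mid = (seg[0] + seg[1]) / 2.0
print(np.linalg.norm(pos - mid))  # 1.0 -- the midpoint is (1, 0, 0)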
BlueBrain/NeuroM
neurom/morphmath.py
segment_area
def segment_area(seg): '''Compute the surface area of a segment. Approximated as a conical frustum. Does not include the surface area of the bounding circles. ''' r0 = seg[0][COLS.R] r1 = seg[1][COLS.R] h2 = point_dist2(seg[0], seg[1]) return math.pi * (r0 + r1) * math.sqrt((r0 - r1) ** 2 + h2)
python
def segment_area(seg): r0 = seg[0][COLS.R] r1 = seg[1][COLS.R] h2 = point_dist2(seg[0], seg[1]) return math.pi * (r0 + r1) * math.sqrt((r0 - r1) ** 2 + h2)
[ "def", "segment_area", "(", "seg", ")", ":", "r0", "=", "seg", "[", "0", "]", "[", "COLS", ".", "R", "]", "r1", "=", "seg", "[", "1", "]", "[", "COLS", ".", "R", "]", "h2", "=", "point_dist2", "(", "seg", "[", "0", "]", ",", "seg", "[", "1", "]", ")", "return", "math", ".", "pi", "*", "(", "r0", "+", "r1", ")", "*", "math", ".", "sqrt", "(", "(", "r0", "-", "r1", ")", "**", "2", "+", "h2", ")" ]
Compute the surface area of a segment. Approximated as a conical frustum. Does not include the surface area of the bounding circles.
[ "Compute", "the", "surface", "area", "of", "a", "segment", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L316-L325
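The formula above is the lateral area of a conical frustum, pi * (r0 + r1) * slant with slant = sqrt((r0 - r1)^2 + h^2); a worked standalone example with a 3-4-5 slant triangle:

import math

r0, r1, h = 4.0, 1.0, 4.0                   # end radii and segment length
slant = math.sqrt((r0 - r1) ** 2 + h ** 2)  # 5.0
print(math.pi * (r0 + r1) * slant)          # 25*pi ~ 78.54, bounding circles not included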
BlueBrain/NeuroM
neurom/morphmath.py
segment_volume
def segment_volume(seg): '''Compute the volume of a segment. Approximated as a conical frustum. ''' r0 = seg[0][COLS.R] r1 = seg[1][COLS.R] h = point_dist(seg[0], seg[1]) return math.pi * h * ((r0 * r0) + (r0 * r1) + (r1 * r1)) / 3.0
python
def segment_volume(seg): r0 = seg[0][COLS.R] r1 = seg[1][COLS.R] h = point_dist(seg[0], seg[1]) return math.pi * h * ((r0 * r0) + (r0 * r1) + (r1 * r1)) / 3.0
[ "def", "segment_volume", "(", "seg", ")", ":", "r0", "=", "seg", "[", "0", "]", "[", "COLS", ".", "R", "]", "r1", "=", "seg", "[", "1", "]", "[", "COLS", ".", "R", "]", "h", "=", "point_dist", "(", "seg", "[", "0", "]", ",", "seg", "[", "1", "]", ")", "return", "math", ".", "pi", "*", "h", "*", "(", "(", "r0", "*", "r0", ")", "+", "(", "r0", "*", "r1", ")", "+", "(", "r1", "*", "r1", ")", ")", "/", "3.0" ]
Compute the volume of a segment. Approximated as a conical frustum.
[ "Compute", "the", "volume", "of", "a", "segment", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L328-L336
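Likewise, the volume is the conical-frustum formula pi * h * (r0^2 + r0*r1 + r1^2) / 3; a quick standalone check:

import math

r0, r1, h = 2.0, 1.0, 3.0
print(math.pi * h * (r0 * r0 + r0 * r1 + r1 * r1) / 3.0)  # 7*pi ~ 21.99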
BlueBrain/NeuroM
neurom/morphmath.py
taper_rate
def taper_rate(p0, p1): '''Compute the taper rate between points p0 and p1 Args: p0, p1: iterables with first 4 components containing (x, y, z, r) Returns: The taper rate, defined as the absolute value of the difference in the diameters of p0 and p1 divided by the euclidian distance between them. ''' return 2 * abs(p0[COLS.R] - p1[COLS.R]) / point_dist(p0, p1)
python
def taper_rate(p0, p1): return 2 * abs(p0[COLS.R] - p1[COLS.R]) / point_dist(p0, p1)
[ "def", "taper_rate", "(", "p0", ",", "p1", ")", ":", "return", "2", "*", "abs", "(", "p0", "[", "COLS", ".", "R", "]", "-", "p1", "[", "COLS", ".", "R", "]", ")", "/", "point_dist", "(", "p0", ",", "p1", ")" ]
Compute the taper rate between points p0 and p1 Args: p0, p1: iterables with first 4 components containing (x, y, z, r) Returns: The taper rate, defined as the absolute value of the difference in the diameters of p0 and p1 divided by the euclidean distance between them.
[ "Compute", "the", "taper", "rate", "between", "points", "p0", "and", "p1" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L339-L350
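Since the stored values are radii, the factor of 2 converts the radius difference into a diameter difference; a standalone numpy check with points one unit apart:

import numpy as np

p0 = np.array([0., 0., 0., 1.0])   # x, y, z, radius
p1 = np.array([1., 0., 0., 0.75])
dist = np.linalg.norm(p0[:3] - p1[:3])
print(2 * abs(p0[3] - p1[3]) / dist)  # 0.5: diameter shrinks from 2.0 to 1.5 over one unit of length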
BlueBrain/NeuroM
neurom/morphmath.py
principal_direction_extent
def principal_direction_extent(points): '''Calculate the extent of a set of 3D points. The extent is defined as the maximum distance between the projections on the principal directions of the covariance matrix of the points. Parameter: points : a 2D numpy array of points Returns: extents : the extents for each of the eigenvectors of the cov matrix eigs : eigenvalues of the covariance matrix eigv : respective eigenvectors of the covariance matrix ''' # center the points around 0.0 points = np.copy(points) points -= np.mean(points, axis=0) # principal components _, eigv = pca(points) extent = np.zeros(3) for i in range(eigv.shape[1]): # orthogonal projection onto the direction of the v component scalar_projs = np.sort(np.array([np.dot(p, eigv[:, i]) for p in points])) extent[i] = scalar_projs[-1] if scalar_projs[0] < 0.: extent -= scalar_projs[0] return extent
python
def principal_direction_extent(points): points = np.copy(points) points -= np.mean(points, axis=0) _, eigv = pca(points) extent = np.zeros(3) for i in range(eigv.shape[1]): scalar_projs = np.sort(np.array([np.dot(p, eigv[:, i]) for p in points])) extent[i] = scalar_projs[-1] if scalar_projs[0] < 0.: extent -= scalar_projs[0] return extent
[ "def", "principal_direction_extent", "(", "points", ")", ":", "# center the points around 0.0", "points", "=", "np", ".", "copy", "(", "points", ")", "points", "-=", "np", ".", "mean", "(", "points", ",", "axis", "=", "0", ")", "# principal components", "_", ",", "eigv", "=", "pca", "(", "points", ")", "extent", "=", "np", ".", "zeros", "(", "3", ")", "for", "i", "in", "range", "(", "eigv", ".", "shape", "[", "1", "]", ")", ":", "# orthogonal projection onto the direction of the v component", "scalar_projs", "=", "np", ".", "sort", "(", "np", ".", "array", "(", "[", "np", ".", "dot", "(", "p", ",", "eigv", "[", ":", ",", "i", "]", ")", "for", "p", "in", "points", "]", ")", ")", "extent", "[", "i", "]", "=", "scalar_projs", "[", "-", "1", "]", "if", "scalar_projs", "[", "0", "]", "<", "0.", ":", "extent", "-=", "scalar_projs", "[", "0", "]", "return", "extent" ]
Calculate the extent of a set of 3D points. The extent is defined as the maximum distance between the projections on the principal directions of the covariance matrix of the points. Parameter: points : a 2D numpy array of points Returns: extents : the extents along each of the eigenvectors (principal directions) of the covariance matrix of the points
[ "Calculate", "the", "extent", "of", "a", "set", "of", "3D", "points", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L387-L419
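A standalone sketch of the same idea, using numpy's eigendecomposition directly instead of the module's pca helper (assumed here to be equivalent to an eigendecomposition of the covariance matrix); for points spread along the x-axis, one extent should equal the full spread and the others should be zero:

import numpy as np

points = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.], [3., 0., 0.]])
centered = points - points.mean(axis=0)
_, eigv = np.linalg.eigh(np.cov(centered.T))      # eigenvectors of the covariance matrix, as columns
extents = [np.ptp(centered.dot(eigv[:, i])) for i in range(eigv.shape[1])]
print(sorted(extents))  # [0.0, 0.0, 3.0]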
BlueBrain/NeuroM
examples/features_graph_table.py
stylize
def stylize(ax, name, feature): '''Stylization modifications to the plots ''' ax.set_ylabel(feature) ax.set_title(name, fontsize='small')
python
def stylize(ax, name, feature): ax.set_ylabel(feature) ax.set_title(name, fontsize='small')
[ "def", "stylize", "(", "ax", ",", "name", ",", "feature", ")", ":", "ax", ".", "set_ylabel", "(", "feature", ")", "ax", ".", "set_title", "(", "name", ",", "fontsize", "=", "'small'", ")" ]
Stylization modifications to the plots
[ "Stylization", "modifications", "to", "the", "plots" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/features_graph_table.py#L60-L64
BlueBrain/NeuroM
examples/features_graph_table.py
histogram
def histogram(neuron, feature, ax, bins=15, normed=True, cumulative=False): ''' Plot a histogram of the selected feature for the population of neurons. Plots x-axis versus y-axis on a scatter|histogram|binned values plot. Parameters : neurons : neuron list feature : str The feature of interest. bins : int Number of bins for the histogram. cumulative : bool Sets cumulative histogram on. ax : axes object the axes in which the plot is taking place ''' feature_values = nm.get(feature, neuron) # generate histogram ax.hist(feature_values, bins=bins, cumulative=cumulative, normed=normed)
python
def histogram(neuron, feature, ax, bins=15, normed=True, cumulative=False): feature_values = nm.get(feature, neuron) ax.hist(feature_values, bins=bins, cumulative=cumulative, normed=normed)
[ "def", "histogram", "(", "neuron", ",", "feature", ",", "ax", ",", "bins", "=", "15", ",", "normed", "=", "True", ",", "cumulative", "=", "False", ")", ":", "feature_values", "=", "nm", ".", "get", "(", "feature", ",", "neuron", ")", "# generate histogram", "ax", ".", "hist", "(", "feature_values", ",", "bins", "=", "bins", ",", "cumulative", "=", "cumulative", ",", "normed", "=", "normed", ")" ]
Plot a histogram of the selected feature for a single neuron. Parameters : neuron : the neuron of interest feature : str The feature of interest. bins : int Number of bins for the histogram. normed : bool Normalize the histogram. cumulative : bool Sets cumulative histogram on. ax : axes object the axes in which the plot is taking place
[ "Plot", "a", "histogram", "of", "the", "selected", "feature", "for", "the", "population", "of", "neurons", ".", "Plots", "x", "-", "axis", "versus", "y", "-", "axis", "on", "a", "scatter|histogram|binned", "values", "plot", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/features_graph_table.py#L67-L91
BlueBrain/NeuroM
examples/features_graph_table.py
plot_feature
def plot_feature(feature, cell): '''Plot a feature ''' fig = pl.figure() ax = fig.add_subplot(111) if cell is not None: try: histogram(cell, feature, ax) except ValueError: pass stylize(ax, cell.name, feature) return fig
python
def plot_feature(feature, cell): fig = pl.figure() ax = fig.add_subplot(111) if cell is not None: try: histogram(cell, feature, ax) except ValueError: pass stylize(ax, cell.name, feature) return fig
[ "def", "plot_feature", "(", "feature", ",", "cell", ")", ":", "fig", "=", "pl", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "if", "cell", "is", "not", "None", ":", "try", ":", "histogram", "(", "cell", ",", "feature", ",", "ax", ")", "except", "ValueError", ":", "pass", "stylize", "(", "ax", ",", "cell", ".", "name", ",", "feature", ")", "return", "fig" ]
Plot a feature
[ "Plot", "a", "feature" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/features_graph_table.py#L94-L106
BlueBrain/NeuroM
examples/end_to_end_distance.py
path_end_to_end_distance
def path_end_to_end_distance(neurite): '''Calculate and return end-to-end-distance of a given neurite.''' trunk = neurite.root_node.points[0] return max(morphmath.point_dist(l.points[-1], trunk) for l in neurite.root_node.ileaf())
python
def path_end_to_end_distance(neurite): trunk = neurite.root_node.points[0] return max(morphmath.point_dist(l.points[-1], trunk) for l in neurite.root_node.ileaf())
[ "def", "path_end_to_end_distance", "(", "neurite", ")", ":", "trunk", "=", "neurite", ".", "root_node", ".", "points", "[", "0", "]", "return", "max", "(", "morphmath", ".", "point_dist", "(", "l", ".", "points", "[", "-", "1", "]", ",", "trunk", ")", "for", "l", "in", "neurite", ".", "root_node", ".", "ileaf", "(", ")", ")" ]
Calculate and return end-to-end-distance of a given neurite.
[ "Calculate", "and", "return", "end", "-", "to", "-", "end", "-", "distance", "of", "a", "given", "neurite", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/end_to_end_distance.py#L38-L42
BlueBrain/NeuroM
examples/end_to_end_distance.py
make_end_to_end_distance_plot
def make_end_to_end_distance_plot(nb_segments, end_to_end_distance, neurite_type): '''Plot end-to-end distance vs number of segments''' plt.figure() plt.plot(nb_segments, end_to_end_distance) plt.title(neurite_type) plt.xlabel('Number of segments') plt.ylabel('End-to-end distance') plt.show()
python
def make_end_to_end_distance_plot(nb_segments, end_to_end_distance, neurite_type): plt.figure() plt.plot(nb_segments, end_to_end_distance) plt.title(neurite_type) plt.xlabel('Number of segments') plt.ylabel('End-to-end distance') plt.show()
[ "def", "make_end_to_end_distance_plot", "(", "nb_segments", ",", "end_to_end_distance", ",", "neurite_type", ")", ":", "plt", ".", "figure", "(", ")", "plt", ".", "plot", "(", "nb_segments", ",", "end_to_end_distance", ")", "plt", ".", "title", "(", "neurite_type", ")", "plt", ".", "xlabel", "(", "'Number of segments'", ")", "plt", ".", "ylabel", "(", "'End-to-end distance'", ")", "plt", ".", "show", "(", ")" ]
Plot end-to-end distance vs number of segments
[ "Plot", "end", "-", "to", "-", "end", "distance", "vs", "number", "of", "segments" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/end_to_end_distance.py#L50-L57
BlueBrain/NeuroM
examples/end_to_end_distance.py
calculate_and_plot_end_to_end_distance
def calculate_and_plot_end_to_end_distance(neurite): '''Calculate and plot the end-to-end distance vs the number of segments for an increasingly larger part of a given neurite. Note that the plots are not very meaningful for bifurcating trees.''' def _dist(seg): '''Distance between segmenr end and trunk''' return morphmath.point_dist(seg[1], neurite.root_node.points[0]) end_to_end_distance = [_dist(s) for s in nm.iter_segments(neurite)] make_end_to_end_distance_plot(np.arange(len(end_to_end_distance)) + 1, end_to_end_distance, neurite.type)
python
def calculate_and_plot_end_to_end_distance(neurite): def _dist(seg): return morphmath.point_dist(seg[1], neurite.root_node.points[0]) end_to_end_distance = [_dist(s) for s in nm.iter_segments(neurite)] make_end_to_end_distance_plot(np.arange(len(end_to_end_distance)) + 1, end_to_end_distance, neurite.type)
[ "def", "calculate_and_plot_end_to_end_distance", "(", "neurite", ")", ":", "def", "_dist", "(", "seg", ")", ":", "'''Distance between segmenr end and trunk'''", "return", "morphmath", ".", "point_dist", "(", "seg", "[", "1", "]", ",", "neurite", ".", "root_node", ".", "points", "[", "0", "]", ")", "end_to_end_distance", "=", "[", "_dist", "(", "s", ")", "for", "s", "in", "nm", ".", "iter_segments", "(", "neurite", ")", "]", "make_end_to_end_distance_plot", "(", "np", ".", "arange", "(", "len", "(", "end_to_end_distance", ")", ")", "+", "1", ",", "end_to_end_distance", ",", "neurite", ".", "type", ")" ]
Calculate and plot the end-to-end distance vs the number of segments for an increasingly larger part of a given neurite. Note that the plots are not very meaningful for bifurcating trees.
[ "Calculate", "and", "plot", "the", "end", "-", "to", "-", "end", "distance", "vs", "the", "number", "of", "segments", "for", "an", "increasingly", "larger", "part", "of", "a", "given", "neurite", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/end_to_end_distance.py#L60-L71
BlueBrain/NeuroM
neurom/core/types.py
tree_type_checker
def tree_type_checker(*ref): '''Tree type checker functor Returns: Functor that takes a tree, and returns true if that tree matches any of NeuriteTypes in ref Ex: >>> from neurom.core.types import NeuriteType, tree_type_checker >>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite) >>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter) ''' ref = tuple(ref) if NeuriteType.all in ref: def check_tree_type(_): '''Always returns true''' return True else: def check_tree_type(tree): '''Check whether tree has the same type as ref Returns: True if ref in the same type as tree.type or ref is NeuriteType.all ''' return tree.type in ref return check_tree_type
python
def tree_type_checker(*ref): ref = tuple(ref) if NeuriteType.all in ref: def check_tree_type(_): return True else: def check_tree_type(tree): return tree.type in ref return check_tree_type
[ "def", "tree_type_checker", "(", "*", "ref", ")", ":", "ref", "=", "tuple", "(", "ref", ")", "if", "NeuriteType", ".", "all", "in", "ref", ":", "def", "check_tree_type", "(", "_", ")", ":", "'''Always returns true'''", "return", "True", "else", ":", "def", "check_tree_type", "(", "tree", ")", ":", "'''Check whether tree has the same type as ref\n\n Returns:\n True if ref in the same type as tree.type or ref is NeuriteType.all\n '''", "return", "tree", ".", "type", "in", "ref", "return", "check_tree_type" ]
Tree type checker functor Returns: Functor that takes a tree, and returns true if that tree matches any of NeuriteTypes in ref Ex: >>> from neurom.core.types import NeuriteType, tree_type_checker >>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite) >>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)
[ "Tree", "type", "checker", "functor" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/types.py#L66-L92
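A hedged usage sketch of the returned predicate; it assumes NeuroM is installed, that neurom.load_neuron is available as in this era of the library, and uses a hypothetical morphology file path 'some/neuron.swc':

import neurom as nm
from neurom.core.types import NeuriteType, tree_type_checker

nrn = nm.load_neuron('some/neuron.swc')  # hypothetical input file
is_axon_or_basal = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)
selected = [neurite for neurite in nrn.neurites if is_axon_or_basal(neurite)]
print(len(selected))  # number of neurites matching the requested types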
BlueBrain/NeuroM
neurom/core/types.py
dendrite_filter
def dendrite_filter(n): '''Select only dendrites''' return n.type == NeuriteType.basal_dendrite or n.type == NeuriteType.apical_dendrite
python
def dendrite_filter(n): return n.type == NeuriteType.basal_dendrite or n.type == NeuriteType.apical_dendrite
[ "def", "dendrite_filter", "(", "n", ")", ":", "return", "n", ".", "type", "==", "NeuriteType", ".", "basal_dendrite", "or", "n", ".", "type", "==", "NeuriteType", ".", "apical_dendrite" ]
Select only dendrites
[ "Select", "only", "dendrites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/types.py#L95-L97
BlueBrain/NeuroM
examples/plot_somas.py
plot_somas
def plot_somas(somas): '''Plot set of somas on same figure as spheres, each with different color''' _, ax = common.get_figure(new_fig=True, subplot=111, params={'projection': '3d', 'aspect': 'equal'}) for s in somas: common.plot_sphere(ax, s.center, s.radius, color=random_color(), alpha=1) plt.show()
python
def plot_somas(somas): _, ax = common.get_figure(new_fig=True, subplot=111, params={'projection': '3d', 'aspect': 'equal'}) for s in somas: common.plot_sphere(ax, s.center, s.radius, color=random_color(), alpha=1) plt.show()
[ "def", "plot_somas", "(", "somas", ")", ":", "_", ",", "ax", "=", "common", ".", "get_figure", "(", "new_fig", "=", "True", ",", "subplot", "=", "111", ",", "params", "=", "{", "'projection'", ":", "'3d'", ",", "'aspect'", ":", "'equal'", "}", ")", "for", "s", "in", "somas", ":", "common", ".", "plot_sphere", "(", "ax", ",", "s", ".", "center", ",", "s", ".", "radius", ",", "color", "=", "random_color", "(", ")", ",", "alpha", "=", "1", ")", "plt", ".", "show", "(", ")" ]
Plot set of somas on same figure as spheres, each with different color
[ "Plot", "set", "of", "somas", "on", "same", "figure", "as", "spheres", "each", "with", "different", "color" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_somas.py#L48-L54
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_max_recursion_depth
def _max_recursion_depth(obj): ''' Estimate recursion depth, which is defined as the number of nodes in a tree ''' neurites = obj.neurites if hasattr(obj, 'neurites') else [obj] return max(sum(1 for _ in neu.iter_sections()) for neu in neurites)
python
def _max_recursion_depth(obj): neurites = obj.neurites if hasattr(obj, 'neurites') else [obj] return max(sum(1 for _ in neu.iter_sections()) for neu in neurites)
[ "def", "_max_recursion_depth", "(", "obj", ")", ":", "neurites", "=", "obj", ".", "neurites", "if", "hasattr", "(", "obj", ",", "'neurites'", ")", "else", "[", "obj", "]", "return", "max", "(", "sum", "(", "1", "for", "_", "in", "neu", ".", "iter_sections", "(", ")", ")", "for", "neu", "in", "neurites", ")" ]
Estimate recursion depth, which is defined as the number of nodes in a tree
[ "Estimate", "recursion", "depth", "which", "is", "defined", "as", "the", "number", "of", "nodes", "in", "a", "tree" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L44-L49
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_total_rectangles
def _total_rectangles(tree): ''' Calculate the total number of segments that are required for the dendrogram. There is a vertical line for each segment and two horizontal line at each branching point ''' return sum(len(sec.children) + sec.points.shape[0] - 1 for sec in tree.iter_sections())
python
def _total_rectangles(tree): return sum(len(sec.children) + sec.points.shape[0] - 1 for sec in tree.iter_sections())
[ "def", "_total_rectangles", "(", "tree", ")", ":", "return", "sum", "(", "len", "(", "sec", ".", "children", ")", "+", "sec", ".", "points", ".", "shape", "[", "0", "]", "-", "1", "for", "sec", "in", "tree", ".", "iter_sections", "(", ")", ")" ]
Calculate the total number of segments that are required for the dendrogram. There is a vertical line for each segment and two horizontal lines at each branching point
[ "Calculate", "the", "total", "number", "of", "segments", "that", "are", "required", "for", "the", "dendrogram", ".", "There", "is", "a", "vertical", "line", "for", "each", "segment", "and", "two", "horizontal", "line", "at", "each", "branching", "point" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L52-L59
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_n_rectangles
def _n_rectangles(obj): ''' Calculate the total number of rectangles with respect to the type of the object ''' return sum(_total_rectangles(neu) for neu in obj.neurites) \ if hasattr(obj, 'neurites') else _total_rectangles(obj)
python
def _n_rectangles(obj): return sum(_total_rectangles(neu) for neu in obj.neurites) \ if hasattr(obj, 'neurites') else _total_rectangles(obj)
[ "def", "_n_rectangles", "(", "obj", ")", ":", "return", "sum", "(", "_total_rectangles", "(", "neu", ")", "for", "neu", "in", "obj", ".", "neurites", ")", "if", "hasattr", "(", "obj", ",", "'neurites'", ")", "else", "_total_rectangles", "(", "obj", ")" ]
Calculate the total number of rectangles with respect to the type of the object
[ "Calculate", "the", "total", "number", "of", "rectangles", "with", "respect", "to", "the", "type", "of", "the", "object" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L62-L68
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_square_segment
def _square_segment(radius, origin): '''Vertices for a square ''' return np.array(((origin[0] - radius, origin[1] - radius), (origin[0] - radius, origin[1] + radius), (origin[0] + radius, origin[1] + radius), (origin[0] + radius, origin[1] - radius)))
python
def _square_segment(radius, origin): return np.array(((origin[0] - radius, origin[1] - radius), (origin[0] - radius, origin[1] + radius), (origin[0] + radius, origin[1] + radius), (origin[0] + radius, origin[1] - radius)))
[ "def", "_square_segment", "(", "radius", ",", "origin", ")", ":", "return", "np", ".", "array", "(", "(", "(", "origin", "[", "0", "]", "-", "radius", ",", "origin", "[", "1", "]", "-", "radius", ")", ",", "(", "origin", "[", "0", "]", "-", "radius", ",", "origin", "[", "1", "]", "+", "radius", ")", ",", "(", "origin", "[", "0", "]", "+", "radius", ",", "origin", "[", "1", "]", "+", "radius", ")", ",", "(", "origin", "[", "0", "]", "+", "radius", ",", "origin", "[", "1", "]", "-", "radius", ")", ")", ")" ]
Vertices for a square
[ "Vertices", "for", "a", "square" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L71-L77
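To see what this vertex helper produces, a standalone re-implementation evaluated for a unit radius centred at the origin (kept local, under our own name, so it runs without NeuroM):

import numpy as np

def square_vertices(radius, origin):
    # same four corners as the recorded _square_segment helper
    return np.array(((origin[0] - radius, origin[1] - radius),
                     (origin[0] - radius, origin[1] + radius),
                     (origin[0] + radius, origin[1] + radius),
                     (origin[0] + radius, origin[1] - radius)))

print(square_vertices(1.0, (0.0, 0.0)))  # corners (-1,-1), (-1,1), (1,1), (1,-1) of a 2x2 square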
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_vertical_segment
def _vertical_segment(old_offs, new_offs, spacing, radii): '''Vertices for a vertical rectangle ''' return np.array(((new_offs[0] - radii[0], old_offs[1] + spacing[1]), (new_offs[0] - radii[1], new_offs[1]), (new_offs[0] + radii[1], new_offs[1]), (new_offs[0] + radii[0], old_offs[1] + spacing[1])))
python
def _vertical_segment(old_offs, new_offs, spacing, radii): return np.array(((new_offs[0] - radii[0], old_offs[1] + spacing[1]), (new_offs[0] - radii[1], new_offs[1]), (new_offs[0] + radii[1], new_offs[1]), (new_offs[0] + radii[0], old_offs[1] + spacing[1])))
[ "def", "_vertical_segment", "(", "old_offs", ",", "new_offs", ",", "spacing", ",", "radii", ")", ":", "return", "np", ".", "array", "(", "(", "(", "new_offs", "[", "0", "]", "-", "radii", "[", "0", "]", ",", "old_offs", "[", "1", "]", "+", "spacing", "[", "1", "]", ")", ",", "(", "new_offs", "[", "0", "]", "-", "radii", "[", "1", "]", ",", "new_offs", "[", "1", "]", ")", ",", "(", "new_offs", "[", "0", "]", "+", "radii", "[", "1", "]", ",", "new_offs", "[", "1", "]", ")", ",", "(", "new_offs", "[", "0", "]", "+", "radii", "[", "0", "]", ",", "old_offs", "[", "1", "]", "+", "spacing", "[", "1", "]", ")", ")", ")" ]
Vertices for a vertical rectangle
[ "Vertices", "for", "a", "vertical", "rectangle" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L80-L86
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_horizontal_segment
def _horizontal_segment(old_offs, new_offs, spacing, diameter): '''Vertices of a horizontal rectangle ''' return np.array(((old_offs[0], old_offs[1] + spacing[1]), (new_offs[0], old_offs[1] + spacing[1]), (new_offs[0], old_offs[1] + spacing[1] - diameter), (old_offs[0], old_offs[1] + spacing[1] - diameter)))
python
def _horizontal_segment(old_offs, new_offs, spacing, diameter): return np.array(((old_offs[0], old_offs[1] + spacing[1]), (new_offs[0], old_offs[1] + spacing[1]), (new_offs[0], old_offs[1] + spacing[1] - diameter), (old_offs[0], old_offs[1] + spacing[1] - diameter)))
[ "def", "_horizontal_segment", "(", "old_offs", ",", "new_offs", ",", "spacing", ",", "diameter", ")", ":", "return", "np", ".", "array", "(", "(", "(", "old_offs", "[", "0", "]", ",", "old_offs", "[", "1", "]", "+", "spacing", "[", "1", "]", ")", ",", "(", "new_offs", "[", "0", "]", ",", "old_offs", "[", "1", "]", "+", "spacing", "[", "1", "]", ")", ",", "(", "new_offs", "[", "0", "]", ",", "old_offs", "[", "1", "]", "+", "spacing", "[", "1", "]", "-", "diameter", ")", ",", "(", "old_offs", "[", "0", "]", ",", "old_offs", "[", "1", "]", "+", "spacing", "[", "1", "]", "-", "diameter", ")", ")", ")" ]
Vertices of a horizontal rectangle
[ "Vertices", "of", "a", "horizontal", "rectangle" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L89-L95
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_spacingx
def _spacingx(node, max_dims, xoffset, xspace): '''Determine the spacing of the current node depending on the number of the leaves of the tree ''' x_spacing = _n_terminations(node) * xspace if x_spacing > max_dims[0]: max_dims[0] = x_spacing return xoffset - x_spacing / 2.
python
def _spacingx(node, max_dims, xoffset, xspace): x_spacing = _n_terminations(node) * xspace if x_spacing > max_dims[0]: max_dims[0] = x_spacing return xoffset - x_spacing / 2.
[ "def", "_spacingx", "(", "node", ",", "max_dims", ",", "xoffset", ",", "xspace", ")", ":", "x_spacing", "=", "_n_terminations", "(", "node", ")", "*", "xspace", "if", "x_spacing", ">", "max_dims", "[", "0", "]", ":", "max_dims", "[", "0", "]", "=", "x_spacing", "return", "xoffset", "-", "x_spacing", "/", "2." ]
Determine the spacing of the current node depending on the number of the leaves of the tree
[ "Determine", "the", "spacing", "of", "the", "current", "node", "depending", "on", "the", "number", "of", "the", "leaves", "of", "the", "tree" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L98-L107
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_update_offsets
def _update_offsets(start_x, spacing, terminations, offsets, length): '''Update the offsets ''' return (start_x + spacing[0] * terminations / 2., offsets[1] + spacing[1] * 2. + length)
python
def _update_offsets(start_x, spacing, terminations, offsets, length): return (start_x + spacing[0] * terminations / 2., offsets[1] + spacing[1] * 2. + length)
[ "def", "_update_offsets", "(", "start_x", ",", "spacing", ",", "terminations", ",", "offsets", ",", "length", ")", ":", "return", "(", "start_x", "+", "spacing", "[", "0", "]", "*", "terminations", "/", "2.", ",", "offsets", "[", "1", "]", "+", "spacing", "[", "1", "]", "*", "2.", "+", "length", ")" ]
Update the offsets
[ "Update", "the", "offsets" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L110-L114
BlueBrain/NeuroM
neurom/view/_dendrogram.py
_max_diameter
def _max_diameter(tree): '''Find max diameter in tree ''' return 2. * max(max(node.points[:, COLS.R]) for node in tree.ipreorder())
python
def _max_diameter(tree): return 2. * max(max(node.points[:, COLS.R]) for node in tree.ipreorder())
[ "def", "_max_diameter", "(", "tree", ")", ":", "return", "2.", "*", "max", "(", "max", "(", "node", ".", "points", "[", ":", ",", "COLS", ".", "R", "]", ")", "for", "node", "in", "tree", ".", "ipreorder", "(", ")", ")" ]
Find max diameter in tree
[ "Find", "max", "diameter", "in", "tree" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L117-L120
BlueBrain/NeuroM
neurom/view/_dendrogram.py
Dendrogram._generate_soma
def _generate_soma(self): '''soma''' radius = self._obj.soma.radius return _square_segment(radius, (0., -radius))
python
def _generate_soma(self): radius = self._obj.soma.radius return _square_segment(radius, (0., -radius))
[ "def", "_generate_soma", "(", "self", ")", ":", "radius", "=", "self", ".", "_obj", ".", "soma", ".", "radius", "return", "_square_segment", "(", "radius", ",", "(", "0.", ",", "-", "radius", ")", ")" ]
soma
[ "soma" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L160-L163
BlueBrain/NeuroM
neurom/view/_dendrogram.py
Dendrogram.generate
def generate(self): '''Generate dendrogram ''' offsets = (0., 0.) n_previous = 0 # set recursion limit with respect to # the max number of nodes on the trees old_depth = sys.getrecursionlimit() max_depth = old_depth if old_depth > self._max_rec_depth else self._max_rec_depth # TODO: This should be fixed so we don't set sys.setrecursionlimit at all sys.setrecursionlimit(max_depth) if isinstance(self._obj, Neurite): max_diameter = _max_diameter(self._obj.root_node) dummy_section = Tree() dummy_section.add_child(self._obj.root_node) self._generate_dendro(dummy_section, (max_diameter, 0.), offsets) self._groups.append((0, self._n)) self._dims.append(self._max_dims) else: for neurite in self._obj.neurites: neurite = neurite.root_node max_diameter = _max_diameter(neurite) dummy_section = Tree() dummy_section.add_child(neurite) self._generate_dendro(dummy_section, (max_diameter, 0.), offsets) # store in trees the indices for the slice which corresponds # to the current neurite self._groups.append((n_previous, self._n)) # store the max dims per neurite for view positioning self._dims.append(self._max_dims) # reset the max dimensions for the next tree in line self._max_dims = [0., 0.] # keep track of the next tree start index in list n_previous = self._n # set it back to its initial value sys.setrecursionlimit(old_depth)
python
def generate(self): offsets = (0., 0.) n_previous = 0 old_depth = sys.getrecursionlimit() max_depth = old_depth if old_depth > self._max_rec_depth else self._max_rec_depth sys.setrecursionlimit(max_depth) if isinstance(self._obj, Neurite): max_diameter = _max_diameter(self._obj.root_node) dummy_section = Tree() dummy_section.add_child(self._obj.root_node) self._generate_dendro(dummy_section, (max_diameter, 0.), offsets) self._groups.append((0, self._n)) self._dims.append(self._max_dims) else: for neurite in self._obj.neurites: neurite = neurite.root_node max_diameter = _max_diameter(neurite) dummy_section = Tree() dummy_section.add_child(neurite) self._generate_dendro(dummy_section, (max_diameter, 0.), offsets) self._groups.append((n_previous, self._n)) self._dims.append(self._max_dims) self._max_dims = [0., 0.] n_previous = self._n sys.setrecursionlimit(old_depth)
[ "def", "generate", "(", "self", ")", ":", "offsets", "=", "(", "0.", ",", "0.", ")", "n_previous", "=", "0", "# set recursion limit with respect to", "# the max number of nodes on the trees", "old_depth", "=", "sys", ".", "getrecursionlimit", "(", ")", "max_depth", "=", "old_depth", "if", "old_depth", ">", "self", ".", "_max_rec_depth", "else", "self", ".", "_max_rec_depth", "# TODO: This should be fixed so we don't set sys.setrecursionlimit at all", "sys", ".", "setrecursionlimit", "(", "max_depth", ")", "if", "isinstance", "(", "self", ".", "_obj", ",", "Neurite", ")", ":", "max_diameter", "=", "_max_diameter", "(", "self", ".", "_obj", ".", "root_node", ")", "dummy_section", "=", "Tree", "(", ")", "dummy_section", ".", "add_child", "(", "self", ".", "_obj", ".", "root_node", ")", "self", ".", "_generate_dendro", "(", "dummy_section", ",", "(", "max_diameter", ",", "0.", ")", ",", "offsets", ")", "self", ".", "_groups", ".", "append", "(", "(", "0", ",", "self", ".", "_n", ")", ")", "self", ".", "_dims", ".", "append", "(", "self", ".", "_max_dims", ")", "else", ":", "for", "neurite", "in", "self", ".", "_obj", ".", "neurites", ":", "neurite", "=", "neurite", ".", "root_node", "max_diameter", "=", "_max_diameter", "(", "neurite", ")", "dummy_section", "=", "Tree", "(", ")", "dummy_section", ".", "add_child", "(", "neurite", ")", "self", ".", "_generate_dendro", "(", "dummy_section", ",", "(", "max_diameter", ",", "0.", ")", ",", "offsets", ")", "# store in trees the indices for the slice which corresponds", "# to the current neurite", "self", ".", "_groups", ".", "append", "(", "(", "n_previous", ",", "self", ".", "_n", ")", ")", "# store the max dims per neurite for view positioning", "self", ".", "_dims", ".", "append", "(", "self", ".", "_max_dims", ")", "# reset the max dimensions for the next tree in line", "self", ".", "_max_dims", "=", "[", "0.", ",", "0.", "]", "# keep track of the next tree start index in list", "n_previous", "=", "self", ".", "_n", "# set it back to its initial value", "sys", ".", "setrecursionlimit", "(", "old_depth", ")" ]
Generate dendrogram
[ "Generate", "dendrogram" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L165-L216
BlueBrain/NeuroM
neurom/view/_dendrogram.py
Dendrogram._generate_dendro
def _generate_dendro(self, current_section, spacing, offsets): '''Recursive function for dendrogram line computations ''' max_dims = self._max_dims start_x = _spacingx(current_section, max_dims, offsets[0], spacing[0]) for child in current_section.children: segments = child.points # number of leaves in child terminations = _n_terminations(child) # segement lengths seg_lengths = np.linalg.norm(np.subtract(segments[:-1, COLS.XYZ], segments[1:, COLS.XYZ]), axis=1) # segment radii radii = np.vstack((segments[:-1, COLS.R], segments[1:, COLS.R])).T \ if self._show_diameters else np.zeros((seg_lengths.shape[0], 2)) y_offset = offsets[1] for i, slen in enumerate(seg_lengths): # offset update for the vertical segments new_offsets = _update_offsets(start_x, spacing, terminations, (offsets[0], y_offset), slen) # segments are drawn vertically, thus only y_offset changes from init offsets self._rectangles[self._n] = _vertical_segment((offsets[0], y_offset), new_offsets, spacing, radii[i, :]) self._n += 1 y_offset = new_offsets[1] if y_offset + spacing[1] * 2 + sum(seg_lengths) > max_dims[1]: max_dims[1] = y_offset + spacing[1] * 2. + sum(seg_lengths) self._max_dims = max_dims # recursive call to self. self._generate_dendro(child, spacing, new_offsets) # update the starting position for the next child start_x += terminations * spacing[0] # write the horizontal lines only for bifurcations, where the are actual horizontal # lines and not zero ones if offsets[0] != new_offsets[0]: # horizontal segment. Thickness is either 0 if show_diameters is false # or 1. if show_diameters is true self._rectangles[self._n] = _horizontal_segment(offsets, new_offsets, spacing, 0.) self._n += 1
python
def _generate_dendro(self, current_section, spacing, offsets): max_dims = self._max_dims start_x = _spacingx(current_section, max_dims, offsets[0], spacing[0]) for child in current_section.children: segments = child.points terminations = _n_terminations(child) seg_lengths = np.linalg.norm(np.subtract(segments[:-1, COLS.XYZ], segments[1:, COLS.XYZ]), axis=1) radii = np.vstack((segments[:-1, COLS.R], segments[1:, COLS.R])).T \ if self._show_diameters else np.zeros((seg_lengths.shape[0], 2)) y_offset = offsets[1] for i, slen in enumerate(seg_lengths): new_offsets = _update_offsets(start_x, spacing, terminations, (offsets[0], y_offset), slen) self._rectangles[self._n] = _vertical_segment((offsets[0], y_offset), new_offsets, spacing, radii[i, :]) self._n += 1 y_offset = new_offsets[1] if y_offset + spacing[1] * 2 + sum(seg_lengths) > max_dims[1]: max_dims[1] = y_offset + spacing[1] * 2. + sum(seg_lengths) self._max_dims = max_dims self._generate_dendro(child, spacing, new_offsets) start_x += terminations * spacing[0] if offsets[0] != new_offsets[0]: self._rectangles[self._n] = _horizontal_segment(offsets, new_offsets, spacing, 0.) self._n += 1
[ "def", "_generate_dendro", "(", "self", ",", "current_section", ",", "spacing", ",", "offsets", ")", ":", "max_dims", "=", "self", ".", "_max_dims", "start_x", "=", "_spacingx", "(", "current_section", ",", "max_dims", ",", "offsets", "[", "0", "]", ",", "spacing", "[", "0", "]", ")", "for", "child", "in", "current_section", ".", "children", ":", "segments", "=", "child", ".", "points", "# number of leaves in child", "terminations", "=", "_n_terminations", "(", "child", ")", "# segement lengths", "seg_lengths", "=", "np", ".", "linalg", ".", "norm", "(", "np", ".", "subtract", "(", "segments", "[", ":", "-", "1", ",", "COLS", ".", "XYZ", "]", ",", "segments", "[", "1", ":", ",", "COLS", ".", "XYZ", "]", ")", ",", "axis", "=", "1", ")", "# segment radii", "radii", "=", "np", ".", "vstack", "(", "(", "segments", "[", ":", "-", "1", ",", "COLS", ".", "R", "]", ",", "segments", "[", "1", ":", ",", "COLS", ".", "R", "]", ")", ")", ".", "T", "if", "self", ".", "_show_diameters", "else", "np", ".", "zeros", "(", "(", "seg_lengths", ".", "shape", "[", "0", "]", ",", "2", ")", ")", "y_offset", "=", "offsets", "[", "1", "]", "for", "i", ",", "slen", "in", "enumerate", "(", "seg_lengths", ")", ":", "# offset update for the vertical segments", "new_offsets", "=", "_update_offsets", "(", "start_x", ",", "spacing", ",", "terminations", ",", "(", "offsets", "[", "0", "]", ",", "y_offset", ")", ",", "slen", ")", "# segments are drawn vertically, thus only y_offset changes from init offsets", "self", ".", "_rectangles", "[", "self", ".", "_n", "]", "=", "_vertical_segment", "(", "(", "offsets", "[", "0", "]", ",", "y_offset", ")", ",", "new_offsets", ",", "spacing", ",", "radii", "[", "i", ",", ":", "]", ")", "self", ".", "_n", "+=", "1", "y_offset", "=", "new_offsets", "[", "1", "]", "if", "y_offset", "+", "spacing", "[", "1", "]", "*", "2", "+", "sum", "(", "seg_lengths", ")", ">", "max_dims", "[", "1", "]", ":", "max_dims", "[", "1", "]", "=", "y_offset", "+", "spacing", "[", "1", "]", "*", "2.", "+", "sum", "(", "seg_lengths", ")", "self", ".", "_max_dims", "=", "max_dims", "# recursive call to self.", "self", ".", "_generate_dendro", "(", "child", ",", "spacing", ",", "new_offsets", ")", "# update the starting position for the next child", "start_x", "+=", "terminations", "*", "spacing", "[", "0", "]", "# write the horizontal lines only for bifurcations, where the are actual horizontal", "# lines and not zero ones", "if", "offsets", "[", "0", "]", "!=", "new_offsets", "[", "0", "]", ":", "# horizontal segment. Thickness is either 0 if show_diameters is false", "# or 1. if show_diameters is true", "self", ".", "_rectangles", "[", "self", ".", "_n", "]", "=", "_horizontal_segment", "(", "offsets", ",", "new_offsets", ",", "spacing", ",", "0.", ")", "self", ".", "_n", "+=", "1" ]
Recursive function for dendrogram line computations
[ "Recursive", "function", "for", "dendrogram", "line", "computations" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L219-L270
BlueBrain/NeuroM
neurom/view/_dendrogram.py
Dendrogram.types
def types(self): ''' Returns an iterator over the types of the neurites in the object. If the object is a tree, then one value is returned. ''' neurites = self._obj.neurites if hasattr(self._obj, 'neurites') else (self._obj,) return (neu.type for neu in neurites)
python
def types(self): neurites = self._obj.neurites if hasattr(self._obj, 'neurites') else (self._obj,) return (neu.type for neu in neurites)
[ "def", "types", "(", "self", ")", ":", "neurites", "=", "self", ".", "_obj", ".", "neurites", "if", "hasattr", "(", "self", ".", "_obj", ",", "'neurites'", ")", "else", "(", "self", ".", "_obj", ",", ")", "return", "(", "neu", ".", "type", "for", "neu", "in", "neurites", ")" ]
Returns an iterator over the types of the neurites in the object. If the object is a tree, then one value is returned.
[ "Returns", "an", "iterator", "over", "the", "types", "of", "the", "neurites", "in", "the", "object", ".", "If", "the", "object", "is", "a", "tree", "then", "one", "value", "is", "returned", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/_dendrogram.py#L292-L297
BlueBrain/NeuroM
neurom/fst/__init__.py
register_neurite_feature
def register_neurite_feature(name, func): '''Register a feature to be applied to neurites Parameters: name: name of the feature, used for access via get() function. func: single parameter function of a neurite. ''' if name in NEURITEFEATURES: raise NeuroMError('Attempt to hide registered feature %s' % name) def _fun(neurites, neurite_type=_ntype.all): '''Wrap neurite function from outer scope and map into list''' return list(func(n) for n in _ineurites(neurites, filt=_is_type(neurite_type))) NEURONFEATURES[name] = _fun
python
def register_neurite_feature(name, func): if name in NEURITEFEATURES: raise NeuroMError('Attempt to hide registered feature %s' % name) def _fun(neurites, neurite_type=_ntype.all): return list(func(n) for n in _ineurites(neurites, filt=_is_type(neurite_type))) NEURONFEATURES[name] = _fun
[ "def", "register_neurite_feature", "(", "name", ",", "func", ")", ":", "if", "name", "in", "NEURITEFEATURES", ":", "raise", "NeuroMError", "(", "'Attempt to hide registered feature %s'", "%", "name", ")", "def", "_fun", "(", "neurites", ",", "neurite_type", "=", "_ntype", ".", "all", ")", ":", "'''Wrap neurite function from outer scope and map into list'''", "return", "list", "(", "func", "(", "n", ")", "for", "n", "in", "_ineurites", "(", "neurites", ",", "filt", "=", "_is_type", "(", "neurite_type", ")", ")", ")", "NEURONFEATURES", "[", "name", "]", "=", "_fun" ]
Register a feature to be applied to neurites Parameters: name: name of the feature, used for access via get() function. func: single parameter function of a neurite.
[ "Register", "a", "feature", "to", "be", "applied", "to", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/__init__.py#L108-L122
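Usage sketch for register_neurite_feature above (not part of the original record): it assumes register_neurite_feature is importable from neurom.fst, that a file 'some_neuron.swc' exists, and that the feature name 'neurite_path_length' is purely illustrative.

    import numpy as np
    import neurom as nm
    from neurom.fst import register_neurite_feature

    def _neurite_path_length(neurite):
        # sum of per-section path lengths, computed directly from the XYZ point columns
        return sum(np.linalg.norm(np.diff(s.points[:, :3], axis=0), axis=1).sum()
                   for s in neurite.iter_sections())

    register_neurite_feature('neurite_path_length', _neurite_path_length)

    nrn = nm.load_neuron('some_neuron.swc')  # hypothetical input file
    print(nm.get('neurite_path_length', nrn, neurite_type=nm.AXON))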
BlueBrain/NeuroM
neurom/fst/__init__.py
get
def get(feature, obj, **kwargs): '''Obtain a feature from a set of morphology objects Parameters: feature(string): feature to extract obj: a neuron, population or neurite tree **kwargs: parameters to forward to underlying worker functions Returns: features as a 1D or 2D numpy array. ''' feature = (NEURITEFEATURES[feature] if feature in NEURITEFEATURES else NEURONFEATURES[feature]) return _np.array(list(feature(obj, **kwargs)))
python
def get(feature, obj, **kwargs): feature = (NEURITEFEATURES[feature] if feature in NEURITEFEATURES else NEURONFEATURES[feature]) return _np.array(list(feature(obj, **kwargs)))
[ "def", "get", "(", "feature", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "feature", "=", "(", "NEURITEFEATURES", "[", "feature", "]", "if", "feature", "in", "NEURITEFEATURES", "else", "NEURONFEATURES", "[", "feature", "]", ")", "return", "_np", ".", "array", "(", "list", "(", "feature", "(", "obj", ",", "*", "*", "kwargs", ")", ")", ")" ]
Obtain a feature from a set of morphology objects Parameters: feature(string): feature to extract obj: a neuron, population or neurite tree **kwargs: parameters to forward to underlying worker functions Returns: features as a 1D or 2D numpy array.
[ "Obtain", "a", "feature", "from", "a", "set", "of", "morphology", "objects" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/__init__.py#L125-L141
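A short usage sketch of get() as documented above; the file name is a placeholder and 'section_lengths' / 'soma_radii' are assumed to be among the registered features.

    import neurom as nm

    nrn = nm.load_neuron('some_neuron.swc')   # hypothetical morphology file
    lengths = nm.get('section_lengths', nrn, neurite_type=nm.AXON)  # neurite feature
    radii = nm.get('soma_radii', nrn)                               # neuron feature
    print(lengths.mean(), radii)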
BlueBrain/NeuroM
neurom/fst/__init__.py
_indent
def _indent(string, count): '''indent `string` by `count` * INDENT''' indent = _INDENT * count ret = indent + string.replace('\n', '\n' + indent) return ret.rstrip()
python
def _indent(string, count): indent = _INDENT * count ret = indent + string.replace('\n', '\n' + indent) return ret.rstrip()
[ "def", "_indent", "(", "string", ",", "count", ")", ":", "indent", "=", "_INDENT", "*", "count", "ret", "=", "indent", "+", "string", ".", "replace", "(", "'\\n'", ",", "'\\n'", "+", "indent", ")", "return", "ret", ".", "rstrip", "(", ")" ]
indent `string` by `count` * INDENT
[ "indent", "string", "by", "count", "*", "INDENT" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/__init__.py#L147-L151
BlueBrain/NeuroM
neurom/fst/__init__.py
_get_doc
def _get_doc(): '''Get a description of all the known available features''' def get_docstring(func): '''extract doctstring, if possible''' docstring = ':\n' if func.__doc__: docstring += _indent(func.__doc__, 2) return docstring ret = ['\nNeurite features (neurite, neuron, neuron population):'] ret.extend(_INDENT + '- ' + feature + get_docstring(func) for feature, func in sorted(NEURITEFEATURES.items())) ret.append('\nNeuron features (neuron, neuron population):') ret.extend(_INDENT + '- ' + feature + get_docstring(func) for feature, func in sorted(NEURONFEATURES.items())) return '\n'.join(ret)
python
def _get_doc(): def get_docstring(func): docstring = ':\n' if func.__doc__: docstring += _indent(func.__doc__, 2) return docstring ret = ['\nNeurite features (neurite, neuron, neuron population):'] ret.extend(_INDENT + '- ' + feature + get_docstring(func) for feature, func in sorted(NEURITEFEATURES.items())) ret.append('\nNeuron features (neuron, neuron population):') ret.extend(_INDENT + '- ' + feature + get_docstring(func) for feature, func in sorted(NEURONFEATURES.items())) return '\n'.join(ret)
[ "def", "_get_doc", "(", ")", ":", "def", "get_docstring", "(", "func", ")", ":", "'''extract doctstring, if possible'''", "docstring", "=", "':\\n'", "if", "func", ".", "__doc__", ":", "docstring", "+=", "_indent", "(", "func", ".", "__doc__", ",", "2", ")", "return", "docstring", "ret", "=", "[", "'\\nNeurite features (neurite, neuron, neuron population):'", "]", "ret", ".", "extend", "(", "_INDENT", "+", "'- '", "+", "feature", "+", "get_docstring", "(", "func", ")", "for", "feature", ",", "func", "in", "sorted", "(", "NEURITEFEATURES", ".", "items", "(", ")", ")", ")", "ret", ".", "append", "(", "'\\nNeuron features (neuron, neuron population):'", ")", "ret", ".", "extend", "(", "_INDENT", "+", "'- '", "+", "feature", "+", "get_docstring", "(", "func", ")", "for", "feature", ",", "func", "in", "sorted", "(", "NEURONFEATURES", ".", "items", "(", ")", ")", ")", "return", "'\\n'", ".", "join", "(", "ret", ")" ]
Get a description of all the known available features
[ "Get", "a", "description", "of", "all", "the", "known", "available", "features" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/__init__.py#L154-L171
BlueBrain/NeuroM
neurom/io/hdf5.py
read
def read(filename, remove_duplicates=False, data_wrapper=DataWrapper): '''Read a file and return a `data_wrapper'd` data * Tries to guess the format and the H5 version. * Unpacks the first block it finds out of ('repaired', 'unraveled', 'raw') Parameters: remove_duplicates: boolean, If True removes duplicate points from the beginning of each section. ''' with h5py.File(filename, mode='r') as h5file: version = get_version(h5file) if version == 'H5V1': points, groups = _unpack_v1(h5file) elif version == 'H5V2': stg = next(s for s in ('repaired', 'unraveled', 'raw') if s in h5file['neuron1']) points, groups = _unpack_v2(h5file, stage=stg) if remove_duplicates: points, groups = _remove_duplicate_points(points, groups) neuron_builder = BlockNeuronBuilder() points[:, POINT_DIAMETER] /= 2 # Store radius, not diameter for id_, row in enumerate(zip_longest(groups, groups[1:, GPFIRST], fillvalue=len(points))): (point_start, section_type, parent_id), point_end = row neuron_builder.add_section(id_, int(parent_id), int(section_type), points[point_start:point_end]) return neuron_builder.get_datawrapper(version, data_wrapper=data_wrapper)
python
def read(filename, remove_duplicates=False, data_wrapper=DataWrapper): with h5py.File(filename, mode='r') as h5file: version = get_version(h5file) if version == 'H5V1': points, groups = _unpack_v1(h5file) elif version == 'H5V2': stg = next(s for s in ('repaired', 'unraveled', 'raw') if s in h5file['neuron1']) points, groups = _unpack_v2(h5file, stage=stg) if remove_duplicates: points, groups = _remove_duplicate_points(points, groups) neuron_builder = BlockNeuronBuilder() points[:, POINT_DIAMETER] /= 2 for id_, row in enumerate(zip_longest(groups, groups[1:, GPFIRST], fillvalue=len(points))): (point_start, section_type, parent_id), point_end = row neuron_builder.add_section(id_, int(parent_id), int(section_type), points[point_start:point_end]) return neuron_builder.get_datawrapper(version, data_wrapper=data_wrapper)
[ "def", "read", "(", "filename", ",", "remove_duplicates", "=", "False", ",", "data_wrapper", "=", "DataWrapper", ")", ":", "with", "h5py", ".", "File", "(", "filename", ",", "mode", "=", "'r'", ")", "as", "h5file", ":", "version", "=", "get_version", "(", "h5file", ")", "if", "version", "==", "'H5V1'", ":", "points", ",", "groups", "=", "_unpack_v1", "(", "h5file", ")", "elif", "version", "==", "'H5V2'", ":", "stg", "=", "next", "(", "s", "for", "s", "in", "(", "'repaired'", ",", "'unraveled'", ",", "'raw'", ")", "if", "s", "in", "h5file", "[", "'neuron1'", "]", ")", "points", ",", "groups", "=", "_unpack_v2", "(", "h5file", ",", "stage", "=", "stg", ")", "if", "remove_duplicates", ":", "points", ",", "groups", "=", "_remove_duplicate_points", "(", "points", ",", "groups", ")", "neuron_builder", "=", "BlockNeuronBuilder", "(", ")", "points", "[", ":", ",", "POINT_DIAMETER", "]", "/=", "2", "# Store radius, not diameter", "for", "id_", ",", "row", "in", "enumerate", "(", "zip_longest", "(", "groups", ",", "groups", "[", "1", ":", ",", "GPFIRST", "]", ",", "fillvalue", "=", "len", "(", "points", ")", ")", ")", ":", "(", "point_start", ",", "section_type", ",", "parent_id", ")", ",", "point_end", "=", "row", "neuron_builder", ".", "add_section", "(", "id_", ",", "int", "(", "parent_id", ")", ",", "int", "(", "section_type", ")", ",", "points", "[", "point_start", ":", "point_end", "]", ")", "return", "neuron_builder", ".", "get_datawrapper", "(", "version", ",", "data_wrapper", "=", "data_wrapper", ")" ]
Read a file and return a `data_wrapper'd` data * Tries to guess the format and the H5 version. * Unpacks the first block it finds out of ('repaired', 'unraveled', 'raw') Parameters: remove_duplicates: boolean, If True removes duplicate points from the beginning of each section.
[ "Read", "a", "file", "and", "return", "a", "data_wrapper", "d", "data" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/hdf5.py#L67-L98
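A minimal call sketch for the reader above; 'cell.h5' is a placeholder path to an H5V1 or H5V2 morphology file.

    from neurom.io import hdf5

    data = hdf5.read('cell.h5', remove_duplicates=True)  # wrapped in DataWrapper by default
    print(type(data))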
BlueBrain/NeuroM
neurom/io/hdf5.py
_remove_duplicate_points
def _remove_duplicate_points(points, groups): ''' Removes the duplicate points from the beginning of a section, if they are present in points-groups representation. Returns: points, groups with unique points. ''' group_initial_ids = groups[:, GPFIRST] to_be_reduced = np.zeros(len(group_initial_ids)) to_be_removed = [] for ig, g in enumerate(groups): iid, typ, pid = g[GPFIRST], g[GTYPE], g[GPID] # Remove first point from sections that are # not the root section, a soma, or a child of a soma if pid != -1 and typ != 1 and groups[pid][GTYPE] != 1: # Remove duplicate from list of points to_be_removed.append(iid) # Reduce the id of the following sections # in groups structure by one to_be_reduced[ig + 1:] += 1 groups[:, GPFIRST] = groups[:, GPFIRST] - to_be_reduced points = np.delete(points, to_be_removed, axis=0) return points, groups
python
def _remove_duplicate_points(points, groups): group_initial_ids = groups[:, GPFIRST] to_be_reduced = np.zeros(len(group_initial_ids)) to_be_removed = [] for ig, g in enumerate(groups): iid, typ, pid = g[GPFIRST], g[GTYPE], g[GPID] if pid != -1 and typ != 1 and groups[pid][GTYPE] != 1: to_be_removed.append(iid) to_be_reduced[ig + 1:] += 1 groups[:, GPFIRST] = groups[:, GPFIRST] - to_be_reduced points = np.delete(points, to_be_removed, axis=0) return points, groups
[ "def", "_remove_duplicate_points", "(", "points", ",", "groups", ")", ":", "group_initial_ids", "=", "groups", "[", ":", ",", "GPFIRST", "]", "to_be_reduced", "=", "np", ".", "zeros", "(", "len", "(", "group_initial_ids", ")", ")", "to_be_removed", "=", "[", "]", "for", "ig", ",", "g", "in", "enumerate", "(", "groups", ")", ":", "iid", ",", "typ", ",", "pid", "=", "g", "[", "GPFIRST", "]", ",", "g", "[", "GTYPE", "]", ",", "g", "[", "GPID", "]", "# Remove first point from sections that are", "# not the root section, a soma, or a child of a soma", "if", "pid", "!=", "-", "1", "and", "typ", "!=", "1", "and", "groups", "[", "pid", "]", "[", "GTYPE", "]", "!=", "1", ":", "# Remove duplicate from list of points", "to_be_removed", ".", "append", "(", "iid", ")", "# Reduce the id of the following sections", "# in groups structure by one", "to_be_reduced", "[", "ig", "+", "1", ":", "]", "+=", "1", "groups", "[", ":", ",", "GPFIRST", "]", "=", "groups", "[", ":", ",", "GPFIRST", "]", "-", "to_be_reduced", "points", "=", "np", ".", "delete", "(", "points", ",", "to_be_removed", ",", "axis", "=", "0", ")", "return", "points", ",", "groups" ]
Removes the duplicate points from the beginning of a section, if they are present in points-groups representation. Returns: points, groups with unique points.
[ "Removes", "the", "duplicate", "points", "from", "the", "beginning", "of", "a", "section", "if", "they", "are", "present", "in", "points", "-", "groups", "representation", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/hdf5.py#L101-L129
BlueBrain/NeuroM
neurom/io/hdf5.py
_unpack_v1
def _unpack_v1(h5file): '''Unpack groups from HDF5 v1 file''' points = np.array(h5file['points']) groups = np.array(h5file['structure']) return points, groups
python
def _unpack_v1(h5file): points = np.array(h5file['points']) groups = np.array(h5file['structure']) return points, groups
[ "def", "_unpack_v1", "(", "h5file", ")", ":", "points", "=", "np", ".", "array", "(", "h5file", "[", "'points'", "]", ")", "groups", "=", "np", ".", "array", "(", "h5file", "[", "'structure'", "]", ")", "return", "points", ",", "groups" ]
Unpack groups from HDF5 v1 file
[ "Unpack", "groups", "from", "HDF5", "v1", "file" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/hdf5.py#L132-L136
BlueBrain/NeuroM
neurom/io/hdf5.py
_unpack_v2
def _unpack_v2(h5file, stage): '''Unpack groups from HDF5 v2 file''' points = np.array(h5file['neuron1/%s/points' % stage]) # from documentation: The /neuron1/structure/unraveled reuses /neuron1/structure/raw groups_stage = stage if stage != 'unraveled' else 'raw' groups = np.array(h5file['neuron1/structure/%s' % groups_stage]) stypes = np.array(h5file['neuron1/structure/sectiontype']) groups = np.hstack([groups, stypes]) groups[:, [1, 2]] = groups[:, [2, 1]] return points, groups
python
def _unpack_v2(h5file, stage): points = np.array(h5file['neuron1/%s/points' % stage]) groups_stage = stage if stage != 'unraveled' else 'raw' groups = np.array(h5file['neuron1/structure/%s' % groups_stage]) stypes = np.array(h5file['neuron1/structure/sectiontype']) groups = np.hstack([groups, stypes]) groups[:, [1, 2]] = groups[:, [2, 1]] return points, groups
[ "def", "_unpack_v2", "(", "h5file", ",", "stage", ")", ":", "points", "=", "np", ".", "array", "(", "h5file", "[", "'neuron1/%s/points'", "%", "stage", "]", ")", "# from documentation: The /neuron1/structure/unraveled reuses /neuron1/structure/raw", "groups_stage", "=", "stage", "if", "stage", "!=", "'unraveled'", "else", "'raw'", "groups", "=", "np", ".", "array", "(", "h5file", "[", "'neuron1/structure/%s'", "%", "groups_stage", "]", ")", "stypes", "=", "np", ".", "array", "(", "h5file", "[", "'neuron1/structure/sectiontype'", "]", ")", "groups", "=", "np", ".", "hstack", "(", "[", "groups", ",", "stypes", "]", ")", "groups", "[", ":", ",", "[", "1", ",", "2", "]", "]", "=", "groups", "[", ":", ",", "[", "2", ",", "1", "]", "]", "return", "points", ",", "groups" ]
Unpack groups from HDF5 v2 file
[ "Unpack", "groups", "from", "HDF5", "v2", "file" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/hdf5.py#L139-L148
BlueBrain/NeuroM
neurom/stats.py
fit_results_to_dict
def fit_results_to_dict(fit_results, min_bound=None, max_bound=None): '''Create a JSON-comparable dict from a FitResults object Parameters: fit_results (FitResults): object containing fit parameters,\ errors and type min_bound: optional min value to add to dictionary if min isn't\ a fit parameter. max_bound: optional max value to add to dictionary if max isn't\ a fit parameter. Returns: JSON-compatible dictionary with fit results Note: Supported fit types: 'norm', 'expon', 'uniform' ''' type_map = {'norm': 'normal', 'expon': 'exponential', 'uniform': 'uniform'} param_map = {'uniform': lambda p: [('min', p[0]), ('max', p[0] + p[1])], 'norm': lambda p: [('mu', p[0]), ('sigma', p[1])], 'expon': lambda p: [('lambda', 1.0 / p[1])]} d = OrderedDict({'type': type_map[fit_results.type]}) d.update(param_map[fit_results.type](fit_results.params)) if min_bound is not None and 'min' not in d: d['min'] = min_bound if max_bound is not None and 'max' not in d: d['max'] = max_bound return d
python
def fit_results_to_dict(fit_results, min_bound=None, max_bound=None): type_map = {'norm': 'normal', 'expon': 'exponential', 'uniform': 'uniform'} param_map = {'uniform': lambda p: [('min', p[0]), ('max', p[0] + p[1])], 'norm': lambda p: [('mu', p[0]), ('sigma', p[1])], 'expon': lambda p: [('lambda', 1.0 / p[1])]} d = OrderedDict({'type': type_map[fit_results.type]}) d.update(param_map[fit_results.type](fit_results.params)) if min_bound is not None and 'min' not in d: d['min'] = min_bound if max_bound is not None and 'max' not in d: d['max'] = max_bound return d
[ "def", "fit_results_to_dict", "(", "fit_results", ",", "min_bound", "=", "None", ",", "max_bound", "=", "None", ")", ":", "type_map", "=", "{", "'norm'", ":", "'normal'", ",", "'expon'", ":", "'exponential'", ",", "'uniform'", ":", "'uniform'", "}", "param_map", "=", "{", "'uniform'", ":", "lambda", "p", ":", "[", "(", "'min'", ",", "p", "[", "0", "]", ")", ",", "(", "'max'", ",", "p", "[", "0", "]", "+", "p", "[", "1", "]", ")", "]", ",", "'norm'", ":", "lambda", "p", ":", "[", "(", "'mu'", ",", "p", "[", "0", "]", ")", ",", "(", "'sigma'", ",", "p", "[", "1", "]", ")", "]", ",", "'expon'", ":", "lambda", "p", ":", "[", "(", "'lambda'", ",", "1.0", "/", "p", "[", "1", "]", ")", "]", "}", "d", "=", "OrderedDict", "(", "{", "'type'", ":", "type_map", "[", "fit_results", ".", "type", "]", "}", ")", "d", ".", "update", "(", "param_map", "[", "fit_results", ".", "type", "]", "(", "fit_results", ".", "params", ")", ")", "if", "min_bound", "is", "not", "None", "and", "'min'", "not", "in", "d", ":", "d", "[", "'min'", "]", "=", "min_bound", "if", "max_bound", "is", "not", "None", "and", "'max'", "not", "in", "d", ":", "d", "[", "'max'", "]", "=", "max_bound", "return", "d" ]
Create a JSON-comparable dict from a FitResults object Parameters: fit_results (FitResults): object containing fit parameters,\ errors and type min_bound: optional min value to add to dictionary if min isn't\ a fit parameter. max_bound: optional max value to add to dictionary if max isn't\ a fit parameter. Returns: JSON-compatible dictionary with fit results Note: Supported fit types: 'norm', 'expon', 'uniform'
[ "Create", "a", "JSON", "-", "comparable", "dict", "from", "a", "FitResults", "object" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/stats.py#L60-L91
BlueBrain/NeuroM
neurom/stats.py
fit
def fit(data, distribution='norm'): '''Calculate the parameters of a fit of a distribution to a data set Parameters: data: array of data points to be fitted Options: distribution (str): type of distribution to fit. Default 'norm'. Returns: FitResults object with fitted parameters, errors and distribution type Note: Uses Kolmogorov-Smirnov test to estimate distance and p-value. ''' params = getattr(_st, distribution).fit(data) return FitResults(params, _st.kstest(data, distribution, params), distribution)
python
def fit(data, distribution='norm'): params = getattr(_st, distribution).fit(data) return FitResults(params, _st.kstest(data, distribution, params), distribution)
[ "def", "fit", "(", "data", ",", "distribution", "=", "'norm'", ")", ":", "params", "=", "getattr", "(", "_st", ",", "distribution", ")", ".", "fit", "(", "data", ")", "return", "FitResults", "(", "params", ",", "_st", ".", "kstest", "(", "data", ",", "distribution", ",", "params", ")", ",", "distribution", ")" ]
Calculate the parameters of a fit of a distribution to a data set Parameters: data: array of data points to be fitted Options: distribution (str): type of distribution to fit. Default 'norm'. Returns: FitResults object with fitted parameters, errors and distribution type Note: Uses Kolmogorov-Smirnov test to estimate distance and p-value.
[ "Calculate", "the", "parameters", "of", "a", "fit", "of", "a", "distribution", "to", "a", "data", "set" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/stats.py#L94-L110
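A usage sketch combining fit() with fit_results_to_dict() from the earlier record; the sample data are synthetic.

    import numpy as np
    from neurom import stats

    data = np.random.normal(loc=2.0, scale=0.5, size=1000)  # synthetic sample
    result = stats.fit(data, distribution='norm')
    print(result.params)   # fitted (mu, sigma)
    print(result.errs)     # (ks-distance, p-value)
    print(stats.fit_results_to_dict(result))  # {'type': 'normal', 'mu': ..., 'sigma': ...}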
BlueBrain/NeuroM
neurom/stats.py
optimal_distribution
def optimal_distribution(data, distr_to_check=('norm', 'expon', 'uniform')): '''Calculate the parameters of a fit of different distributions to a data set and returns the distribution of the minimal ks-distance. Parameters: data: array of data points to be fitted Options: distr_to_check: tuple of distributions to be checked Returns: FitResults object with fitted parameters, errors and distribution type\ of the fit with the smallest fit distance Note: Uses Kolmogorov-Smirnov test to estimate distance and p-value. ''' fit_results = [fit(data, d) for d in distr_to_check] return min(fit_results, key=lambda fit: fit.errs[0])
python
def optimal_distribution(data, distr_to_check=('norm', 'expon', 'uniform')): fit_results = [fit(data, d) for d in distr_to_check] return min(fit_results, key=lambda fit: fit.errs[0])
[ "def", "optimal_distribution", "(", "data", ",", "distr_to_check", "=", "(", "'norm'", ",", "'expon'", ",", "'uniform'", ")", ")", ":", "fit_results", "=", "[", "fit", "(", "data", ",", "d", ")", "for", "d", "in", "distr_to_check", "]", "return", "min", "(", "fit_results", ",", "key", "=", "lambda", "fit", ":", "fit", ".", "errs", "[", "0", "]", ")" ]
Calculate the parameters of a fit of different distributions to a data set and return the distribution with the minimal ks-distance. Parameters: data: array of data points to be fitted Options: distr_to_check: tuple of distributions to be checked Returns: FitResults object with fitted parameters, errors and distribution type\ of the fit with the smallest fit distance Note: Uses Kolmogorov-Smirnov test to estimate distance and p-value.
[ "Calculate", "the", "parameters", "of", "a", "fit", "of", "different", "distributions", "to", "a", "data", "set", "and", "returns", "the", "distribution", "of", "the", "minimal", "ks", "-", "distance", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/stats.py#L113-L131
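A sketch of optimal_distribution() on synthetic exponential data; the expected winner is indicative only, since the KS comparison is data dependent.

    import numpy as np
    from neurom import stats

    data = np.random.exponential(scale=2.0, size=500)
    best = stats.optimal_distribution(data, distr_to_check=('norm', 'expon', 'uniform'))
    print(best.type, best.errs[0])  # typically 'expon', with the smallest ks-distance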
BlueBrain/NeuroM
neurom/stats.py
scalar_stats
def scalar_stats(data, functions=('min', 'max', 'mean', 'std')): '''Calculate the stats from the given numpy functions Parameters: data: array of data points to be used for the stats Options: functions: tuple of numpy stat functions to apply on data Returns: Dictionary with the name of the function as key and the result as the respective value ''' stats = {} for func in functions: stats[func] = getattr(np, func)(data) return stats
python
def scalar_stats(data, functions=('min', 'max', 'mean', 'std')): stats = {} for func in functions: stats[func] = getattr(np, func)(data) return stats
[ "def", "scalar_stats", "(", "data", ",", "functions", "=", "(", "'min'", ",", "'max'", ",", "'mean'", ",", "'std'", ")", ")", ":", "stats", "=", "{", "}", "for", "func", "in", "functions", ":", "stats", "[", "func", "]", "=", "getattr", "(", "np", ",", "func", ")", "(", "data", ")", "return", "stats" ]
Calculate the stats from the given numpy functions Parameters: data: array of data points to be used for the stats Options: functions: tuple of numpy stat functions to apply on data Returns: Dictionary with the name of the function as key and the result as the respective value
[ "Calculate", "the", "stats", "from", "the", "given", "numpy", "functions" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/stats.py#L134-L152
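A minimal sketch of scalar_stats(); any numpy reduction name works because the function resolves it with getattr(np, ...).

    import numpy as np
    from neurom import stats

    data = np.array([1.0, 2.0, 3.0, 4.0])
    print(stats.scalar_stats(data))                                # min/max/mean/std
    print(stats.scalar_stats(data, functions=('median', 'sum')))   # any numpy reduction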
BlueBrain/NeuroM
neurom/stats.py
compare_two
def compare_two(data1, data2, test=StatTests.ks): '''Compares two distributions of data and assess two scores: a distance between them and a probability they are drawn from the same distribution. Parameters: data1: numpy array of dataset 1 data2: numpy array of dataset 2 test: Stat_tests\ Defines the statistical test to be used, based\ on the scipy available modules.\ Accepted tests: ks_2samp, wilcoxon, ttest Returns: dist: float\ High numbers define high dissimilarity between the two datasets p-value: float\ Small numbers define high probability the data come from\ same dataset. ''' results = getattr(_st, get_test(test))(data1, data2) Stats = namedtuple('Stats', ['dist', 'pvalue']) return Stats(*results)
python
def compare_two(data1, data2, test=StatTests.ks): results = getattr(_st, get_test(test))(data1, data2) Stats = namedtuple('Stats', ['dist', 'pvalue']) return Stats(*results)
[ "def", "compare_two", "(", "data1", ",", "data2", ",", "test", "=", "StatTests", ".", "ks", ")", ":", "results", "=", "getattr", "(", "_st", ",", "get_test", "(", "test", ")", ")", "(", "data1", ",", "data2", ")", "Stats", "=", "namedtuple", "(", "'Stats'", ",", "[", "'dist'", ",", "'pvalue'", "]", ")", "return", "Stats", "(", "*", "results", ")" ]
Compares two distributions of data and assesses two scores: a distance between them and the probability that they are drawn from the same distribution. Parameters: data1: numpy array of dataset 1 data2: numpy array of dataset 2 test: Stat_tests\ Defines the statistical test to be used, based\ on the available scipy modules.\ Accepted tests: ks_2samp, wilcoxon, ttest Returns: dist: float\ High numbers define high dissimilarity between the two datasets p-value: float\ Small numbers define high probability the data come from\ the same distribution.
[ "Compares", "two", "distributions", "of", "data", "and", "assess", "two", "scores", ":", "a", "distance", "between", "them", "and", "a", "probability", "they", "are", "drawn", "from", "the", "same", "distribution", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/stats.py#L155-L179
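A sketch of compare_two() on two synthetic samples, using the default KS test via StatTests.ks.

    import numpy as np
    from neurom import stats

    sample_a = np.random.normal(0.0, 1.0, size=300)
    sample_b = np.random.normal(0.5, 1.0, size=300)
    result = stats.compare_two(sample_a, sample_b, test=stats.StatTests.ks)
    print(result.dist, result.pvalue)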
BlueBrain/NeuroM
neurom/stats.py
total_score
def total_score(paired_dats, p=2, test=StatTests.ks): '''Calculates the p-norm of the distances that have been calculated from the statistical test that has been applied on all the paired datasets. Parameters: paired_dats: a list of tuples or where each tuple contains the paired data lists from two datasets Options: p : integer that defines the order of p-norm test: Stat_tests\ Defines the statistical test to be used, based\ on the scipy available modules.\ Accepted tests: ks_2samp, wilcoxon, ttest Returns: A float corresponding to the p-norm of the distances that have been calculated. 0 corresponds to high similarity while 1 to low. ''' scores = np.array([compare_two(fL1, fL2, test=test).dist for fL1, fL2 in paired_dats]) return np.linalg.norm(scores, p)
python
def total_score(paired_dats, p=2, test=StatTests.ks): scores = np.array([compare_two(fL1, fL2, test=test).dist for fL1, fL2 in paired_dats]) return np.linalg.norm(scores, p)
[ "def", "total_score", "(", "paired_dats", ",", "p", "=", "2", ",", "test", "=", "StatTests", ".", "ks", ")", ":", "scores", "=", "np", ".", "array", "(", "[", "compare_two", "(", "fL1", ",", "fL2", ",", "test", "=", "test", ")", ".", "dist", "for", "fL1", ",", "fL2", "in", "paired_dats", "]", ")", "return", "np", ".", "linalg", ".", "norm", "(", "scores", ",", "p", ")" ]
Calculates the p-norm of the distances that have been calculated from the statistical test that has been applied on all the paired datasets. Parameters: paired_dats: a list of tuples, where each tuple contains the paired data lists from two datasets Options: p : integer that defines the order of the p-norm test: Stat_tests\ Defines the statistical test to be used, based\ on the available scipy modules.\ Accepted tests: ks_2samp, wilcoxon, ttest Returns: A float corresponding to the p-norm of the distances that have been calculated. 0 corresponds to high similarity while 1 to low.
[ "Calculates", "the", "p", "-", "norm", "of", "the", "distances", "that", "have", "been", "calculated", "from", "the", "statistical", "test", "that", "has", "been", "applied", "on", "all", "the", "paired", "datasets", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/stats.py#L182-L202
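A sketch of total_score() over two synthetic pairs of feature lists; p=2 gives the Euclidean norm of the per-pair KS distances.

    import numpy as np
    from neurom import stats

    pairs = [(np.random.normal(0, 1, 100), np.random.normal(0, 1, 100)),
             (np.random.exponential(1.0, 100), np.random.exponential(1.2, 100))]
    print(stats.total_score(pairs, p=2, test=stats.StatTests.ks))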
BlueBrain/NeuroM
neurom/core/_neuron.py
iter_neurites
def iter_neurites(obj, mapfun=None, filt=None, neurite_order=NeuriteIter.FileOrder): '''Iterator to a neurite, neuron or neuron population Applies optional neurite filter and mapping functions. Parameters: obj: a neurite, neuron or neuron population. mapfun: optional neurite mapping function. filt: optional neurite filter function. neurite_order (NeuriteIter): order upon which neurites should be iterated - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Examples: Get the number of points in each neurite in a neuron population >>> from neurom.core import iter_neurites >>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))] Get the number of points in each axon in a neuron population >>> import neurom as nm >>> from neurom.core import iter_neurites >>> filter = lambda n : n.type == nm.AXON >>> mapping = lambda n : len(n.points) >>> n_points = [n for n in iter_neurites(pop, mapping, filter)] ''' neurites = ((obj,) if isinstance(obj, Neurite) else obj.neurites if hasattr(obj, 'neurites') else obj) if neurite_order == NeuriteIter.NRN: last_position = max(NRN_ORDER.values()) + 1 neurites = sorted(neurites, key=lambda neurite: NRN_ORDER.get(neurite.type, last_position)) neurite_iter = iter(neurites) if filt is None else filter(filt, neurites) return neurite_iter if mapfun is None else map(mapfun, neurite_iter)
python
def iter_neurites(obj, mapfun=None, filt=None, neurite_order=NeuriteIter.FileOrder): neurites = ((obj,) if isinstance(obj, Neurite) else obj.neurites if hasattr(obj, 'neurites') else obj) if neurite_order == NeuriteIter.NRN: last_position = max(NRN_ORDER.values()) + 1 neurites = sorted(neurites, key=lambda neurite: NRN_ORDER.get(neurite.type, last_position)) neurite_iter = iter(neurites) if filt is None else filter(filt, neurites) return neurite_iter if mapfun is None else map(mapfun, neurite_iter)
[ "def", "iter_neurites", "(", "obj", ",", "mapfun", "=", "None", ",", "filt", "=", "None", ",", "neurite_order", "=", "NeuriteIter", ".", "FileOrder", ")", ":", "neurites", "=", "(", "(", "obj", ",", ")", "if", "isinstance", "(", "obj", ",", "Neurite", ")", "else", "obj", ".", "neurites", "if", "hasattr", "(", "obj", ",", "'neurites'", ")", "else", "obj", ")", "if", "neurite_order", "==", "NeuriteIter", ".", "NRN", ":", "last_position", "=", "max", "(", "NRN_ORDER", ".", "values", "(", ")", ")", "+", "1", "neurites", "=", "sorted", "(", "neurites", ",", "key", "=", "lambda", "neurite", ":", "NRN_ORDER", ".", "get", "(", "neurite", ".", "type", ",", "last_position", ")", ")", "neurite_iter", "=", "iter", "(", "neurites", ")", "if", "filt", "is", "None", "else", "filter", "(", "filt", ",", "neurites", ")", "return", "neurite_iter", "if", "mapfun", "is", "None", "else", "map", "(", "mapfun", ",", "neurite_iter", ")" ]
Iterator to a neurite, neuron or neuron population Applies optional neurite filter and mapping functions. Parameters: obj: a neurite, neuron or neuron population. mapfun: optional neurite mapping function. filt: optional neurite filter function. neurite_order (NeuriteIter): order upon which neurites should be iterated - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Examples: Get the number of points in each neurite in a neuron population >>> from neurom.core import iter_neurites >>> n_points = [n for n in iter_neurites(pop, lambda x : len(x.points))] Get the number of points in each axon in a neuron population >>> import neurom as nm >>> from neurom.core import iter_neurites >>> filter = lambda n : n.type == nm.AXON >>> mapping = lambda n : len(n.points) >>> n_points = [n for n in iter_neurites(pop, mapping, filter)]
[ "Iterator", "to", "a", "neurite", "neuron", "or", "neuron", "population" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L54-L90
BlueBrain/NeuroM
neurom/core/_neuron.py
iter_sections
def iter_sections(neurites, iterator_type=Tree.ipreorder, neurite_filter=None, neurite_order=NeuriteIter.FileOrder): '''Iterator to the sections in a neurite, neuron or neuron population. Parameters: neurites: neuron, population, neurite, or iterable containing neurite objects iterator_type: section iteration order within a given neurite. Must be one of: Tree.ipreorder: Depth-first pre-order iteration of tree nodes Tree.ipostorder: Depth-first post-order iteration of tree nodes Tree.iupstream: Iterate from a tree node to the root nodes Tree.ibifurcation_point: Iterator to bifurcation points Tree.ileaf: Iterator to all leaves of a tree neurite_filter: optional top level filter on properties of neurite objects. neurite_order (NeuriteIter): order upon which neurites should be iterated - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Examples: Get the number of points in each section of all the axons in a neuron population >>> import neurom as nm >>> from neurom.core import iter_sections >>> filter = lambda n : n.type == nm.AXON >>> n_points = [len(s.points) for s in iter_sections(pop, neurite_filter=filter)] ''' return chain.from_iterable( iterator_type(neurite.root_node) for neurite in iter_neurites(neurites, filt=neurite_filter, neurite_order=neurite_order))
python
def iter_sections(neurites, iterator_type=Tree.ipreorder, neurite_filter=None, neurite_order=NeuriteIter.FileOrder): return chain.from_iterable( iterator_type(neurite.root_node) for neurite in iter_neurites(neurites, filt=neurite_filter, neurite_order=neurite_order))
[ "def", "iter_sections", "(", "neurites", ",", "iterator_type", "=", "Tree", ".", "ipreorder", ",", "neurite_filter", "=", "None", ",", "neurite_order", "=", "NeuriteIter", ".", "FileOrder", ")", ":", "return", "chain", ".", "from_iterable", "(", "iterator_type", "(", "neurite", ".", "root_node", ")", "for", "neurite", "in", "iter_neurites", "(", "neurites", ",", "filt", "=", "neurite_filter", ",", "neurite_order", "=", "neurite_order", ")", ")" ]
Iterator to the sections in a neurite, neuron or neuron population. Parameters: neurites: neuron, population, neurite, or iterable containing neurite objects iterator_type: section iteration order within a given neurite. Must be one of: Tree.ipreorder: Depth-first pre-order iteration of tree nodes Tree.ipostorder: Depth-first post-order iteration of tree nodes Tree.iupstream: Iterate from a tree node to the root nodes Tree.ibifurcation_point: Iterator to bifurcation points Tree.ileaf: Iterator to all leaves of a tree neurite_filter: optional top level filter on properties of neurite objects. neurite_order (NeuriteIter): order upon which neurites should be iterated - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Examples: Get the number of points in each section of all the axons in a neuron population >>> import neurom as nm >>> from neurom.core import iter_sections >>> filter = lambda n : n.type == nm.AXON >>> n_points = [len(s.points) for s in iter_sections(pop, neurite_filter=filter)]
[ "Iterator", "to", "the", "sections", "in", "a", "neurite", "neuron", "or", "neuron", "population", "." ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L93-L126
BlueBrain/NeuroM
neurom/core/_neuron.py
iter_segments
def iter_segments(obj, neurite_filter=None, neurite_order=NeuriteIter.FileOrder): '''Return an iterator to the segments in a collection of neurites Parameters: obj: neuron, population, neurite, section, or iterable containing neurite objects neurite_filter: optional top level filter on properties of neurite objects neurite_order: order upon which neurites should be iterated. Values: - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Note: This is a convenience function provided for generic access to neuron segments. It may have a performance overhead WRT custom-made segment analysis functions that leverage numpy and section-wise iteration. ''' sections = iter((obj,) if isinstance(obj, Section) else iter_sections(obj, neurite_filter=neurite_filter, neurite_order=neurite_order)) return chain.from_iterable(zip(sec.points[:-1], sec.points[1:]) for sec in sections)
python
def iter_segments(obj, neurite_filter=None, neurite_order=NeuriteIter.FileOrder): sections = iter((obj,) if isinstance(obj, Section) else iter_sections(obj, neurite_filter=neurite_filter, neurite_order=neurite_order)) return chain.from_iterable(zip(sec.points[:-1], sec.points[1:]) for sec in sections)
[ "def", "iter_segments", "(", "obj", ",", "neurite_filter", "=", "None", ",", "neurite_order", "=", "NeuriteIter", ".", "FileOrder", ")", ":", "sections", "=", "iter", "(", "(", "obj", ",", ")", "if", "isinstance", "(", "obj", ",", "Section", ")", "else", "iter_sections", "(", "obj", ",", "neurite_filter", "=", "neurite_filter", ",", "neurite_order", "=", "neurite_order", ")", ")", "return", "chain", ".", "from_iterable", "(", "zip", "(", "sec", ".", "points", "[", ":", "-", "1", "]", ",", "sec", ".", "points", "[", "1", ":", "]", ")", "for", "sec", "in", "sections", ")" ]
Return an iterator to the segments in a collection of neurites Parameters: obj: neuron, population, neurite, section, or iterable containing neurite objects neurite_filter: optional top level filter on properties of neurite objects neurite_order: order upon which neurites should be iterated. Values: - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical Note: This is a convenience function provided for generic access to neuron segments. It may have a performance overhead WRT custom-made segment analysis functions that leverage numpy and section-wise iteration.
[ "Return", "an", "iterator", "to", "the", "segments", "in", "a", "collection", "of", "neurites" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L129-L150
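A sketch using iter_segments() to accumulate segment lengths; it assumes iter_segments is exported from neurom.core like the sibling iterators, that 'some_neuron.swc' exists, and that the first three point columns are X, Y, Z.

    import numpy as np
    import neurom as nm
    from neurom.core import iter_segments

    nrn = nm.load_neuron('some_neuron.swc')   # hypothetical morphology file
    axon_only = lambda n: n.type == nm.AXON
    seg_lengths = np.array([np.linalg.norm(p1[:3] - p0[:3])
                            for p0, p1 in iter_segments(nrn, neurite_filter=axon_only)])
    print(len(seg_lengths), seg_lengths.sum())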
BlueBrain/NeuroM
neurom/core/_neuron.py
graft_neuron
def graft_neuron(root_section): '''Returns a neuron starting at root_section''' assert isinstance(root_section, Section) return Neuron(soma=Soma(root_section.points[:1]), neurites=[Neurite(root_section)])
python
def graft_neuron(root_section): assert isinstance(root_section, Section) return Neuron(soma=Soma(root_section.points[:1]), neurites=[Neurite(root_section)])
[ "def", "graft_neuron", "(", "root_section", ")", ":", "assert", "isinstance", "(", "root_section", ",", "Section", ")", "return", "Neuron", "(", "soma", "=", "Soma", "(", "root_section", ".", "points", "[", ":", "1", "]", ")", ",", "neurites", "=", "[", "Neurite", "(", "root_section", ")", "]", ")" ]
Returns a neuron starting at root_section
[ "Returns", "a", "neuron", "starting", "at", "root_section" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L153-L156
BlueBrain/NeuroM
neurom/core/_neuron.py
Neurite.points
def points(self): '''Return unordered array with all the points in this neurite''' # add all points in a section except the first one, which is a duplicate _pts = [v for s in self.root_node.ipreorder() for v in s.points[1:, COLS.XYZR]] # except for the very first point, which is not a duplicate _pts.insert(0, self.root_node.points[0][COLS.XYZR]) return np.array(_pts)
python
def points(self): _pts = [v for s in self.root_node.ipreorder() for v in s.points[1:, COLS.XYZR]] _pts.insert(0, self.root_node.points[0][COLS.XYZR]) return np.array(_pts)
[ "def", "points", "(", "self", ")", ":", "# add all points in a section except the first one, which is a duplicate", "_pts", "=", "[", "v", "for", "s", "in", "self", ".", "root_node", ".", "ipreorder", "(", ")", "for", "v", "in", "s", ".", "points", "[", "1", ":", ",", "COLS", ".", "XYZR", "]", "]", "# except for the very first point, which is not a duplicate", "_pts", ".", "insert", "(", "0", ",", "self", ".", "root_node", ".", "points", "[", "0", "]", "[", "COLS", ".", "XYZR", "]", ")", "return", "np", ".", "array", "(", "_pts", ")" ]
Return unordered array with all the points in this neurite
[ "Return", "unordered", "array", "with", "all", "the", "points", "in", "this", "neurite" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L211-L218
BlueBrain/NeuroM
neurom/core/_neuron.py
Neurite.transform
def transform(self, trans): '''Return a copy of this neurite with a 3D transformation applied''' clone = deepcopy(self) for n in clone.iter_sections(): n.points[:, 0:3] = trans(n.points[:, 0:3]) return clone
python
def transform(self, trans): clone = deepcopy(self) for n in clone.iter_sections(): n.points[:, 0:3] = trans(n.points[:, 0:3]) return clone
[ "def", "transform", "(", "self", ",", "trans", ")", ":", "clone", "=", "deepcopy", "(", "self", ")", "for", "n", "in", "clone", ".", "iter_sections", "(", ")", ":", "n", ".", "points", "[", ":", ",", "0", ":", "3", "]", "=", "trans", "(", "n", ".", "points", "[", ":", ",", "0", ":", "3", "]", ")", "return", "clone" ]
Return a copy of this neurite with a 3D transformation applied
[ "Return", "a", "copy", "of", "this", "neurite", "with", "a", "3D", "transformation", "applied" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L247-L253
BlueBrain/NeuroM
neurom/core/_neuron.py
Neurite.iter_sections
def iter_sections(self, order=Tree.ipreorder, neurite_order=NeuriteIter.FileOrder): '''iteration over section nodes Parameters: order: section iteration order within a given neurite. Must be one of: Tree.ipreorder: Depth-first pre-order iteration of tree nodes Tree.ipostorder: Depth-first post-order iteration of tree nodes Tree.iupstream: Iterate from a tree node to the root nodes Tree.ibifurcation_point: Iterator to bifurcation points Tree.ileaf: Iterator to all leaves of a tree neurite_order: order upon which neurites should be iterated. Values: - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical ''' return iter_sections(self, iterator_type=order, neurite_order=neurite_order)
python
def iter_sections(self, order=Tree.ipreorder, neurite_order=NeuriteIter.FileOrder): return iter_sections(self, iterator_type=order, neurite_order=neurite_order)
[ "def", "iter_sections", "(", "self", ",", "order", "=", "Tree", ".", "ipreorder", ",", "neurite_order", "=", "NeuriteIter", ".", "FileOrder", ")", ":", "return", "iter_sections", "(", "self", ",", "iterator_type", "=", "order", ",", "neurite_order", "=", "neurite_order", ")" ]
iteration over section nodes Parameters: order: section iteration order within a given neurite. Must be one of: Tree.ipreorder: Depth-first pre-order iteration of tree nodes Tree.ipostorder: Depth-first post-order iteration of tree nodes Tree.iupstream: Iterate from a tree node to the root nodes Tree.ibifurcation_point: Iterator to bifurcation points Tree.ileaf: Iterator to all leaves of a tree neurite_order: order upon which neurites should be iterated. Values: - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
[ "iteration", "over", "section", "nodes" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/_neuron.py#L255-L270
BlueBrain/NeuroM
neurom/apps/morph_stats.py
eval_stats
def eval_stats(values, mode): '''Extract a summary statistic from an array of list of values Parameters: values: numpy array of values mode: summary stat to extract. One of ['min', 'max', 'median', 'mean', 'std', 'raw'] Note: fails silently if values is empty, and None is returned ''' if mode == 'raw': return values.tolist() if mode == 'total': mode = 'sum' try: return getattr(np, mode)(values, axis=0) except ValueError: pass return None
python
def eval_stats(values, mode): if mode == 'raw': return values.tolist() if mode == 'total': mode = 'sum' try: return getattr(np, mode)(values, axis=0) except ValueError: pass return None
[ "def", "eval_stats", "(", "values", ",", "mode", ")", ":", "if", "mode", "==", "'raw'", ":", "return", "values", ".", "tolist", "(", ")", "if", "mode", "==", "'total'", ":", "mode", "=", "'sum'", "try", ":", "return", "getattr", "(", "np", ",", "mode", ")", "(", "values", ",", "axis", "=", "0", ")", "except", "ValueError", ":", "pass", "return", "None" ]
Extract a summary statistic from an array or list of values Parameters: values: numpy array of values mode: summary stat to extract. One of ['min', 'max', 'median', 'mean', 'std', 'total', 'raw'] Note: fails silently if values is empty, and None is returned
[ "Extract", "a", "summary", "statistic", "from", "an", "array", "of", "list", "of", "values" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/morph_stats.py#L40-L59
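A small sketch of eval_stats() on a plain numpy array, covering the 'total' and 'raw' special cases handled in the code above.

    import numpy as np
    from neurom.apps.morph_stats import eval_stats

    values = np.array([1.0, 2.0, 3.0])
    print(eval_stats(values, 'mean'))   # 2.0
    print(eval_stats(values, 'total'))  # mapped to np.sum -> 6.0
    print(eval_stats(values, 'raw'))    # [1.0, 2.0, 3.0]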
BlueBrain/NeuroM
neurom/apps/morph_stats.py
_stat_name
def _stat_name(feat_name, stat_mode): '''Set stat name based on feature name and stat mode''' if feat_name[-1] == 's': feat_name = feat_name[:-1] if feat_name == 'soma_radii': feat_name = 'soma_radius' if stat_mode == 'raw': return feat_name return '%s_%s' % (stat_mode, feat_name)
python
def _stat_name(feat_name, stat_mode): if feat_name[-1] == 's': feat_name = feat_name[:-1] if feat_name == 'soma_radii': feat_name = 'soma_radius' if stat_mode == 'raw': return feat_name return '%s_%s' % (stat_mode, feat_name)
[ "def", "_stat_name", "(", "feat_name", ",", "stat_mode", ")", ":", "if", "feat_name", "[", "-", "1", "]", "==", "'s'", ":", "feat_name", "=", "feat_name", "[", ":", "-", "1", "]", "if", "feat_name", "==", "'soma_radii'", ":", "feat_name", "=", "'soma_radius'", "if", "stat_mode", "==", "'raw'", ":", "return", "feat_name", "return", "'%s_%s'", "%", "(", "stat_mode", ",", "feat_name", ")" ]
Set stat name based on feature name and stat mode
[ "Set", "stat", "name", "based", "on", "feature", "name", "and", "stat", "mode" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/morph_stats.py#L62-L71
BlueBrain/NeuroM
neurom/apps/morph_stats.py
extract_stats
def extract_stats(neurons, config): '''Extract stats from neurons''' stats = defaultdict(dict) for ns, modes in config['neurite'].items(): for n in config['neurite_type']: n = _NEURITE_MAP[n] for mode in modes: stat_name = _stat_name(ns, mode) stat = eval_stats(nm.get(ns, neurons, neurite_type=n), mode) if stat is None or not stat.shape: stats[n.name][stat_name] = stat else: assert stat.shape in ((3, ), ), \ 'Statistic must create a 1x3 result' for i, suffix in enumerate('XYZ'): compound_stat_name = stat_name + '_' + suffix stats[n.name][compound_stat_name] = stat[i] for ns, modes in config['neuron'].items(): for mode in modes: stat_name = _stat_name(ns, mode) stats[stat_name] = eval_stats(nm.get(ns, neurons), mode) return stats
python
def extract_stats(neurons, config): stats = defaultdict(dict) for ns, modes in config['neurite'].items(): for n in config['neurite_type']: n = _NEURITE_MAP[n] for mode in modes: stat_name = _stat_name(ns, mode) stat = eval_stats(nm.get(ns, neurons, neurite_type=n), mode) if stat is None or not stat.shape: stats[n.name][stat_name] = stat else: assert stat.shape in ((3, ), ), \ 'Statistic must create a 1x3 result' for i, suffix in enumerate('XYZ'): compound_stat_name = stat_name + '_' + suffix stats[n.name][compound_stat_name] = stat[i] for ns, modes in config['neuron'].items(): for mode in modes: stat_name = _stat_name(ns, mode) stats[stat_name] = eval_stats(nm.get(ns, neurons), mode) return stats
[ "def", "extract_stats", "(", "neurons", ",", "config", ")", ":", "stats", "=", "defaultdict", "(", "dict", ")", "for", "ns", ",", "modes", "in", "config", "[", "'neurite'", "]", ".", "items", "(", ")", ":", "for", "n", "in", "config", "[", "'neurite_type'", "]", ":", "n", "=", "_NEURITE_MAP", "[", "n", "]", "for", "mode", "in", "modes", ":", "stat_name", "=", "_stat_name", "(", "ns", ",", "mode", ")", "stat", "=", "eval_stats", "(", "nm", ".", "get", "(", "ns", ",", "neurons", ",", "neurite_type", "=", "n", ")", ",", "mode", ")", "if", "stat", "is", "None", "or", "not", "stat", ".", "shape", ":", "stats", "[", "n", ".", "name", "]", "[", "stat_name", "]", "=", "stat", "else", ":", "assert", "stat", ".", "shape", "in", "(", "(", "3", ",", ")", ",", ")", ",", "'Statistic must create a 1x3 result'", "for", "i", ",", "suffix", "in", "enumerate", "(", "'XYZ'", ")", ":", "compound_stat_name", "=", "stat_name", "+", "'_'", "+", "suffix", "stats", "[", "n", ".", "name", "]", "[", "compound_stat_name", "]", "=", "stat", "[", "i", "]", "for", "ns", ",", "modes", "in", "config", "[", "'neuron'", "]", ".", "items", "(", ")", ":", "for", "mode", "in", "modes", ":", "stat_name", "=", "_stat_name", "(", "ns", ",", "mode", ")", "stats", "[", "stat_name", "]", "=", "eval_stats", "(", "nm", ".", "get", "(", "ns", ",", "neurons", ")", ",", "mode", ")", "return", "stats" ]
Extract stats from neurons
[ "Extract", "stats", "from", "neurons" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/morph_stats.py#L74-L100
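A configuration sketch for extract_stats(), modelled on the 'neurite' / 'neurite_type' / 'neuron' keys the function reads; the feature names, the _NEURITE_MAP labels ('AXON', 'ALL') and the input file are assumptions, not taken from the record.

    import neurom as nm
    from neurom.apps.morph_stats import extract_stats

    config = {
        'neurite': {'section_lengths': ['max', 'total']},
        'neurite_type': ['AXON', 'ALL'],          # keys resolved through _NEURITE_MAP
        'neuron': {'soma_radii': ['mean']},
    }
    nrn = nm.load_neuron('some_neuron.swc')       # hypothetical morphology file
    stats = extract_stats(nrn, config)
    print(stats['axon']['total_section_length'], stats['mean_soma_radius'])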
BlueBrain/NeuroM
neurom/apps/morph_stats.py
get_header
def get_header(results): '''Extracts the headers, using the first value in the dict as the template''' ret = ['name', ] values = next(iter(results.values())) for k, v in values.items(): if isinstance(v, dict): for metric in v.keys(): ret.append('%s:%s' % (k, metric)) else: ret.append(k) return ret
python
def get_header(results): ret = ['name', ] values = next(iter(results.values())) for k, v in values.items(): if isinstance(v, dict): for metric in v.keys(): ret.append('%s:%s' % (k, metric)) else: ret.append(k) return ret
[ "def", "get_header", "(", "results", ")", ":", "ret", "=", "[", "'name'", ",", "]", "values", "=", "next", "(", "iter", "(", "results", ".", "values", "(", ")", ")", ")", "for", "k", ",", "v", "in", "values", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "for", "metric", "in", "v", ".", "keys", "(", ")", ":", "ret", ".", "append", "(", "'%s:%s'", "%", "(", "k", ",", "metric", ")", ")", "else", ":", "ret", ".", "append", "(", "k", ")", "return", "ret" ]
Extracts the headers, using the first value in the dict as the template
[ "Extracts", "the", "headers", "using", "the", "first", "value", "in", "the", "dict", "as", "the", "template" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/morph_stats.py#L103-L113
BlueBrain/NeuroM
neurom/apps/morph_stats.py
generate_flattened_dict
def generate_flattened_dict(headers, results): '''extract from results the fields in the headers list''' for name, values in results.items(): row = [] for header in headers: if header == 'name': row.append(name) elif ':' in header: neurite_type, metric = header.split(':') row.append(values[neurite_type][metric]) else: row.append(values[header]) yield row
python
def generate_flattened_dict(headers, results): for name, values in results.items(): row = [] for header in headers: if header == 'name': row.append(name) elif ':' in header: neurite_type, metric = header.split(':') row.append(values[neurite_type][metric]) else: row.append(values[header]) yield row
[ "def", "generate_flattened_dict", "(", "headers", ",", "results", ")", ":", "for", "name", ",", "values", "in", "results", ".", "items", "(", ")", ":", "row", "=", "[", "]", "for", "header", "in", "headers", ":", "if", "header", "==", "'name'", ":", "row", ".", "append", "(", "name", ")", "elif", "':'", "in", "header", ":", "neurite_type", ",", "metric", "=", "header", ".", "split", "(", "':'", ")", "row", ".", "append", "(", "values", "[", "neurite_type", "]", "[", "metric", "]", ")", "else", ":", "row", ".", "append", "(", "values", "[", "header", "]", ")", "yield", "row" ]
extract from results the fields in the headers list
[ "extract", "from", "results", "the", "fields", "in", "the", "headers", "list" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/morph_stats.py#L116-L128
BlueBrain/NeuroM
neurom/core/tree.py
Tree.add_child
def add_child(self, tree): '''Add a child to the list of this tree's children This tree becomes the added tree's parent ''' tree.parent = self self.children.append(tree) return tree
python
def add_child(self, tree): tree.parent = self self.children.append(tree) return tree
[ "def", "add_child", "(", "self", ",", "tree", ")", ":", "tree", ".", "parent", "=", "self", "self", ".", "children", ".", "append", "(", "tree", ")", "return", "tree" ]
Add a child to the list of this tree's children This tree becomes the added tree's parent
[ "Add", "a", "child", "to", "the", "list", "of", "this", "tree", "s", "children" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/tree.py#L41-L48
BlueBrain/NeuroM
neurom/core/tree.py
Tree.ipreorder
def ipreorder(self): '''Depth-first pre-order iteration of tree nodes''' children = deque((self, )) while children: cur_node = children.pop() children.extend(reversed(cur_node.children)) yield cur_node
python
def ipreorder(self): children = deque((self, )) while children: cur_node = children.pop() children.extend(reversed(cur_node.children)) yield cur_node
[ "def", "ipreorder", "(", "self", ")", ":", "children", "=", "deque", "(", "(", "self", ",", ")", ")", "while", "children", ":", "cur_node", "=", "children", ".", "pop", "(", ")", "children", ".", "extend", "(", "reversed", "(", "cur_node", ".", "children", ")", ")", "yield", "cur_node" ]
Depth-first pre-order iteration of tree nodes
[ "Depth", "-", "first", "pre", "-", "order", "iteration", "of", "tree", "nodes" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/tree.py#L66-L72
BlueBrain/NeuroM
neurom/core/tree.py
Tree.ipostorder
def ipostorder(self): '''Depth-first post-order iteration of tree nodes''' children = [self, ] seen = set() while children: cur_node = children[-1] if cur_node not in seen: seen.add(cur_node) children.extend(reversed(cur_node.children)) else: children.pop() yield cur_node
python
def ipostorder(self): children = [self, ] seen = set() while children: cur_node = children[-1] if cur_node not in seen: seen.add(cur_node) children.extend(reversed(cur_node.children)) else: children.pop() yield cur_node
[ "def", "ipostorder", "(", "self", ")", ":", "children", "=", "[", "self", ",", "]", "seen", "=", "set", "(", ")", "while", "children", ":", "cur_node", "=", "children", "[", "-", "1", "]", "if", "cur_node", "not", "in", "seen", ":", "seen", ".", "add", "(", "cur_node", ")", "children", ".", "extend", "(", "reversed", "(", "cur_node", ".", "children", ")", ")", "else", ":", "children", ".", "pop", "(", ")", "yield", "cur_node" ]
Depth-first post-order iteration of tree nodes
[ "Depth", "-", "first", "post", "-", "order", "iteration", "of", "tree", "nodes" ]
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/tree.py#L74-L85
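A small sketch contrasting the two traversal orders defined above, using bare Tree nodes from neurom.core.tree and a side dict for labels (Tree nodes carry no label of their own).

    from neurom.core.tree import Tree

    root = Tree()
    a = root.add_child(Tree())
    b = root.add_child(Tree())
    c = a.add_child(Tree())
    labels = {root: 'root', a: 'a', b: 'b', c: 'c'}

    print([labels[n] for n in root.ipreorder()])   # ['root', 'a', 'c', 'b']
    print([labels[n] for n in root.ipostorder()])  # ['c', 'a', 'b', 'root']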