Dataset columns:

- repository_name: string (length 5 to 67)
- func_path_in_repository: string (length 4 to 234)
- func_name: string (length 0 to 314)
- whole_func_string: string (length 52 to 3.87M)
- language: string (6 classes)
- func_code_string: string (length 39 to 1.84M)
- func_code_tokens: list (length 15 to 672k)
- func_documentation_string: string (length 1 to 47.2k)
- func_documentation_tokens: list (length 1 to 3.92k)
- split_name: string (1 class)
- func_code_url: string (length 85 to 339)
BlueBrain/NeuroM
|
neurom/utils.py
|
deprecated
|
def deprecated(fun_name=None, msg=""):
    '''Issue a deprecation warning for a function'''
    def _deprecated(fun):
        '''Issue a deprecation warning for a function'''
        @wraps(fun)
        def _wrapper(*args, **kwargs):
            '''Issue deprecation warning and forward arguments to fun'''
            name = fun_name if fun_name is not None else fun.__name__
            _warn_deprecated('Call to deprecated function %s. %s' % (name, msg))
            return fun(*args, **kwargs)
        return _wrapper
    return _deprecated
|
python
|
def deprecated(fun_name=None, msg=""):
    def _deprecated(fun):
        @wraps(fun)
        def _wrapper(*args, **kwargs):
            name = fun_name if fun_name is not None else fun.__name__
            _warn_deprecated('Call to deprecated function %s. %s' % (name, msg))
            return fun(*args, **kwargs)
        return _wrapper
    return _deprecated
|
Issue a deprecation warning for a function
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/utils.py#L86-L99
|
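For context, a minimal standalone sketch of how a decorator like this is applied. `_warn_deprecated` is internal to NeuroM, so this version substitutes the stdlib `warnings` module; everything else mirrors the function above.

from functools import wraps
import warnings


def deprecated(fun_name=None, msg=""):
    '''Issue a deprecation warning for a function (stdlib-only sketch)'''
    def _deprecated(fun):
        @wraps(fun)
        def _wrapper(*args, **kwargs):
            name = fun_name if fun_name is not None else fun.__name__
            # warnings.warn stands in for NeuroM's internal _warn_deprecated
            warnings.warn('Call to deprecated function %s. %s' % (name, msg),
                          DeprecationWarning, stacklevel=2)
            return fun(*args, **kwargs)
        return _wrapper
    return _deprecated


@deprecated(msg="Use new_api() instead.")
def old_api(x):
    return x * 2


old_api(3)  # emits a DeprecationWarning, then returns 6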
BlueBrain/NeuroM
|
neurom/check/__init__.py
|
check_wrapper
|
def check_wrapper(fun):
    '''Decorate a checking function'''
    @wraps(fun)
    def _wrapper(*args, **kwargs):
        '''Sets the title property of the result of running a checker'''
        title = fun.__name__.replace('_', ' ').capitalize()
        result = fun(*args, **kwargs)
        result.title = title
        return result
    return _wrapper
|
python
|
def check_wrapper(fun):
    @wraps(fun)
    def _wrapper(*args, **kwargs):
        title = fun.__name__.replace('_', ' ').capitalize()
        result = fun(*args, **kwargs)
        result.title = title
        return result
    return _wrapper
|
Decorate a checking function
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/__init__.py#L34-L44
|
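A hypothetical usage sketch: the checkers wrapped by `check_wrapper` return result objects carrying `status` and `title` attributes, so a simple stand-in class is enough to see the title-setting behaviour.

from functools import wraps


class Result:
    # hypothetical stand-in for NeuroM's check result objects
    def __init__(self, status):
        self.status = status
        self.title = None


def check_wrapper(fun):
    @wraps(fun)
    def _wrapper(*args, **kwargs):
        title = fun.__name__.replace('_', ' ').capitalize()
        result = fun(*args, **kwargs)
        result.title = title
        return result
    return _wrapper


@check_wrapper
def has_soma(data):
    return Result(status=bool(data.get('soma')))


res = has_soma({'soma': [0.0, 0.0, 0.0]})
print(res.title, res.status)  # -> Has soma True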
BlueBrain/NeuroM
|
neurom/check/runner.py
|
CheckRunner.run
|
def run(self, path):
    '''Test a bunch of files and return a summary JSON report'''
    SEPARATOR = '=' * 40
    summary = {}
    res = True

    for _f in utils.get_files_by_path(path):
        L.info(SEPARATOR)
        status, summ = self._check_file(_f)
        res &= status
        if summ is not None:
            summary.update(summ)

    L.info(SEPARATOR)

    status = 'PASS' if res else 'FAIL'
    return {'files': summary, 'STATUS': status}
|
python
|
def run(self, path):
    SEPARATOR = '=' * 40
    summary = {}
    res = True
    for _f in utils.get_files_by_path(path):
        L.info(SEPARATOR)
        status, summ = self._check_file(_f)
        res &= status
        if summ is not None:
            summary.update(summ)
    L.info(SEPARATOR)
    status = 'PASS' if res else 'FAIL'
    return {'files': summary, 'STATUS': status}
|
Test a bunch of files and return a summary JSON report
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/runner.py#L53-L71
|
BlueBrain/NeuroM
|
neurom/check/runner.py
|
CheckRunner._do_check
|
def _do_check(self, obj, check_module, check_str):
    '''Run a check function on obj'''
    opts = self._config['options']
    if check_str in opts:
        fargs = opts[check_str]
        if isinstance(fargs, list):
            out = check_wrapper(getattr(check_module, check_str))(obj, *fargs)
        else:
            out = check_wrapper(getattr(check_module, check_str))(obj, fargs)
    else:
        out = check_wrapper(getattr(check_module, check_str))(obj)

    try:
        if out.info:
            L.debug('%s: %d failing ids detected: %s',
                    out.title, len(out.info), out.info)
    except TypeError:  # pragma: no cover
        pass

    return out
|
python
|
def _do_check(self, obj, check_module, check_str):
    opts = self._config['options']
    if check_str in opts:
        fargs = opts[check_str]
        if isinstance(fargs, list):
            out = check_wrapper(getattr(check_module, check_str))(obj, *fargs)
        else:
            out = check_wrapper(getattr(check_module, check_str))(obj, fargs)
    else:
        out = check_wrapper(getattr(check_module, check_str))(obj)
    try:
        if out.info:
            L.debug('%s: %d failing ids detected: %s',
                    out.title, len(out.info), out.info)
    except TypeError:
        pass
    return out
|
Run a check function on obj
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/runner.py#L73-L92
|
BlueBrain/NeuroM
|
neurom/check/runner.py
|
CheckRunner._check_loop
|
def _check_loop(self, obj, check_mod_str):
    '''Run all the checks in a check_module'''
    check_module = self._check_modules[check_mod_str]
    checks = self._config['checks'][check_mod_str]

    result = True
    summary = OrderedDict()
    for check in checks:
        ok = self._do_check(obj, check_module, check)
        summary[ok.title] = ok.status
        result &= ok.status

    return result, summary
|
python
|
def _check_loop(self, obj, check_mod_str):
    check_module = self._check_modules[check_mod_str]
    checks = self._config['checks'][check_mod_str]
    result = True
    summary = OrderedDict()
    for check in checks:
        ok = self._do_check(obj, check_module, check)
        summary[ok.title] = ok.status
        result &= ok.status
    return result, summary
|
Run all the checks in a check_module
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/runner.py#L94-L105
|
BlueBrain/NeuroM
|
neurom/check/runner.py
|
CheckRunner._check_file
|
def _check_file(self, f):
    '''Run tests on a morphology file'''
    L.info('File: %s', f)

    full_result = True
    full_summary = OrderedDict()
    try:
        data = load_data(f)
    except Exception as e:  # pylint: disable=W0703
        L.error('Failed to load data... skipping tests for this file')
        L.error(e.args)
        return False, {f: OrderedDict([('ALL', False)])}

    try:
        result, summary = self._check_loop(data, 'structural_checks')
        full_result &= result
        full_summary.update(summary)

        nrn = fst_core.FstNeuron(data)
        result, summary = self._check_loop(nrn, 'neuron_checks')
        full_result &= result
        full_summary.update(summary)
    except Exception as e:  # pylint: disable=W0703
        L.error('Check failed: %s', str(type(e)) + str(e.args))
        full_result = False

    full_summary['ALL'] = full_result
    for m, s in full_summary.items():
        self._log_msg(m, s)

    return full_result, {f: full_summary}
|
python
|
def _check_file(self, f):
    L.info('File: %s', f)
    full_result = True
    full_summary = OrderedDict()
    try:
        data = load_data(f)
    except Exception as e:
        L.error('Failed to load data... skipping tests for this file')
        L.error(e.args)
        return False, {f: OrderedDict([('ALL', False)])}
    try:
        result, summary = self._check_loop(data, 'structural_checks')
        full_result &= result
        full_summary.update(summary)
        nrn = fst_core.FstNeuron(data)
        result, summary = self._check_loop(nrn, 'neuron_checks')
        full_result &= result
        full_summary.update(summary)
    except Exception as e:
        L.error('Check failed: %s', str(type(e)) + str(e.args))
        full_result = False
    full_summary['ALL'] = full_result
    for m, s in full_summary.items():
        self._log_msg(m, s)
    return full_result, {f: full_summary}
|
Run tests on a morphology file
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/runner.py#L107-L138
|
BlueBrain/NeuroM
|
neurom/check/runner.py
|
CheckRunner._log_msg
|
def _log_msg(self, msg, ok):
    '''Helper to log message to the right level'''
    if self._config['color']:
        CGREEN, CRED, CEND = '\033[92m', '\033[91m', '\033[0m'
    else:
        CGREEN = CRED = CEND = ''

    LOG_LEVELS = {False: logging.ERROR, True: logging.INFO}

    # pylint: disable=logging-not-lazy
    L.log(LOG_LEVELS[ok],
          '%35s %s' + CEND, msg, CGREEN + 'PASS' if ok else CRED + 'FAIL')
|
python
|
def _log_msg(self, msg, ok):
    if self._config['color']:
        CGREEN, CRED, CEND = '\033[92m', '\033[91m', '\033[0m'
    else:
        CGREEN = CRED = CEND = ''
    LOG_LEVELS = {False: logging.ERROR, True: logging.INFO}
    L.log(LOG_LEVELS[ok],
          '%35s %s' + CEND, msg, CGREEN + 'PASS' if ok else CRED + 'FAIL')
|
Helper to log message to the right level
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/runner.py#L140-L151
|
BlueBrain/NeuroM
|
neurom/check/runner.py
|
CheckRunner._sanitize_config
|
def _sanitize_config(config):
    '''check that the config has the correct keys, add missing keys if necessary'''
    if 'checks' in config:
        checks = config['checks']
        if 'structural_checks' not in checks:
            checks['structural_checks'] = []
        if 'neuron_checks' not in checks:
            checks['neuron_checks'] = []
    else:
        raise ConfigError('Need to have "checks" in the config')

    if 'options' not in config:
        L.debug('Using default options')
        config['options'] = {}

    if 'color' not in config:
        config['color'] = False

    return config
|
python
|
def _sanitize_config(config):
    if 'checks' in config:
        checks = config['checks']
        if 'structural_checks' not in checks:
            checks['structural_checks'] = []
        if 'neuron_checks' not in checks:
            checks['neuron_checks'] = []
    else:
        raise ConfigError('Need to have "checks" in the config')
    if 'options' not in config:
        L.debug('Using default options')
        config['options'] = {}
    if 'color' not in config:
        config['color'] = False
    return config
|
check that the config has the correct keys, add missing keys if necessary
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/runner.py#L154-L172
|
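To make the expected shape concrete, here is a hypothetical config before and after sanitization; the check name is made up, only the key names come from the function above.

config = {'checks': {'structural_checks': ['has_sequential_ids']}}

# After _sanitize_config(config), the missing keys are filled in:
# {'checks': {'structural_checks': ['has_sequential_ids'],
#             'neuron_checks': []},
#  'options': {},
#  'color': False}
#
# A config without a top-level 'checks' key raises ConfigError instead.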
BlueBrain/NeuroM
|
neurom/io/swc.py
|
read
|
def read(filename, data_wrapper=DataWrapper):
    '''Read an SWC file and return a tuple of data, format.'''
    data = np.loadtxt(filename)
    if len(np.shape(data)) == 1:
        data = np.reshape(data, (1, -1))
    data = data[:, [X, Y, Z, R, TYPE, ID, P]]
    return data_wrapper(data, 'SWC', None)
|
python
|
def read(filename, data_wrapper=DataWrapper):
    data = np.loadtxt(filename)
    if len(np.shape(data)) == 1:
        data = np.reshape(data, (1, -1))
    data = data[:, [X, Y, Z, R, TYPE, ID, P]]
    return data_wrapper(data, 'SWC', None)
|
Read an SWC file and return a tuple of data, format.
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/swc.py#L47-L53
|
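A self-contained sketch of the same steps, assuming the standard SWC column order (ID, TYPE, X, Y, Z, R, PARENT) for the index constants used above:

import io

import numpy as np

# standard SWC columns: ID TYPE X Y Z R PARENT
ID, TYPE, X, Y, Z, R, P = 0, 1, 2, 3, 4, 5, 6

swc = io.StringIO("1 1 0.0 0.0 0.0 1.0 -1\n"
                  "2 3 0.0 1.0 0.0 0.5 1\n")
data = np.loadtxt(swc)
if len(np.shape(data)) == 1:  # a one-point file loads as a 1-D array
    data = np.reshape(data, (1, -1))
data = data[:, [X, Y, Z, R, TYPE, ID, P]]  # reorder to the internal layout
print(data.shape)  # (2, 7)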
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
_merge_sections
|
def _merge_sections(sec_a, sec_b):
    '''Merge two sections

    Merges sec_a into sec_b and sets sec_a attributes to default
    '''
    sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
    sec_b.ntype = sec_a.ntype
    sec_b.pid = sec_a.pid

    sec_a.ids = []
    sec_a.pid = -1
    sec_a.ntype = 0
|
python
|
def _merge_sections(sec_a, sec_b):
    sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
    sec_b.ntype = sec_a.ntype
    sec_b.pid = sec_a.pid
    sec_a.ids = []
    sec_a.pid = -1
    sec_a.ntype = 0
|
Merge two sections
Merges sec_a into sec_b and sets sec_a attributes to default
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L89-L100
|
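A runnable illustration with a minimal stand-in for the section objects (only the three attributes touched above):

class Sec:
    # minimal stand-in: the real sections carry more state
    def __init__(self, ids, ntype, pid):
        self.ids, self.ntype, self.pid = ids, ntype, pid


def _merge_sections(sec_a, sec_b):
    sec_b.ids = list(sec_a.ids) + list(sec_b.ids[1:])
    sec_b.ntype = sec_a.ntype
    sec_b.pid = sec_a.pid
    sec_a.ids, sec_a.pid, sec_a.ntype = [], -1, 0


a = Sec([0, 1, 2], ntype=3, pid=-1)
b = Sec([2, 3, 4], ntype=3, pid=0)
_merge_sections(a, b)
print(b.ids)  # [0, 1, 2, 3, 4] -- the shared point 2 is not duplicated
print(a.ids)  # [] -- sec_a is reset to defaults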
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
_section_end_points
|
def _section_end_points(structure_block, id_map):
    '''Get the section end-points'''
    soma_idx = structure_block[:, TYPE] == POINT_TYPE.SOMA
    soma_ids = structure_block[soma_idx, ID]
    neurite_idx = structure_block[:, TYPE] != POINT_TYPE.SOMA
    neurite_rows = structure_block[neurite_idx, :]
    soma_end_pts = set(id_map[id_]
                       for id_ in soma_ids[np.in1d(soma_ids, neurite_rows[:, PID])])

    # end points have either no children or more than one
    # ie: leaf or multifurcation nodes
    n_children = defaultdict(int)
    for row in structure_block:
        n_children[row[PID]] += 1

    end_pts = set(i for i, row in enumerate(structure_block)
                  if n_children[row[ID]] != 1)

    return end_pts.union(soma_end_pts)
|
python
|
def _section_end_points(structure_block, id_map):
    soma_idx = structure_block[:, TYPE] == POINT_TYPE.SOMA
    soma_ids = structure_block[soma_idx, ID]
    neurite_idx = structure_block[:, TYPE] != POINT_TYPE.SOMA
    neurite_rows = structure_block[neurite_idx, :]
    soma_end_pts = set(id_map[id_]
                       for id_ in soma_ids[np.in1d(soma_ids, neurite_rows[:, PID])])
    n_children = defaultdict(int)
    for row in structure_block:
        n_children[row[PID]] += 1
    end_pts = set(i for i, row in enumerate(structure_block)
                  if n_children[row[ID]] != 1)
    return end_pts.union(soma_end_pts)
|
Get the section end-points
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L103-L120
|
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
_extract_sections
|
def _extract_sections(data_block):
    '''Make a list of sections from an SWC-style data wrapper block'''
    structure_block = data_block[:, COLS.TYPE:COLS.COL_COUNT].astype(np.int)

    # SWC ID -> structure_block position
    id_map = {-1: -1}
    for i, row in enumerate(structure_block):
        id_map[row[ID]] = i

    # end points have either no children, more than one, or are the start
    # of a new gap
    sec_end_pts = _section_end_points(structure_block, id_map)

    # a 'gap' is when a section has part of its segments interleaved
    # with those of another section
    gap_sections = set()

    sections = []

    def new_section():
        '''new_section'''
        sections.append(DataBlockSection())
        return sections[-1]

    curr_section = new_section()
    parent_section = {-1: -1}

    for row in structure_block:
        row_id = id_map[row[ID]]
        parent_id = id_map[row[PID]]
        if not curr_section.ids:
            # the first point in a section is its parent
            curr_section.ids.append(parent_id)
            curr_section.ntype = row[TYPE]

        gap = parent_id != curr_section.ids[-1]

        # If parent is not the previous point, create a section end-point.
        # Else add the point to this section
        if gap:
            sec_end_pts.add(row_id)
        else:
            curr_section.ids.append(row_id)

        if row_id in sec_end_pts:
            parent_section[curr_section.ids[-1]] = len(sections) - 1
            # Parent-child discontinuity section
            if gap:
                curr_section = new_section()
                curr_section.ids.extend((parent_id, row_id))
                curr_section.ntype = row[TYPE]
                gap_sections.add(len(sections) - 2)
            elif row_id != len(data_block) - 1:
                # avoid creating an extra DataBlockSection for last row if it's a leaf
                curr_section = new_section()

    for sec in sections:
        # get the section parent ID from the id of the first point.
        if sec.ids:
            sec.pid = parent_section[sec.ids[0]]
        # join gap sections and "disable" first half
        if sec.pid in gap_sections:
            _merge_sections(sections[sec.pid], sec)

    # TODO: find a way to remove empty sections. Currently they are
    # required to maintain tree integrity.
    return sections
|
python
|
def _extract_sections(data_block):
    structure_block = data_block[:, COLS.TYPE:COLS.COL_COUNT].astype(np.int)
    id_map = {-1: -1}
    for i, row in enumerate(structure_block):
        id_map[row[ID]] = i
    sec_end_pts = _section_end_points(structure_block, id_map)
    gap_sections = set()
    sections = []

    def new_section():
        sections.append(DataBlockSection())
        return sections[-1]

    curr_section = new_section()
    parent_section = {-1: -1}
    for row in structure_block:
        row_id = id_map[row[ID]]
        parent_id = id_map[row[PID]]
        if not curr_section.ids:
            curr_section.ids.append(parent_id)
            curr_section.ntype = row[TYPE]
        gap = parent_id != curr_section.ids[-1]
        if gap:
            sec_end_pts.add(row_id)
        else:
            curr_section.ids.append(row_id)
        if row_id in sec_end_pts:
            parent_section[curr_section.ids[-1]] = len(sections) - 1
            if gap:
                curr_section = new_section()
                curr_section.ids.extend((parent_id, row_id))
                curr_section.ntype = row[TYPE]
                gap_sections.add(len(sections) - 2)
            elif row_id != len(data_block) - 1:
                curr_section = new_section()
    for sec in sections:
        if sec.ids:
            sec.pid = parent_section[sec.ids[0]]
        if sec.pid in gap_sections:
            _merge_sections(sections[sec.pid], sec)
    return sections
|
Make a list of sections from an SWC-style data wrapper block
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L142-L210
|
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
DataWrapper.neurite_root_section_ids
|
def neurite_root_section_ids(self):
    '''Get the section IDs of the initial neurite sections'''
    sec = self.sections
    return [i for i, ss in enumerate(sec)
            if ss.pid > -1 and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
                                ss.ntype != POINT_TYPE.SOMA)]
|
python
|
def neurite_root_section_ids(self):
    sec = self.sections
    return [i for i, ss in enumerate(sec)
            if ss.pid > -1 and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
                                ss.ntype != POINT_TYPE.SOMA)]
|
Get the section IDs of the initial neurite sections
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L76-L81
|
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
DataWrapper.soma_points
|
def soma_points(self):
    '''Get the soma points'''
    db = self.data_block
    return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
|
python
|
def soma_points(self):
    db = self.data_block
    return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
|
Get the soma points
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L83-L86
|
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
BlockNeuronBuilder.add_section
|
def add_section(self, id_, parent_id, section_type, points):
    '''add a section

    Args:
        id_(int): identifying number of the section
        parent_id(int): identifying number of the parent of this section
        section_type(int): the section type as defined by POINT_TYPE
        points is an array of [X, Y, Z, R]
    '''
    # L.debug('Adding section %d, with parent %d, of type: %d with count: %d',
    #         id_, parent_id, section_type, len(points))
    assert id_ not in self.sections, 'id %s already exists in sections' % id_
    self.sections[id_] = BlockNeuronBuilder.BlockSection(parent_id, section_type, points)
|
python
|
def add_section(self, id_, parent_id, section_type, points):
    assert id_ not in self.sections, 'id %s already exists in sections' % id_
    self.sections[id_] = BlockNeuronBuilder.BlockSection(parent_id, section_type, points)
|
add a section
Args:
id_(int): identifying number of the section
parent_id(int): identifying number of the parent of this section
section_type(int): the section type as defined by POINT_TYPE
points is an array of [X, Y, Z, R]
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L234-L246
|
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
BlockNeuronBuilder._make_datablock
|
def _make_datablock(self):
    '''Make a data_block and sections list as required by DataWrapper'''
    section_ids = sorted(self.sections)

    # create all insertion id's, this needs to be done ahead of time
    # as some of the children may have a lower id than their parents
    id_to_insert_id = {}
    row_count = 0
    for section_id in section_ids:
        row_count += len(self.sections[section_id].points)
        id_to_insert_id[section_id] = row_count - 1

    datablock = np.empty((row_count, COLS.COL_COUNT), dtype=np.float)
    datablock[:, COLS.ID] = np.arange(len(datablock))
    datablock[:, COLS.P] = datablock[:, COLS.ID] - 1

    sections = []
    insert_index = 0
    for id_ in section_ids:
        sec = self.sections[id_]
        points, section_type, parent_id = sec.points, sec.section_type, sec.parent_id
        idx = slice(insert_index, insert_index + len(points))
        datablock[idx, COLS.XYZR] = points
        datablock[idx, COLS.TYPE] = section_type
        datablock[idx.start, COLS.P] = id_to_insert_id.get(parent_id, ROOT_ID)

        sections.append(DataBlockSection(idx, section_type, parent_id))
        insert_index = idx.stop

    return datablock, sections
|
python
|
def _make_datablock(self):
    section_ids = sorted(self.sections)
    id_to_insert_id = {}
    row_count = 0
    for section_id in section_ids:
        row_count += len(self.sections[section_id].points)
        id_to_insert_id[section_id] = row_count - 1
    datablock = np.empty((row_count, COLS.COL_COUNT), dtype=np.float)
    datablock[:, COLS.ID] = np.arange(len(datablock))
    datablock[:, COLS.P] = datablock[:, COLS.ID] - 1
    sections = []
    insert_index = 0
    for id_ in section_ids:
        sec = self.sections[id_]
        points, section_type, parent_id = sec.points, sec.section_type, sec.parent_id
        idx = slice(insert_index, insert_index + len(points))
        datablock[idx, COLS.XYZR] = points
        datablock[idx, COLS.TYPE] = section_type
        datablock[idx.start, COLS.P] = id_to_insert_id.get(parent_id, ROOT_ID)
        sections.append(DataBlockSection(idx, section_type, parent_id))
        insert_index = idx.stop
    return datablock, sections
|
Make a data_block and sections list as required by DataWrapper
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L248-L277
|
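The core pattern above is filling a preallocated array one contiguous slice per section; a reduced standalone illustration with fake [X, Y, Z, R] points and no NeuroM types:

import numpy as np

sections = {0: np.zeros((3, 4)),  # 3 points of [X, Y, Z, R]
            1: np.ones((2, 4))}   # 2 points

row_count = sum(len(p) for p in sections.values())
datablock = np.empty((row_count, 4))

insert_index = 0
for sec_id in sorted(sections):
    points = sections[sec_id]
    idx = slice(insert_index, insert_index + len(points))
    datablock[idx] = points  # one contiguous write per section
    insert_index = idx.stop

print(datablock.shape)  # (5, 4)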
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
BlockNeuronBuilder._check_consistency
|
def _check_consistency(self):
    '''see if the sections have obvious errors'''
    type_count = defaultdict(int)
    for _, section in sorted(self.sections.items()):
        type_count[section.section_type] += 1

    if type_count[POINT_TYPE.SOMA] != 1:
        L.info('Have %d somas, expected 1', type_count[POINT_TYPE.SOMA])
|
python
|
def _check_consistency(self):
    type_count = defaultdict(int)
    for _, section in sorted(self.sections.items()):
        type_count[section.section_type] += 1
    if type_count[POINT_TYPE.SOMA] != 1:
        L.info('Have %d somas, expected 1', type_count[POINT_TYPE.SOMA])
|
see if the sections have obvious errors
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L279-L286
|
BlueBrain/NeuroM
|
neurom/io/datawrapper.py
|
BlockNeuronBuilder.get_datawrapper
|
def get_datawrapper(self, file_format='BlockNeuronBuilder', data_wrapper=DataWrapper):
    '''returns a DataWrapper'''
    self._check_consistency()
    datablock, sections = self._make_datablock()
    return data_wrapper(datablock, file_format, sections)
|
python
|
def get_datawrapper(self, file_format='BlockNeuronBuilder', data_wrapper=DataWrapper):
    self._check_consistency()
    datablock, sections = self._make_datablock()
    return data_wrapper(datablock, file_format, sections)
|
returns a DataWrapper
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/datawrapper.py#L288-L292
|
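Putting the `BlockNeuronBuilder` methods together, the intended flow would look roughly like this. This is a hypothetical sketch assuming NeuroM is installed; the IDs, point values, and SWC type codes (1 = soma, 3 = basal dendrite) are made up for illustration.

from neurom.io.datawrapper import BlockNeuronBuilder

builder = BlockNeuronBuilder()
# a one-point soma section with no parent
builder.add_section(0, parent_id=-1, section_type=1,
                    points=[[0.0, 0.0, 0.0, 1.0]])
# a two-point dendrite section attached to the soma
builder.add_section(1, parent_id=0, section_type=3,
                    points=[[0.0, 1.0, 0.0, 0.5],
                            [0.0, 2.0, 0.0, 0.5]])
rdw = builder.get_datawrapper()  # consistency check + datablock assembly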
BlueBrain/NeuroM
|
neurom/io/utils.py
|
_is_morphology_file
|
def _is_morphology_file(filepath):
    """ Check if `filepath` is a file with one of morphology file extensions. """
    return (
        os.path.isfile(filepath) and
        os.path.splitext(filepath)[1].lower() in ('.swc', '.h5', '.asc')
    )
|
python
|
def _is_morphology_file(filepath):
    return (
        os.path.isfile(filepath) and
        os.path.splitext(filepath)[1].lower() in ('.swc', '.h5', '.asc')
    )
|
Check if `filepath` is a file with one of morphology file extensions.
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L50-L55
|
BlueBrain/NeuroM
|
neurom/io/utils.py
|
get_morph_files
|
def get_morph_files(directory):
    '''Get a list of all morphology files in a directory

    Returns:
        list with all files with extensions '.swc', '.h5' or '.asc' (case insensitive)
    '''
    lsdir = (os.path.join(directory, m) for m in os.listdir(directory))
    return list(filter(_is_morphology_file, lsdir))
|
python
|
def get_morph_files(directory):
    lsdir = (os.path.join(directory, m) for m in os.listdir(directory))
    return list(filter(_is_morphology_file, lsdir))
|
Get a list of all morphology files in a directory
Returns:
list with all files with extensions '.swc' , 'h5' or '.asc' (case insensitive)
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L92-L99
|
BlueBrain/NeuroM
|
neurom/io/utils.py
|
get_files_by_path
|
def get_files_by_path(path):
    '''Get a file or set of files from a file path

    Return list of files with path
    '''
    if os.path.isfile(path):
        return [path]
    if os.path.isdir(path):
        return get_morph_files(path)

    raise IOError('Invalid data path %s' % path)
|
python
|
def get_files_by_path(path):
    if os.path.isfile(path):
        return [path]
    if os.path.isdir(path):
        return get_morph_files(path)
    raise IOError('Invalid data path %s' % path)
|
Get a file or set of files from a file path
Return list of files with path
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L102-L112
|
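The three path helpers above compose into a small self-contained unit; run together they behave as follows:

import os


def _is_morphology_file(filepath):
    return (os.path.isfile(filepath) and
            os.path.splitext(filepath)[1].lower() in ('.swc', '.h5', '.asc'))


def get_morph_files(directory):
    lsdir = (os.path.join(directory, m) for m in os.listdir(directory))
    return list(filter(_is_morphology_file, lsdir))


def get_files_by_path(path):
    if os.path.isfile(path):
        return [path]
    if os.path.isdir(path):
        return get_morph_files(path)
    raise IOError('Invalid data path %s' % path)


# a file path comes back as a one-element list; a directory is scanned
# for .swc/.h5/.asc files; anything else raises IOError
print(get_files_by_path('.'))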
BlueBrain/NeuroM
|
neurom/io/utils.py
|
load_neuron
|
def load_neuron(handle, reader=None):
    '''Build section trees from an h5 or swc file'''
    rdw = load_data(handle, reader)
    if isinstance(handle, StringType):
        name = os.path.splitext(os.path.basename(handle))[0]
    else:
        name = None
    return FstNeuron(rdw, name)
|
python
|
def load_neuron(handle, reader=None):
    rdw = load_data(handle, reader)
    if isinstance(handle, StringType):
        name = os.path.splitext(os.path.basename(handle))[0]
    else:
        name = None
    return FstNeuron(rdw, name)
|
Build section trees from an h5 or swc file
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L115-L122
|
BlueBrain/NeuroM
|
neurom/io/utils.py
|
load_neurons
|
def load_neurons(neurons,
                 neuron_loader=load_neuron,
                 name=None,
                 population_class=Population,
                 ignored_exceptions=()):
    '''Create a population object from all morphologies in a directory\
        or from morphologies in a list of file names

    Parameters:
        neurons: directory path or list of neuron file paths
        neuron_loader: function taking a filename and returning a neuron
        population_class: class representing populations
        name (str): optional name of population. By default 'Population' or\
            filepath basename depending on whether neurons is list or\
            directory path respectively.

    Returns:
        neuron population object
    '''
    if isinstance(neurons, (list, tuple)):
        files = neurons
        name = name if name is not None else 'Population'
    elif isinstance(neurons, StringType):
        files = get_files_by_path(neurons)
        name = name if name is not None else os.path.basename(neurons)

    ignored_exceptions = tuple(ignored_exceptions)
    pop = []
    for f in files:
        try:
            pop.append(neuron_loader(f))
        except NeuroMError as e:
            if isinstance(e, ignored_exceptions):
                L.info('Ignoring exception "%s" for file %s',
                       e, os.path.basename(f))
                continue
            raise

    return population_class(pop, name=name)
|
python
|
def load_neurons(neurons,
                 neuron_loader=load_neuron,
                 name=None,
                 population_class=Population,
                 ignored_exceptions=()):
    if isinstance(neurons, (list, tuple)):
        files = neurons
        name = name if name is not None else 'Population'
    elif isinstance(neurons, StringType):
        files = get_files_by_path(neurons)
        name = name if name is not None else os.path.basename(neurons)
    ignored_exceptions = tuple(ignored_exceptions)
    pop = []
    for f in files:
        try:
            pop.append(neuron_loader(f))
        except NeuroMError as e:
            if isinstance(e, ignored_exceptions):
                L.info('Ignoring exception "%s" for file %s',
                       e, os.path.basename(f))
                continue
            raise
    return population_class(pop, name=name)
|
Create a population object from all morphologies in a directory\
or from morphologies in a list of file names
Parameters:
neurons: directory path or list of neuron file paths
neuron_loader: function taking a filename and returning a neuron
population_class: class representing populations
name (str): optional name of population. By default 'Population' or\
filepath basename depending on whether neurons is list or\
directory path respectively.
Returns:
neuron population object
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L125-L164
|
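A hypothetical call, assuming NeuroM is installed and a `./morphs` directory of morphology files exists; the import paths below match this revision of the library but should be treated as assumptions:

from neurom.exceptions import RawDataError
from neurom.io.utils import load_neurons

# unreadable files are logged and skipped instead of aborting the load
pop = load_neurons('./morphs', ignored_exceptions=(RawDataError,))
print(pop.name)  # defaults to the directory basename, here 'morphs'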
BlueBrain/NeuroM
|
neurom/io/utils.py
|
_get_file
|
def _get_file(handle):
    '''Returns the filename of the file to read

    If handle is a stream, a temp file is written on disk first
    and its filename is returned'''
    if not isinstance(handle, IOBase):
        return handle

    fd, temp_file = tempfile.mkstemp(str(uuid.uuid4()), prefix='neurom-')
    os.close(fd)
    with open(temp_file, 'w') as fd:
        handle.seek(0)
        shutil.copyfileobj(handle, fd)
    return temp_file
|
python
|
def _get_file(handle):
    if not isinstance(handle, IOBase):
        return handle
    fd, temp_file = tempfile.mkstemp(str(uuid.uuid4()), prefix='neurom-')
    os.close(fd)
    with open(temp_file, 'w') as fd:
        handle.seek(0)
        shutil.copyfileobj(handle, fd)
    return temp_file
|
Returns the filename of the file to read
If handle is a stream, a temp file is written on disk first
and its filename is returned
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L167-L180
|
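The same stream-to-temp-file pattern as a self-contained script (explicit imports added, the context variable renamed for clarity; behaviour unchanged):

import io
import os
import shutil
import tempfile
import uuid


def _get_file(handle):
    if not isinstance(handle, io.IOBase):
        return handle  # already a filename: pass it through
    fd, temp_file = tempfile.mkstemp(str(uuid.uuid4()), prefix='neurom-')
    os.close(fd)
    with open(temp_file, 'w') as out:
        handle.seek(0)
        shutil.copyfileobj(handle, out)
    return temp_file


print(_get_file('morph.swc'))                       # passed through as-is
print(_get_file(io.StringIO('1 1 0 0 0 1 -1\n')))   # spilled to a temp file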
BlueBrain/NeuroM
|
neurom/io/utils.py
|
load_data
|
def load_data(handle, reader=None):
    '''Unpack data into a raw data wrapper'''
    if not reader:
        reader = os.path.splitext(handle)[1][1:].lower()

    if reader not in _READERS:
        raise NeuroMError('Do not have a loader for "%s" extension' % reader)

    filename = _get_file(handle)
    try:
        return _READERS[reader](filename)
    except Exception as e:
        L.exception('Error reading file %s, using "%s" loader', filename, reader)
        raise RawDataError('Error reading file %s:\n%s' % (filename, str(e)))
|
python
|
def load_data(handle, reader=None):
    if not reader:
        reader = os.path.splitext(handle)[1][1:].lower()
    if reader not in _READERS:
        raise NeuroMError('Do not have a loader for "%s" extension' % reader)
    filename = _get_file(handle)
    try:
        return _READERS[reader](filename)
    except Exception as e:
        L.exception('Error reading file %s, using "%s" loader', filename, reader)
        raise RawDataError('Error reading file %s:\n%s' % (filename, str(e)))
|
Unpack data into a raw data wrapper
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L183-L196
|
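The dispatch pattern in `load_data` (the file extension selects a reader from a dict) in isolation; the reader and the error type here are stand-ins, not NeuroM's:

import os


def _read_swc(filename):
    # hypothetical reader, used only to show the dispatch
    return 'swc data from %s' % filename


_READERS = {'swc': _read_swc}


def load_data(handle, reader=None):
    if not reader:
        reader = os.path.splitext(handle)[1][1:].lower()
    if reader not in _READERS:
        # NeuroM raises NeuroMError here; ValueError keeps this standalone
        raise ValueError('Do not have a loader for "%s" extension' % reader)
    return _READERS[reader](handle)


print(load_data('cell.swc'))  # the .swc extension picks the reader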
BlueBrain/NeuroM
|
neurom/io/utils.py
|
_load_h5
|
def _load_h5(filename):
    '''Delay loading of h5py until it is needed'''
    from neurom.io import hdf5
    return hdf5.read(filename,
                     remove_duplicates=False,
                     data_wrapper=DataWrapper)
|
python
|
def _load_h5(filename):
    from neurom.io import hdf5
    return hdf5.read(filename,
                     remove_duplicates=False,
                     data_wrapper=DataWrapper)
|
Delay loading of h5py until it is needed
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L199-L204
|
BlueBrain/NeuroM
|
neurom/io/utils.py
|
NeuronLoader._filepath
|
def _filepath(self, name):
    """ File path to `name` morphology file. """
    if self.file_ext is None:
        candidates = glob.glob(os.path.join(self.directory, name + ".*"))
        try:
            return next(filter(_is_morphology_file, candidates))
        except StopIteration:
            raise NeuroMError("Can not find morphology file for '%s' " % name)
    else:
        return os.path.join(self.directory, name + self.file_ext)
|
python
|
def _filepath(self, name):
    if self.file_ext is None:
        candidates = glob.glob(os.path.join(self.directory, name + ".*"))
        try:
            return next(filter(_is_morphology_file, candidates))
        except StopIteration:
            raise NeuroMError("Can not find morphology file for '%s' " % name)
    else:
        return os.path.join(self.directory, name + self.file_ext)
|
File path to `name` morphology file.
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/utils.py#L75-L84
|
BlueBrain/NeuroM
|
neurom/viewer.py
|
draw
|
def draw(obj, mode='2d', **kwargs):
    '''Draw a morphology object

    Parameters:
        obj: morphology object to be drawn (neuron, tree, soma).
        mode (Optional[str]): drawing mode ('2d', '3d', 'dendrogram'). Defaults to '2d'.
        **kwargs: keyword arguments for underlying neurom.view.view functions.

    Raises:
        InvalidDrawModeError if mode is not valid
        NotDrawableError if obj is not drawable
        NotDrawableError if obj type and mode combination is not drawable

    Examples:
        >>> nrn = ...  # load a neuron
        >>> fig, _ = viewer.draw(nrn)               # 2d plot
        >>> fig.show()
        >>> fig3d, _ = viewer.draw(nrn, mode='3d')  # 3d plot
        >>> fig3d.show()
        >>> fig, _ = viewer.draw(nrn.neurites[0])   # 2d plot of neurite tree
        >>> dend, _ = viewer.draw(nrn, mode='dendrogram')
    '''
    if mode not in MODES:
        raise InvalidDrawModeError('Invalid drawing mode %s' % mode)

    if mode in ('2d', 'dendrogram'):
        fig, ax = common.get_figure()
    else:
        fig, ax = common.get_figure(params={'projection': '3d'})

    if isinstance(obj, Neuron):
        tag = 'neuron'
    elif isinstance(obj, (Tree, Neurite)):
        tag = 'tree'
    elif isinstance(obj, Soma):
        tag = 'soma'
    else:
        raise NotDrawableError('draw not implemented for %s' % obj.__class__)

    viewer = '%s_%s' % (tag, mode)
    try:
        plotter = _VIEWERS[viewer]
    except KeyError:
        raise NotDrawableError('No drawer for class %s, mode=%s' % (obj.__class__, mode))

    output_path = kwargs.pop('output_path', None)
    plotter(ax, obj, **kwargs)

    if mode != 'dendrogram':
        common.plot_style(fig=fig, ax=ax, **kwargs)

    if output_path:
        common.save_plot(fig=fig, output_path=output_path, **kwargs)

    return fig, ax
|
python
|
def draw(obj, mode='2d', **kwargs):
    if mode not in MODES:
        raise InvalidDrawModeError('Invalid drawing mode %s' % mode)
    if mode in ('2d', 'dendrogram'):
        fig, ax = common.get_figure()
    else:
        fig, ax = common.get_figure(params={'projection': '3d'})
    if isinstance(obj, Neuron):
        tag = 'neuron'
    elif isinstance(obj, (Tree, Neurite)):
        tag = 'tree'
    elif isinstance(obj, Soma):
        tag = 'soma'
    else:
        raise NotDrawableError('draw not implemented for %s' % obj.__class__)
    viewer = '%s_%s' % (tag, mode)
    try:
        plotter = _VIEWERS[viewer]
    except KeyError:
        raise NotDrawableError('No drawer for class %s, mode=%s' % (obj.__class__, mode))
    output_path = kwargs.pop('output_path', None)
    plotter(ax, obj, **kwargs)
    if mode != 'dendrogram':
        common.plot_style(fig=fig, ax=ax, **kwargs)
    if output_path:
        common.save_plot(fig=fig, output_path=output_path, **kwargs)
    return fig, ax
|
Draw a morphology object
Parameters:
obj: morphology object to be drawn (neuron, tree, soma).
mode (Optional[str]): drawing mode ('2d', '3d', 'dendrogram'). Defaults to '2d'.
**kwargs: keyword arguments for underlying neurom.view.view functions.
Raises:
InvalidDrawModeError if mode is not valid
NotDrawableError if obj is not drawable
NotDrawableError if obj type and mode combination is not drawable
Examples:
>>> nrn = ... # load a neuron
>>> fig, _ = viewer.draw(nrn) # 2d plot
>>> fig.show()
>>> fig3d, _ = viewer.draw(nrn, mode='3d') # 3d plot
>>> fig3d.show()
>>> fig, _ = viewer.draw(nrn.neurites[0]) # 2d plot of neurite tree
>>> dend, _ = viewer.draw(nrn, mode='dendrogram')
|
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/viewer.py#L77-L134
|
BlueBrain/NeuroM
|
examples/histogram.py
|
histogram
|
def histogram(neurons, feature, new_fig=True, subplot=False, normed=False, **kwargs):
    '''
    Plot a histogram of the selected feature for the population of neurons.
    Plots x-axis versus y-axis on a scatter|histogram|binned values plot.

    Parameters:
        neurons : list
            List of Neurons. Single neurons must be encapsulated in a list.
        feature : str
            The feature of interest.
        bins : int
            Number of bins for the histogram.
        cumulative : bool
            Sets cumulative histogram on.
        subplot : bool
            Default is False, which returns a matplotlib figure object. If True,
            returns a matplotlib axis object, for use as a subplot.

    Returns:
        figure_output : list
            [fig|ax, figdata, figtext]
            The first item is either a figure object (if subplot is False) or an
            axis object. The second item is an object containing the data used to
            generate the figure. The final item is text used in report generation
            as a figure legend. This text needs to be manually entered in each
            figure file.
    '''
    bins = kwargs.get('bins', 25)
    cumulative = kwargs.get('cumulative', False)

    fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)

    kwargs['xlabel'] = kwargs.get('xlabel', feature)
    kwargs['ylabel'] = kwargs.get('ylabel', feature + ' fraction')
    kwargs['title'] = kwargs.get('title', feature + ' histogram')

    feature_values = [getattr(neu, 'get_' + feature)() for neu in neurons]
    neu_labels = [neu.name for neu in neurons]

    ax.hist(feature_values, bins=bins, cumulative=cumulative, label=neu_labels, normed=normed)
    kwargs['no_legend'] = len(neu_labels) == 1

    return common.plot_style(fig=fig, ax=ax, **kwargs)
|
python
|
def histogram(neurons, feature, new_fig=True, subplot=False, normed=False, **kwargs):
bins = kwargs.get('bins', 25)
cumulative = kwargs.get('cumulative', False)
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
kwargs['xlabel'] = kwargs.get('xlabel', feature)
kwargs['ylabel'] = kwargs.get('ylabel', feature + ' fraction')
kwargs['title'] = kwargs.get('title', feature + ' histogram')
feature_values = [getattr(neu, 'get_' + feature)() for neu in neurons]
neu_labels = [neu.name for neu in neurons]
ax.hist(feature_values, bins=bins, cumulative=cumulative, label=neu_labels, normed=normed)
kwargs['no_legend'] = len(neu_labels) == 1
return common.plot_style(fig=fig, ax=ax, **kwargs)
|
[
"def",
"histogram",
"(",
"neurons",
",",
"feature",
",",
"new_fig",
"=",
"True",
",",
"subplot",
"=",
"False",
",",
"normed",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"bins",
"=",
"kwargs",
".",
"get",
"(",
"'bins'",
",",
"25",
")",
"cumulative",
"=",
"kwargs",
".",
"get",
"(",
"'cumulative'",
",",
"False",
")",
"fig",
",",
"ax",
"=",
"common",
".",
"get_figure",
"(",
"new_fig",
"=",
"new_fig",
",",
"subplot",
"=",
"subplot",
")",
"kwargs",
"[",
"'xlabel'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'xlabel'",
",",
"feature",
")",
"kwargs",
"[",
"'ylabel'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'ylabel'",
",",
"feature",
"+",
"' fraction'",
")",
"kwargs",
"[",
"'title'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'title'",
",",
"feature",
"+",
"' histogram'",
")",
"feature_values",
"=",
"[",
"getattr",
"(",
"neu",
",",
"'get_'",
"+",
"feature",
")",
"(",
")",
"for",
"neu",
"in",
"neurons",
"]",
"neu_labels",
"=",
"[",
"neu",
".",
"name",
"for",
"neu",
"in",
"neurons",
"]",
"ax",
".",
"hist",
"(",
"feature_values",
",",
"bins",
"=",
"bins",
",",
"cumulative",
"=",
"cumulative",
",",
"label",
"=",
"neu_labels",
",",
"normed",
"=",
"normed",
")",
"kwargs",
"[",
"'no_legend'",
"]",
"=",
"len",
"(",
"neu_labels",
")",
"==",
"1",
"return",
"common",
".",
"plot_style",
"(",
"fig",
"=",
"fig",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")"
] |
Plot a histogram of the selected feature for the population of neurons.
Plots x-axis versus y-axis on a scatter|histogram|binned values plot.
More information about the plot and how it works.
Parameters :
neurons : list
List of Neurons. Single neurons must be encapsulated in a list.
feature : str
The feature of interest.
bins : int
Number of bins for the histogram.
cumulative : bool
Sets cumulative histogram on.
subplot : bool
Default is False, which returns a matplotlib figure object. If True,
returns a matplotlib axis object, for use as a subplot.
Returns :
figure_output : list
[fig|ax, figdata, figtext]
The first item is either a figure object (if subplot is False) or an
axis object. The second item is an object containing the data used to
generate the figure. The final item is text used in report generation
as a figure legend. This text needs to be manually entered in each
figure file.
|
[
"Plot",
"a",
"histogram",
"of",
"the",
"selected",
"feature",
"for",
"the",
"population",
"of",
"neurons",
".",
"Plots",
"x",
"-",
"axis",
"versus",
"y",
"-",
"axis",
"on",
"a",
"scatter|histogram|binned",
"values",
"plot",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/histogram.py#L38-L93
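A minimal usage sketch for the histogram record above. The FakeNeuron class and its get_section_lengths accessor are hypothetical stand-ins for NeuroM neuron objects; they only mirror the getattr(neu, 'get_' + feature)() and neu.name access pattern the function relies on. Note also that ax.hist's normed keyword was removed in matplotlib 3.1, so on recent matplotlib the record's normed=normed call would need density=normed instead.

class FakeNeuron:
    '''Hypothetical stand-in exposing the attributes histogram() expects'''
    def __init__(self, name, lengths):
        self.name = name
        self._lengths = lengths

    def get_section_lengths(self):
        return self._lengths

neurons = [FakeNeuron('n1', [1.0, 2.0, 3.5]), FakeNeuron('n2', [0.5, 2.5])]
feature = 'section_lengths'
# the two extraction steps the function performs before calling ax.hist:
feature_values = [getattr(neu, 'get_' + feature)() for neu in neurons]
neu_labels = [neu.name for neu in neurons]
print(feature_values)  # [[1.0, 2.0, 3.5], [0.5, 2.5]]
print(neu_labels)      # ['n1', 'n2']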
|
BlueBrain/NeuroM
|
examples/histogram.py
|
population_feature_values
|
def population_feature_values(pops, feature):
'''Extracts feature values per population
'''
pops_feature_values = []
for pop in pops:
feature_values = [getattr(neu, 'get_' + feature)() for neu in pop.neurons]
# ugly hack to chain in case of list of lists
if any([isinstance(p, (list, np.ndarray)) for p in feature_values]):
feature_values = list(chain(*feature_values))
pops_feature_values.append(feature_values)
return pops_feature_values
|
python
|
def population_feature_values(pops, feature):
pops_feature_values = []
for pop in pops:
feature_values = [getattr(neu, 'get_' + feature)() for neu in pop.neurons]
if any([isinstance(p, (list, np.ndarray)) for p in feature_values]):
feature_values = list(chain(*feature_values))
pops_feature_values.append(feature_values)
return pops_feature_values
|
[
"def",
"population_feature_values",
"(",
"pops",
",",
"feature",
")",
":",
"pops_feature_values",
"=",
"[",
"]",
"for",
"pop",
"in",
"pops",
":",
"feature_values",
"=",
"[",
"getattr",
"(",
"neu",
",",
"'get_'",
"+",
"feature",
")",
"(",
")",
"for",
"neu",
"in",
"pop",
".",
"neurons",
"]",
"# ugly hack to chain in case of list of lists",
"if",
"any",
"(",
"[",
"isinstance",
"(",
"p",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
"for",
"p",
"in",
"feature_values",
"]",
")",
":",
"feature_values",
"=",
"list",
"(",
"chain",
"(",
"*",
"feature_values",
")",
")",
"pops_feature_values",
".",
"append",
"(",
"feature_values",
")",
"return",
"pops_feature_values"
] |
Extracts feature values per population
|
[
"Extracts",
"feature",
"values",
"per",
"population"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/histogram.py#L96-L112
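The "# ugly hack" comment above covers features that return one value per neurite instead of one per neuron, so each neuron contributes a sequence and the result must be flattened one level. A standalone sketch of that branch, with made-up numbers:

import numpy as np
from itertools import chain

feature_values = [[1.0, 2.0], np.array([3.0, 4.0])]  # one sequence per neuron
if any(isinstance(p, (list, np.ndarray)) for p in feature_values):
    feature_values = list(chain(*feature_values))    # flatten one level
print([float(v) for v in feature_values])            # [1.0, 2.0, 3.0, 4.0]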
|
BlueBrain/NeuroM
|
examples/histogram.py
|
population_histogram
|
def population_histogram(pops, feature, new_fig=True, normed=False, subplot=False, **kwargs):
'''
Plot a histogram of the selected feature for the population of neurons.
Plots x-axis versus y-axis on a scatter|histogram|binned values plot.
More information about the plot and how it works.
Parameters :
populations : populations list
feature : str
The feature of interest.
bins : int
Number of bins for the histogram.
cumulative : bool
Sets cumulative histogram on.
subplot : bool
Default is False, which returns a matplotlib figure object. If True,
returns a matplotlib axis object, for use as a subplot.
Returns :
figure_output : list
[fig|ax, figdata, figtext]
The first item is either a figure object (if subplot is False) or an
axis object. The second item is an object containing the data used to
generate the figure. The final item is text used in report generation
as a figure legend. This text needs to be manually entered in each
figure file.
'''
bins = kwargs.get('bins', 25)
cumulative = kwargs.get('cumulative', False)
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
kwargs['xlabel'] = kwargs.get('xlabel', feature)
kwargs['ylabel'] = kwargs.get('ylabel', feature + ' fraction')
kwargs['title'] = kwargs.get('title', feature + ' histogram')
pops_feature_values = population_feature_values(pops, feature)
pops_labels = [pop.name for pop in pops]
ax.hist(pops_feature_values, bins=bins, cumulative=cumulative, label=pops_labels, normed=normed)
kwargs['no_legend'] = len(pops_labels) == 1
return common.plot_style(fig=fig, ax=ax, **kwargs)
|
python
|
def population_histogram(pops, feature, new_fig=True, normed=False, subplot=False, **kwargs):
bins = kwargs.get('bins', 25)
cumulative = kwargs.get('cumulative', False)
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
kwargs['xlabel'] = kwargs.get('xlabel', feature)
kwargs['ylabel'] = kwargs.get('ylabel', feature + ' fraction')
kwargs['title'] = kwargs.get('title', feature + ' histogram')
pops_feature_values = population_feature_values(pops, feature)
pops_labels = [pop.name for pop in pops]
ax.hist(pops_feature_values, bins=bins, cumulative=cumulative, label=pops_labels, normed=normed)
kwargs['no_legend'] = len(pops_labels) == 1
return common.plot_style(fig=fig, ax=ax, **kwargs)
|
[
"def",
"population_histogram",
"(",
"pops",
",",
"feature",
",",
"new_fig",
"=",
"True",
",",
"normed",
"=",
"False",
",",
"subplot",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"bins",
"=",
"kwargs",
".",
"get",
"(",
"'bins'",
",",
"25",
")",
"cumulative",
"=",
"kwargs",
".",
"get",
"(",
"'cumulative'",
",",
"False",
")",
"fig",
",",
"ax",
"=",
"common",
".",
"get_figure",
"(",
"new_fig",
"=",
"new_fig",
",",
"subplot",
"=",
"subplot",
")",
"kwargs",
"[",
"'xlabel'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'xlabel'",
",",
"feature",
")",
"kwargs",
"[",
"'ylabel'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'ylabel'",
",",
"feature",
"+",
"' fraction'",
")",
"kwargs",
"[",
"'title'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'title'",
",",
"feature",
"+",
"' histogram'",
")",
"pops_feature_values",
"=",
"population_feature_values",
"(",
"pops",
",",
"feature",
")",
"pops_labels",
"=",
"[",
"pop",
".",
"name",
"for",
"pop",
"in",
"pops",
"]",
"ax",
".",
"hist",
"(",
"pops_feature_values",
",",
"bins",
"=",
"bins",
",",
"cumulative",
"=",
"cumulative",
",",
"label",
"=",
"pops_labels",
",",
"normed",
"=",
"normed",
")",
"kwargs",
"[",
"'no_legend'",
"]",
"=",
"len",
"(",
"pops_labels",
")",
"==",
"1",
"return",
"common",
".",
"plot_style",
"(",
"fig",
"=",
"fig",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")"
] |
Plot a histogram of the selected feature for the population of neurons.
Plots x-axis versus y-axis on a scatter|histogram|binned values plot.
More information about the plot and how it works.
Parameters :
populations : populations list
feature : str
The feature of interest.
bins : int
Number of bins for the histogram.
cumulative : bool
Sets cumulative histogram on.
subplot : bool
Default is False, which returns a matplotlib figure object. If True,
returns a matplotlib axis object, for use as a subplot.
Returns :
figure_output : list
[fig|ax, figdata, figtext]
The first item is either a figure object (if subplot is False) or an
axis object. The second item is an object containing the data used to
generate the figure. The final item is text used in report generation
as a figure legend. This text needs to be manually entered in each
figure file.
|
[
"Plot",
"a",
"histogram",
"of",
"the",
"selected",
"feature",
"for",
"the",
"population",
"of",
"neurons",
".",
"Plots",
"x",
"-",
"axis",
"versus",
"y",
"-",
"axis",
"on",
"a",
"scatter|histogram|binned",
"values",
"plot",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/histogram.py#L115-L169
|
BlueBrain/NeuroM
|
examples/section_ids.py
|
get_segment
|
def get_segment(neuron, section_id, segment_id):
'''Get a segment given a section and segment id
Returns:
array of two [x, y, z, r] points defining segment
'''
sec = neuron.sections[section_id]
return sec.points[segment_id:segment_id + 2][:, COLS.XYZR]
|
python
|
def get_segment(neuron, section_id, segment_id):
sec = neuron.sections[section_id]
return sec.points[segment_id:segment_id + 2][:, COLS.XYZR]
|
[
"def",
"get_segment",
"(",
"neuron",
",",
"section_id",
",",
"segment_id",
")",
":",
"sec",
"=",
"neuron",
".",
"sections",
"[",
"section_id",
"]",
"return",
"sec",
".",
"points",
"[",
"segment_id",
":",
"segment_id",
"+",
"2",
"]",
"[",
":",
",",
"COLS",
".",
"XYZR",
"]"
] |
Get a segment given a section and segment id
Returns:
array of two [x, y, z, r] points defining segment
|
[
"Get",
"a",
"segment",
"given",
"a",
"section",
"and",
"segment",
"id"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/section_ids.py#L37-L44
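The slice points[segment_id:segment_id + 2] grabs a segment's two endpoint rows. A self-contained sketch with a fake points array; the XYZR slice below is an assumption standing in for NeuroM's COLS.XYZR constant:

import numpy as np

XYZR = slice(0, 4)  # assumed layout: columns x, y, z, r come first
points = np.array([[0., 0., 0., 1., 2.],
                   [1., 0., 0., 1., 2.],
                   [2., 0., 0., 2., 2.]])
segment_id = 1
print(points[segment_id:segment_id + 2][:, XYZR])
# rows 1 and 2, i.e. the two [x, y, z, r] endpoints of segment 1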
|
BlueBrain/NeuroM
|
examples/extract_distribution.py
|
extract_data
|
def extract_data(data_path, feature):
'''Loads a list of neurons, extracts feature
and transforms the fitted distribution in the correct format.
Returns the optimal distribution, corresponding parameters,
minimum and maximum values.

'''
population = nm.load_neurons(data_path)
feature_data = [nm.get(feature, n) for n in population]
feature_data = list(chain(*feature_data))
return stats.optimal_distribution(feature_data)
|
python
|
def extract_data(data_path, feature):
population = nm.load_neurons(data_path)
feature_data = [nm.get(feature, n) for n in population]
feature_data = list(chain(*feature_data))
return stats.optimal_distribution(feature_data)
|
[
"def",
"extract_data",
"(",
"data_path",
",",
"feature",
")",
":",
"population",
"=",
"nm",
".",
"load_neurons",
"(",
"data_path",
")",
"feature_data",
"=",
"[",
"nm",
".",
"get",
"(",
"feature",
",",
"n",
")",
"for",
"n",
"in",
"population",
"]",
"feature_data",
"=",
"list",
"(",
"chain",
"(",
"*",
"feature_data",
")",
")",
"return",
"stats",
".",
"optimal_distribution",
"(",
"feature_data",
")"
] |
Loads a list of neurons, extracts feature
and transforms the fitted distribution in the correct format.
Returns the optimal distribution, corresponding parameters,
minimum and maximum values.
|
[
"Loads",
"a",
"list",
"of",
"neurons",
"extracts",
"feature",
"and",
"transforms",
"the",
"fitted",
"distribution",
"in",
"the",
"correct",
"format",
".",
"Returns",
"the",
"optimal",
"distribution",
"corresponding",
"parameters",
"minimun",
"and",
"maximum",
"values",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/extract_distribution.py#L59-L70
|
BlueBrain/NeuroM
|
neurom/fst/_bifurcationfunc.py
|
local_bifurcation_angle
|
def local_bifurcation_angle(bif_point):
'''Return the opening angle between two out-going sections
in a bifurcation point
We first ensure that the input point has only two children.
The bifurcation angle is defined as the angle between the first non-zero
length segments of a bifurcation point.
'''
def skip_0_length(sec):
'''Return the first point with non-zero distance to first point'''
p0 = sec[0]
cur = sec[1]
for i, p in enumerate(sec[1:]):
if not np.all(p[:COLS.R] == p0[:COLS.R]):
cur = sec[i + 1]
break
return cur
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
ch0, ch1 = (skip_0_length(bif_point.children[0].points),
skip_0_length(bif_point.children[1].points))
return morphmath.angle_3points(bif_point.points[-1], ch0, ch1)
|
python
|
def local_bifurcation_angle(bif_point):
def skip_0_length(sec):
p0 = sec[0]
cur = sec[1]
for i, p in enumerate(sec[1:]):
if not np.all(p[:COLS.R] == p0[:COLS.R]):
cur = sec[i + 1]
break
return cur
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
ch0, ch1 = (skip_0_length(bif_point.children[0].points),
skip_0_length(bif_point.children[1].points))
return morphmath.angle_3points(bif_point.points[-1], ch0, ch1)
|
[
"def",
"local_bifurcation_angle",
"(",
"bif_point",
")",
":",
"def",
"skip_0_length",
"(",
"sec",
")",
":",
"'''Return the first point with non-zero distance to first point'''",
"p0",
"=",
"sec",
"[",
"0",
"]",
"cur",
"=",
"sec",
"[",
"1",
"]",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"sec",
"[",
"1",
":",
"]",
")",
":",
"if",
"not",
"np",
".",
"all",
"(",
"p",
"[",
":",
"COLS",
".",
"R",
"]",
"==",
"p0",
"[",
":",
"COLS",
".",
"R",
"]",
")",
":",
"cur",
"=",
"sec",
"[",
"i",
"+",
"1",
"]",
"break",
"return",
"cur",
"assert",
"len",
"(",
"bif_point",
".",
"children",
")",
"==",
"2",
",",
"'A bifurcation point must have exactly 2 children'",
"ch0",
",",
"ch1",
"=",
"(",
"skip_0_length",
"(",
"bif_point",
".",
"children",
"[",
"0",
"]",
".",
"points",
")",
",",
"skip_0_length",
"(",
"bif_point",
".",
"children",
"[",
"1",
"]",
".",
"points",
")",
")",
"return",
"morphmath",
".",
"angle_3points",
"(",
"bif_point",
".",
"points",
"[",
"-",
"1",
"]",
",",
"ch0",
",",
"ch1",
")"
] |
Return the opening angle between two out-going sections
in a bifurcation point
We first ensure that the input point has only two children.
The bifurcation angle is defined as the angle between the first non-zero
length segments of a bifurcation point.
|
[
"Return",
"the",
"opening",
"angle",
"between",
"two",
"out",
"-",
"going",
"sections",
"in",
"a",
"bifurcation",
"point"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_bifurcationfunc.py#L36-L61
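skip_0_length exists because digitised sections can repeat the bifurcation point, which would make the angle degenerate; the angle is therefore taken at the bifurcation point between the first points that actually move away from it. A sketch of that geometry, using a plain three-point angle as a stand-in for morphmath.angle_3points (an assumption based on the name):

import numpy as np

def angle_3points(p, p1, p2):
    # angle at p between the vectors p->p1 and p->p2
    v1 = np.asarray(p1) - np.asarray(p)
    v2 = np.asarray(p2) - np.asarray(p)
    return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))

bif = [0., 0., 0.]   # last point of the parent section
ch0 = [1., 0., 0.]   # first non-zero-length point of child 0
ch1 = [0., 1., 0.]   # first non-zero-length point of child 1
print(angle_3points(bif, ch0, ch1))  # pi/2 ~= 1.5708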
|
BlueBrain/NeuroM
|
neurom/fst/_bifurcationfunc.py
|
remote_bifurcation_angle
|
def remote_bifurcation_angle(bif_point):
'''Return the opening angle between two out-going sections
in a bifurcation point
We first ensure that the input point has only two children.
The angle is defined as between the bifurcation point and the
last points in the out-going sections.
'''
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
return morphmath.angle_3points(bif_point.points[-1],
bif_point.children[0].points[-1],
bif_point.children[1].points[-1])
|
python
|
def remote_bifurcation_angle(bif_point):
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
return morphmath.angle_3points(bif_point.points[-1],
bif_point.children[0].points[-1],
bif_point.children[1].points[-1])
|
[
"def",
"remote_bifurcation_angle",
"(",
"bif_point",
")",
":",
"assert",
"len",
"(",
"bif_point",
".",
"children",
")",
"==",
"2",
",",
"'A bifurcation point must have exactly 2 children'",
"return",
"morphmath",
".",
"angle_3points",
"(",
"bif_point",
".",
"points",
"[",
"-",
"1",
"]",
",",
"bif_point",
".",
"children",
"[",
"0",
"]",
".",
"points",
"[",
"-",
"1",
"]",
",",
"bif_point",
".",
"children",
"[",
"1",
"]",
".",
"points",
"[",
"-",
"1",
"]",
")"
] |
Return the opening angle between two out-going sections
in a bifurcation point
We first ensure that the input point has only two children.
The angle is defined as between the bifurcation point and the
last points in the out-going sections.
|
[
"Return",
"the",
"opening",
"angle",
"between",
"two",
"out",
"-",
"going",
"sections",
"in",
"a",
"bifurcation",
"point"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_bifurcationfunc.py#L64-L77
|
BlueBrain/NeuroM
|
neurom/fst/_bifurcationfunc.py
|
bifurcation_partition
|
def bifurcation_partition(bif_point):
'''Calculate the partition at a bifurcation point
We first ensure that the input point has only two children.
The number of nodes in each child tree is counted. The partition is
defined as the ratio of the largest number to the smallest number.'''
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return max(n, m) / min(n, m)
|
python
|
def bifurcation_partition(bif_point):
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return max(n, m) / min(n, m)
|
[
"def",
"bifurcation_partition",
"(",
"bif_point",
")",
":",
"assert",
"len",
"(",
"bif_point",
".",
"children",
")",
"==",
"2",
",",
"'A bifurcation point must have exactly 2 children'",
"n",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"_",
"in",
"bif_point",
".",
"children",
"[",
"0",
"]",
".",
"ipreorder",
"(",
")",
")",
")",
"m",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"_",
"in",
"bif_point",
".",
"children",
"[",
"1",
"]",
".",
"ipreorder",
"(",
")",
")",
")",
"return",
"max",
"(",
"n",
",",
"m",
")",
"/",
"min",
"(",
"n",
",",
"m",
")"
] |
Calculate the partition at a bifurcation point
We first ensure that the input point has only two children.
The number of nodes in each child tree is counted. The partition is
defined as the ratio of the largest number to the smallest number.
|
[
"Calculate",
"the",
"partition",
"at",
"a",
"bifurcation",
"point"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_bifurcationfunc.py#L80-L91
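A worked number for the record above: if one daughter subtree contains 6 nodes and the other 2, the partition is max/min = 3.0, and a perfectly balanced bifurcation gives 1.0:

for n, m in [(6.0, 2.0), (4.0, 4.0)]:
    print(max(n, m) / min(n, m))  # 3.0, then 1.0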
|
BlueBrain/NeuroM
|
neurom/fst/_bifurcationfunc.py
|
partition_asymmetry
|
def partition_asymmetry(bif_point):
'''Calculate the partition asymmetry at a bifurcation point
as defined in https://www.ncbi.nlm.nih.gov/pubmed/18568015
The number of nodes in each child tree is counted. The partition
is defined as the ratio of the absolute difference and the sum
of the number of bifurcations in the two daughter subtrees
at each branch point.'''
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
if n == m:
return 0.0
return abs(n - m) / abs(n + m)
|
python
|
def partition_asymmetry(bif_point):
assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
if n == m:
return 0.0
return abs(n - m) / abs(n + m)
|
[
"def",
"partition_asymmetry",
"(",
"bif_point",
")",
":",
"assert",
"len",
"(",
"bif_point",
".",
"children",
")",
"==",
"2",
",",
"'A bifurcation point must have exactly 2 children'",
"n",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"_",
"in",
"bif_point",
".",
"children",
"[",
"0",
"]",
".",
"ipreorder",
"(",
")",
")",
")",
"m",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"_",
"in",
"bif_point",
".",
"children",
"[",
"1",
"]",
".",
"ipreorder",
"(",
")",
")",
")",
"if",
"n",
"==",
"m",
":",
"return",
"0.0",
"return",
"abs",
"(",
"n",
"-",
"m",
")",
"/",
"abs",
"(",
"n",
"+",
"m",
")"
] |
Calculate the partition asymmetry at a bifurcation point
as defined in https://www.ncbi.nlm.nih.gov/pubmed/18568015
The number of nodes in each child tree is counted. The partition
is defined as the ratio of the absolute difference and the sum
of the number of bifurcations in the two daughter subtrees
at each branch point.
|
[
"Calculate",
"the",
"partition",
"asymmetry",
"at",
"a",
"bifurcation",
"point",
"as",
"defined",
"in",
"https",
":",
"//",
"www",
".",
"ncbi",
".",
"nlm",
".",
"nih",
".",
"gov",
"/",
"pubmed",
"/",
"18568015"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_bifurcationfunc.py#L94-L107
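Worked numbers for the |n - m| / (n + m) formula above: equal subtrees give 0.0, and the value approaches 1.0 as the imbalance grows. The explicit n == m early return in the record yields the same 0.0 the formula would, so it only skips the arithmetic:

for n, m in [(3.0, 1.0), (5.0, 5.0), (99.0, 1.0)]:
    print(abs(n - m) / abs(n + m))  # 0.5, 0.0, 0.98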
|
BlueBrain/NeuroM
|
neurom/fst/_bifurcationfunc.py
|
partition_pair
|
def partition_pair(bif_point):
'''Calculate the partition pairs at a bifurcation point
The number of nodes in each child tree is counted. The partition
pairs is the number of bifurcations in the two daughter subtrees
at each branch point.'''
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return (n, m)
|
python
|
def partition_pair(bif_point):
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return (n, m)
|
[
"def",
"partition_pair",
"(",
"bif_point",
")",
":",
"n",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"_",
"in",
"bif_point",
".",
"children",
"[",
"0",
"]",
".",
"ipreorder",
"(",
")",
")",
")",
"m",
"=",
"float",
"(",
"sum",
"(",
"1",
"for",
"_",
"in",
"bif_point",
".",
"children",
"[",
"1",
"]",
".",
"ipreorder",
"(",
")",
")",
")",
"return",
"(",
"n",
",",
"m",
")"
] |
Calculate the partition pairs at a bifurcation point
The number of nodes in each child tree is counted. The partition
pairs is the number of bifurcations in the two daughter subtrees
at each branch point.
|
[
"Calculate",
"the",
"partition",
"pairs",
"at",
"a",
"bifurcation",
"point"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_bifurcationfunc.py#L110-L118
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
_match_section
|
def _match_section(section, match):
'''checks whether the `type` of section is in the `match` dictionary
Works around the unknown ordering of s-expressions in each section.
For instance, the `type` is the third element for CellBodies
("CellBody"
(Color Yellow)
(CellBody)
(Set "cell10")
)
Returns:
value associated with match[section_type], None if no match
'''
# TODO: rewrite this so it is more clear, and handles sets & dictionaries for matching
for i in range(5):
if i >= len(section):
return None
if isinstance(section[i], StringType) and section[i] in match:
return match[section[i]]
return None
|
python
|
def _match_section(section, match):
for i in range(5):
if i >= len(section):
return None
if isinstance(section[i], StringType) and section[i] in match:
return match[section[i]]
return None
|
[
"def",
"_match_section",
"(",
"section",
",",
"match",
")",
":",
"# TODO: rewrite this so it is more clear, and handles sets & dictionaries for matching",
"for",
"i",
"in",
"range",
"(",
"5",
")",
":",
"if",
"i",
">=",
"len",
"(",
"section",
")",
":",
"return",
"None",
"if",
"isinstance",
"(",
"section",
"[",
"i",
"]",
",",
"StringType",
")",
"and",
"section",
"[",
"i",
"]",
"in",
"match",
":",
"return",
"match",
"[",
"section",
"[",
"i",
"]",
"]",
"return",
"None"
] |
checks whether the `type` of section is in the `match` dictionary
Works around the unknown ordering of s-expressions in each section.
For instance, the `type` is the third element for CellBodies
("CellBody"
(Color Yellow)
(CellBody)
(Set "cell10")
)
Returns:
value associated with match[section_type], None if no match
|
[
"checks",
"whether",
"the",
"type",
"of",
"section",
"is",
"in",
"the",
"match",
"dictionary"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L64-L84
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
_get_tokens
|
def _get_tokens(morph_fd):
'''split a file-like into tokens: split on whitespace
Note: this also strips newlines and comments
'''
for line in morph_fd:
line = line.rstrip() # remove \r\n
line = line.split(';', 1)[0] # strip comments
squash_token = [] # quoted strings get squashed into one token
if '<(' in line: # skip spines, which exist on a single line
assert ')>' in line, 'Missing end of spine'
continue
for token in line.replace('(', ' ( ').replace(')', ' ) ').split():
if squash_token:
squash_token.append(token)
if token.endswith('"'):
token = ' '.join(squash_token)
squash_token = []
yield token
elif token.startswith('"') and not token.endswith('"'):
squash_token.append(token)
else:
yield token
|
python
|
def _get_tokens(morph_fd):
for line in morph_fd:
line = line.rstrip()
line = line.split(';', 1)[0]
squash_token = []
if '<(' in line:
assert ')>' in line, 'Missing end of spine'
continue
for token in line.replace('(', ' ( ').replace(')', ' ) ').split():
if squash_token:
squash_token.append(token)
if token.endswith('"'):
token = ' '.join(squash_token)
squash_token = []
yield token
elif token.startswith('"') and not token.endswith('"'):
squash_token.append(token)
else:
yield token
|
[
"def",
"_get_tokens",
"(",
"morph_fd",
")",
":",
"for",
"line",
"in",
"morph_fd",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"# remove \\r\\n",
"line",
"=",
"line",
".",
"split",
"(",
"';'",
",",
"1",
")",
"[",
"0",
"]",
"# strip comments",
"squash_token",
"=",
"[",
"]",
"# quoted strings get squashed into one token",
"if",
"'<('",
"in",
"line",
":",
"# skip spines, which exist on a single line",
"assert",
"')>'",
"in",
"line",
",",
"'Missing end of spine'",
"continue",
"for",
"token",
"in",
"line",
".",
"replace",
"(",
"'('",
",",
"' ( '",
")",
".",
"replace",
"(",
"')'",
",",
"' ) '",
")",
".",
"split",
"(",
")",
":",
"if",
"squash_token",
":",
"squash_token",
".",
"append",
"(",
"token",
")",
"if",
"token",
".",
"endswith",
"(",
"'\"'",
")",
":",
"token",
"=",
"' '",
".",
"join",
"(",
"squash_token",
")",
"squash_token",
"=",
"[",
"]",
"yield",
"token",
"elif",
"token",
".",
"startswith",
"(",
"'\"'",
")",
"and",
"not",
"token",
".",
"endswith",
"(",
"'\"'",
")",
":",
"squash_token",
".",
"append",
"(",
"token",
")",
"else",
":",
"yield",
"token"
] |
split a file-like into tokens: split on whitespace
Note: this also strips newlines and comments
|
[
"split",
"a",
"file",
"-",
"like",
"into",
"tokens",
":",
"split",
"on",
"whitespace"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L87-L111
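The tokenizer's trick is to pad parentheses with spaces so a plain split() separates them, after stripping ';' comments; the squash_token loop then rejoins quoted strings that the split broke apart. A sketch of the raw split on one line, showing why the rejoin is needed:

line = '("Cell Body" (Color Red)) ; trailing comment'
line = line.split(';', 1)[0]  # strip the comment
raw = line.replace('(', ' ( ').replace(')', ' ) ').split()
print(raw)  # ['(', '"Cell', 'Body"', '(', 'Color', 'Red', ')', ')']
# '"Cell' and 'Body"' are the pieces the squash_token loop rejoins into '"Cell Body"'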
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
_parse_section
|
def _parse_section(token_iter):
'''take a stream of tokens, and create the tree structure that is defined
by the s-expressions
'''
sexp = []
for token in token_iter:
if token == '(':
new_sexp = _parse_section(token_iter)
if not _match_section(new_sexp, UNWANTED_SECTIONS):
sexp.append(new_sexp)
elif token == ')':
return sexp
else:
sexp.append(token)
return sexp
|
python
|
def _parse_section(token_iter):
sexp = []
for token in token_iter:
if token == '(':
new_sexp = _parse_section(token_iter)
if not _match_section(new_sexp, UNWANTED_SECTIONS):
sexp.append(new_sexp)
elif token == ')':
return sexp
else:
sexp.append(token)
return sexp
|
[
"def",
"_parse_section",
"(",
"token_iter",
")",
":",
"sexp",
"=",
"[",
"]",
"for",
"token",
"in",
"token_iter",
":",
"if",
"token",
"==",
"'('",
":",
"new_sexp",
"=",
"_parse_section",
"(",
"token_iter",
")",
"if",
"not",
"_match_section",
"(",
"new_sexp",
",",
"UNWANTED_SECTIONS",
")",
":",
"sexp",
".",
"append",
"(",
"new_sexp",
")",
"elif",
"token",
"==",
"')'",
":",
"return",
"sexp",
"else",
":",
"sexp",
".",
"append",
"(",
"token",
")",
"return",
"sexp"
] |
take a stream of tokens, and create the tree structure that is defined
by the s-expressions
|
[
"take",
"a",
"stream",
"of",
"tokens",
"and",
"create",
"the",
"tree",
"structure",
"that",
"is",
"defined",
"by",
"the",
"s",
"-",
"expressions"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L114-L128
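This is a classic recursive descent over a shared token iterator: every '(' recurses, every ')' returns the list built so far. A stripped-down sketch without the UNWANTED_SECTIONS filter:

def parse(tokens):
    sexp = []
    for tok in tokens:
        if tok == '(':
            sexp.append(parse(tokens))  # recurse on the same iterator
        elif tok == ')':
            return sexp
        else:
            sexp.append(tok)
    return sexp

toks = iter(['(', 'Color', '(', 'RGB', '0', '255', '0', ')', ')'])
next(toks)           # consume the opening '(' as _parse_sections does
print(parse(toks))   # ['Color', ['RGB', '0', '255', '0']]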
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
_parse_sections
|
def _parse_sections(morph_fd):
'''returns array of all the sections that exist
The format is nested lists that correspond to the s-expressions
'''
sections = []
token_iter = _get_tokens(morph_fd)
for token in token_iter:
if token == '(': # find top-level sections
section = _parse_section(token_iter)
if not _match_section(section, UNWANTED_SECTIONS):
sections.append(section)
return sections
|
python
|
def _parse_sections(morph_fd):
sections = []
token_iter = _get_tokens(morph_fd)
for token in token_iter:
if token == '(':
section = _parse_section(token_iter)
if not _match_section(section, UNWANTED_SECTIONS):
sections.append(section)
return sections
|
[
"def",
"_parse_sections",
"(",
"morph_fd",
")",
":",
"sections",
"=",
"[",
"]",
"token_iter",
"=",
"_get_tokens",
"(",
"morph_fd",
")",
"for",
"token",
"in",
"token_iter",
":",
"if",
"token",
"==",
"'('",
":",
"# find top-level sections",
"section",
"=",
"_parse_section",
"(",
"token_iter",
")",
"if",
"not",
"_match_section",
"(",
"section",
",",
"UNWANTED_SECTIONS",
")",
":",
"sections",
".",
"append",
"(",
"section",
")",
"return",
"sections"
] |
returns array of all the sections that exist
The format is nested lists that correspond to the s-expressions
|
[
"returns",
"array",
"of",
"all",
"the",
"sections",
"that",
"exist"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L131-L143
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
_flatten_subsection
|
def _flatten_subsection(subsection, _type, offset, parent):
'''Flatten a subsection from its nested version
Args:
subsection: Nested subsection as produced by _parse_section, except one level in
_type: type of section, ie: AXON, etc
parent: first element has this as its parent
offset: position in the final array of the first element
Returns:
Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
'''
for row in subsection:
# TODO: Figure out what these correspond to in neurolucida
if row in ('Low', 'Generated', 'High', ):
continue
elif isinstance(row[0], StringType):
if len(row) in (4, 5, ):
if len(row) == 5:
assert row[4][0] == 'S', \
'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,
_type, offset, parent)
parent = offset
offset += 1
elif isinstance(row[0], list):
split_parent = offset - 1
start_offset = 0
slices = []
start = 0
for i, value in enumerate(row):
if value == '|':
slices.append(slice(start + start_offset, i))
start = i + 1
slices.append(slice(start + start_offset, len(row)))
for split_slice in slices:
for _row in _flatten_subsection(row[split_slice], _type, offset,
split_parent):
offset += 1
yield _row
|
python
|
def _flatten_subsection(subsection, _type, offset, parent):
for row in subsection:
if row in ('Low', 'Generated', 'High', ):
continue
elif isinstance(row[0], StringType):
if len(row) in (4, 5, ):
if len(row) == 5:
assert row[4][0] == 'S', \
'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,
_type, offset, parent)
parent = offset
offset += 1
elif isinstance(row[0], list):
split_parent = offset - 1
start_offset = 0
slices = []
start = 0
for i, value in enumerate(row):
if value == '|':
slices.append(slice(start + start_offset, i))
start = i + 1
slices.append(slice(start + start_offset, len(row)))
for split_slice in slices:
for _row in _flatten_subsection(row[split_slice], _type, offset,
split_parent):
offset += 1
yield _row
|
[
"def",
"_flatten_subsection",
"(",
"subsection",
",",
"_type",
",",
"offset",
",",
"parent",
")",
":",
"for",
"row",
"in",
"subsection",
":",
"# TODO: Figure out what these correspond to in neurolucida",
"if",
"row",
"in",
"(",
"'Low'",
",",
"'Generated'",
",",
"'High'",
",",
")",
":",
"continue",
"elif",
"isinstance",
"(",
"row",
"[",
"0",
"]",
",",
"StringType",
")",
":",
"if",
"len",
"(",
"row",
")",
"in",
"(",
"4",
",",
"5",
",",
")",
":",
"if",
"len",
"(",
"row",
")",
"==",
"5",
":",
"assert",
"row",
"[",
"4",
"]",
"[",
"0",
"]",
"==",
"'S'",
",",
"'Only known usage of a fifth member is Sn, found: %s'",
"%",
"row",
"[",
"4",
"]",
"[",
"0",
"]",
"yield",
"(",
"float",
"(",
"row",
"[",
"0",
"]",
")",
",",
"float",
"(",
"row",
"[",
"1",
"]",
")",
",",
"float",
"(",
"row",
"[",
"2",
"]",
")",
",",
"float",
"(",
"row",
"[",
"3",
"]",
")",
"/",
"2.",
",",
"_type",
",",
"offset",
",",
"parent",
")",
"parent",
"=",
"offset",
"offset",
"+=",
"1",
"elif",
"isinstance",
"(",
"row",
"[",
"0",
"]",
",",
"list",
")",
":",
"split_parent",
"=",
"offset",
"-",
"1",
"start_offset",
"=",
"0",
"slices",
"=",
"[",
"]",
"start",
"=",
"0",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"row",
")",
":",
"if",
"value",
"==",
"'|'",
":",
"slices",
".",
"append",
"(",
"slice",
"(",
"start",
"+",
"start_offset",
",",
"i",
")",
")",
"start",
"=",
"i",
"+",
"1",
"slices",
".",
"append",
"(",
"slice",
"(",
"start",
"+",
"start_offset",
",",
"len",
"(",
"row",
")",
")",
")",
"for",
"split_slice",
"in",
"slices",
":",
"for",
"_row",
"in",
"_flatten_subsection",
"(",
"row",
"[",
"split_slice",
"]",
",",
"_type",
",",
"offset",
",",
"split_parent",
")",
":",
"offset",
"+=",
"1",
"yield",
"_row"
] |
Flatten a subsection from its nested version
Args:
subsection: Nested subsection as produced by _parse_section, except one level in
_type: type of section, ie: AXON, etc
parent: first element has this as its parent
offset: position in the final array of the first element
Returns:
Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
|
[
"Flatten",
"a",
"subsection",
"from",
"its",
"nested",
"version"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L146-L187
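In Neurolucida s-expressions a '|' token separates sibling branches after a bifurcation. The slice bookkeeping in the record splits the row at each '|' and flattens every piece with the pre-split point as its parent (start_offset is always 0 there, so it is dropped below). A sketch of just the splitting step:

row = ['p1', 'p2', '|', 'p3', '|', 'p4', 'p5']
slices, start = [], 0
for i, value in enumerate(row):
    if value == '|':
        slices.append(slice(start, i))
        start = i + 1
slices.append(slice(start, len(row)))
print([row[s] for s in slices])  # [['p1', 'p2'], ['p3'], ['p4', 'p5']]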
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
_extract_section
|
def _extract_section(section):
'''Find top level sections, and get their flat contents, and append them all
Returns a numpy array with the row format:
[X, Y, Z, R, TYPE, ID, PARENT_ID]
Note: PARENT_ID starts at -1 for soma and 0 for neurites
'''
# sections with only one element will be skipped,
if len(section) == 1:
assert section[0] == 'Sections', \
('Only known usage of a single Section content is "Sections", found %s' %
section[0])
return None
# try and detect type
_type = WANTED_SECTIONS.get(section[0][0], None)
start = 1
# CellBody often has [['"CellBody"'], ['CellBody']] as its first two elements
if _type is None:
_type = WANTED_SECTIONS.get(section[1][0], None)
if _type is None: # can't determine the type
return None
start = 2
parent = -1 if _type == POINT_TYPE.SOMA else 0
subsection_iter = _flatten_subsection(section[start:], _type, offset=0,
parent=parent)
ret = np.array([row for row in subsection_iter])
return ret
|
python
|
def _extract_section(section):
if len(section) == 1:
assert section[0] == 'Sections', \
('Only known usage of a single Section content is "Sections", found %s' %
section[0])
return None
_type = WANTED_SECTIONS.get(section[0][0], None)
start = 1
if _type is None:
_type = WANTED_SECTIONS.get(section[1][0], None)
if _type is None:
return None
start = 2
parent = -1 if _type == POINT_TYPE.SOMA else 0
subsection_iter = _flatten_subsection(section[start:], _type, offset=0,
parent=parent)
ret = np.array([row for row in subsection_iter])
return ret
|
[
"def",
"_extract_section",
"(",
"section",
")",
":",
"# sections with only one element will be skipped,",
"if",
"len",
"(",
"section",
")",
"==",
"1",
":",
"assert",
"section",
"[",
"0",
"]",
"==",
"'Sections'",
",",
"(",
"'Only known usage of a single Section content is \"Sections\", found %s'",
"%",
"section",
"[",
"0",
"]",
")",
"return",
"None",
"# try and detect type",
"_type",
"=",
"WANTED_SECTIONS",
".",
"get",
"(",
"section",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"None",
")",
"start",
"=",
"1",
"# CellBody often has [['\"CellBody\"'], ['CellBody'] as its first two elements",
"if",
"_type",
"is",
"None",
":",
"_type",
"=",
"WANTED_SECTIONS",
".",
"get",
"(",
"section",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"None",
")",
"if",
"_type",
"is",
"None",
":",
"# can't determine the type",
"return",
"None",
"start",
"=",
"2",
"parent",
"=",
"-",
"1",
"if",
"_type",
"==",
"POINT_TYPE",
".",
"SOMA",
"else",
"0",
"subsection_iter",
"=",
"_flatten_subsection",
"(",
"section",
"[",
"start",
":",
"]",
",",
"_type",
",",
"offset",
"=",
"0",
",",
"parent",
"=",
"parent",
")",
"ret",
"=",
"np",
".",
"array",
"(",
"[",
"row",
"for",
"row",
"in",
"subsection_iter",
"]",
")",
"return",
"ret"
] |
Find top level sections, and get their flat contents, and append them all
Returns a numpy array with the row format:
[X, Y, Z, R, TYPE, ID, PARENT_ID]
Note: PARENT_ID starts at -1 for soma and 0 for neurites
|
[
"Find",
"top",
"level",
"sections",
"and",
"get",
"their",
"flat",
"contents",
"and",
"append",
"them",
"all"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L190-L222
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
_sections_to_raw_data
|
def _sections_to_raw_data(sections):
'''convert list of sections into the `raw_data` format used in neurom
This finds the soma, and attaches the neurites
'''
soma = None
neurites = []
for section in sections:
neurite = _extract_section(section)
if neurite is None:
continue
elif neurite[0][COLS.TYPE] == POINT_TYPE.SOMA:
assert soma is None, 'Multiple somas defined in file'
soma = neurite
else:
neurites.append(neurite)
assert soma is not None, 'Missing CellBody element (ie. soma)'
total_length = len(soma) + sum(len(neurite) for neurite in neurites)
ret = np.zeros((total_length, 7,), dtype=np.float64)
pos = len(soma)
ret[0:pos, :] = soma
for neurite in neurites:
end = pos + len(neurite)
ret[pos:end, :] = neurite
ret[pos:end, COLS.P] += pos
ret[pos:end, COLS.ID] += pos
# TODO: attach the neurite at the closest point on the soma
ret[pos, COLS.P] = len(soma) - 1
pos = end
return ret
|
python
|
def _sections_to_raw_data(sections):
soma = None
neurites = []
for section in sections:
neurite = _extract_section(section)
if neurite is None:
continue
elif neurite[0][COLS.TYPE] == POINT_TYPE.SOMA:
assert soma is None, 'Multiple somas defined in file'
soma = neurite
else:
neurites.append(neurite)
assert soma is not None, 'Missing CellBody element (ie. soma)'
total_length = len(soma) + sum(len(neurite) for neurite in neurites)
ret = np.zeros((total_length, 7,), dtype=np.float64)
pos = len(soma)
ret[0:pos, :] = soma
for neurite in neurites:
end = pos + len(neurite)
ret[pos:end, :] = neurite
ret[pos:end, COLS.P] += pos
ret[pos:end, COLS.ID] += pos
ret[pos, COLS.P] = len(soma) - 1
pos = end
return ret
|
[
"def",
"_sections_to_raw_data",
"(",
"sections",
")",
":",
"soma",
"=",
"None",
"neurites",
"=",
"[",
"]",
"for",
"section",
"in",
"sections",
":",
"neurite",
"=",
"_extract_section",
"(",
"section",
")",
"if",
"neurite",
"is",
"None",
":",
"continue",
"elif",
"neurite",
"[",
"0",
"]",
"[",
"COLS",
".",
"TYPE",
"]",
"==",
"POINT_TYPE",
".",
"SOMA",
":",
"assert",
"soma",
"is",
"None",
",",
"'Multiple somas defined in file'",
"soma",
"=",
"neurite",
"else",
":",
"neurites",
".",
"append",
"(",
"neurite",
")",
"assert",
"soma",
"is",
"not",
"None",
",",
"'Missing CellBody element (ie. soma)'",
"total_length",
"=",
"len",
"(",
"soma",
")",
"+",
"sum",
"(",
"len",
"(",
"neurite",
")",
"for",
"neurite",
"in",
"neurites",
")",
"ret",
"=",
"np",
".",
"zeros",
"(",
"(",
"total_length",
",",
"7",
",",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"pos",
"=",
"len",
"(",
"soma",
")",
"ret",
"[",
"0",
":",
"pos",
",",
":",
"]",
"=",
"soma",
"for",
"neurite",
"in",
"neurites",
":",
"end",
"=",
"pos",
"+",
"len",
"(",
"neurite",
")",
"ret",
"[",
"pos",
":",
"end",
",",
":",
"]",
"=",
"neurite",
"ret",
"[",
"pos",
":",
"end",
",",
"COLS",
".",
"P",
"]",
"+=",
"pos",
"ret",
"[",
"pos",
":",
"end",
",",
"COLS",
".",
"ID",
"]",
"+=",
"pos",
"# TODO: attach the neurite at the closest point on the soma",
"ret",
"[",
"pos",
",",
"COLS",
".",
"P",
"]",
"=",
"len",
"(",
"soma",
")",
"-",
"1",
"pos",
"=",
"end",
"return",
"ret"
] |
convert list of sections into the `raw_data` format used in neurom
This finds the soma, and attaches the neurites
|
[
"convert",
"list",
"of",
"sections",
"into",
"the",
"raw_data",
"format",
"used",
"in",
"neurom"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L225-L257
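Each neurite block is copied after the soma block, its ID and PARENT columns are shifted by the running offset, and its first point is re-parented onto the last soma point. A small numeric sketch; the column indices are assumptions mirroring the 7-column [X, Y, Z, R, TYPE, ID, PARENT_ID] layout:

import numpy as np

ID, P = 5, 6  # assumed positions of the ID and PARENT_ID columns
soma = np.zeros((2, 7)); soma[:, ID] = [0, 1]; soma[:, P] = [-1, 0]
neurite = np.zeros((2, 7)); neurite[:, ID] = [0, 1]; neurite[:, P] = [0, 0]
ret = np.vstack([soma, neurite])
pos = len(soma)
ret[pos:, ID] += pos             # ids 0, 1 become 2, 3
ret[pos:, P] += pos
ret[pos, P] = len(soma) - 1      # attach the neurite root to the last soma point
print(ret[:, [ID, P]].astype(int).tolist())  # [[0, -1], [1, 0], [2, 1], [3, 2]]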
|
BlueBrain/NeuroM
|
neurom/io/neurolucida.py
|
read
|
def read(morph_file, data_wrapper=DataWrapper):
'''return a 'raw_data' np.array with the full neuron, and the format of the file
suitable to be wrapped by DataWrapper
'''
msg = ('This is an experimental reader. '
'There are no guarantees regarding ability to parse '
'Neurolucida .asc files or correctness of output.')
warnings.warn(msg)
L.warning(msg)
with open(morph_file, encoding='utf-8', errors='replace') as morph_fd:
sections = _parse_sections(morph_fd)
raw_data = _sections_to_raw_data(sections)
return data_wrapper(raw_data, 'NL-ASCII')
|
python
|
def read(morph_file, data_wrapper=DataWrapper):
msg = ('This is an experimental reader. '
'There are no guarantees regarding ability to parse '
'Neurolucida .asc files or correctness of output.')
warnings.warn(msg)
L.warning(msg)
with open(morph_file, encoding='utf-8', errors='replace') as morph_fd:
sections = _parse_sections(morph_fd)
raw_data = _sections_to_raw_data(sections)
return data_wrapper(raw_data, 'NL-ASCII')
|
[
"def",
"read",
"(",
"morph_file",
",",
"data_wrapper",
"=",
"DataWrapper",
")",
":",
"msg",
"=",
"(",
"'This is an experimental reader. '",
"'There are no guarantees regarding ability to parse '",
"'Neurolucida .asc files or correctness of output.'",
")",
"warnings",
".",
"warn",
"(",
"msg",
")",
"L",
".",
"warning",
"(",
"msg",
")",
"with",
"open",
"(",
"morph_file",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")",
"as",
"morph_fd",
":",
"sections",
"=",
"_parse_sections",
"(",
"morph_fd",
")",
"raw_data",
"=",
"_sections_to_raw_data",
"(",
"sections",
")",
"return",
"data_wrapper",
"(",
"raw_data",
",",
"'NL-ASCII'",
")"
] |
return a 'raw_data' np.array with the full neuron, and the format of the file
suitable to be wrapped by DataWrapper
|
[
"return",
"a",
"raw_data",
"np",
".",
"array",
"with",
"the",
"full",
"neuron",
"and",
"the",
"format",
"of",
"the",
"file",
"suitable",
"to",
"be",
"wrapped",
"by",
"DataWrapper"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/io/neurolucida.py#L260-L275
|
BlueBrain/NeuroM
|
examples/get_features.py
|
stats
|
def stats(data):
'''Dictionary with summary stats for data
Returns:
dictionary with length, mean, sum, standard deviation,\
min and max of data
'''
return {'len': len(data),
'mean': np.mean(data),
'sum': np.sum(data),
'std': np.std(data),
'min': np.min(data),
'max': np.max(data)}
|
python
|
def stats(data):
return {'len': len(data),
'mean': np.mean(data),
'sum': np.sum(data),
'std': np.std(data),
'min': np.min(data),
'max': np.max(data)}
|
[
"def",
"stats",
"(",
"data",
")",
":",
"return",
"{",
"'len'",
":",
"len",
"(",
"data",
")",
",",
"'mean'",
":",
"np",
".",
"mean",
"(",
"data",
")",
",",
"'sum'",
":",
"np",
".",
"sum",
"(",
"data",
")",
",",
"'std'",
":",
"np",
".",
"std",
"(",
"data",
")",
",",
"'min'",
":",
"np",
".",
"min",
"(",
"data",
")",
",",
"'max'",
":",
"np",
".",
"max",
"(",
"data",
")",
"}"
] |
Dictionary with summary stats for data
Returns:
dictionary with length, mean, sum, standard deviation,\
min and max of data
|
[
"Dictionary",
"with",
"summary",
"stats",
"for",
"data"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/get_features.py#L43-L55
|
BlueBrain/NeuroM
|
neurom/apps/__init__.py
|
get_config
|
def get_config(config, default_config):
'''Load configuration from file if in config, else use default'''
if not config:
logging.warning('Using default config: %s', default_config)
config = default_config
try:
with open(config, 'r') as config_file:
return yaml.load(config_file)
except (yaml.reader.ReaderError,
yaml.parser.ParserError,
yaml.scanner.ScannerError) as e:
raise ConfigError('Invalid yaml file: \n %s' % str(e))
|
python
|
def get_config(config, default_config):
if not config:
logging.warning('Using default config: %s', default_config)
config = default_config
try:
with open(config, 'r') as config_file:
return yaml.load(config_file)
except (yaml.reader.ReaderError,
yaml.parser.ParserError,
yaml.scanner.ScannerError) as e:
raise ConfigError('Invalid yaml file: \n %s' % str(e))
|
[
"def",
"get_config",
"(",
"config",
",",
"default_config",
")",
":",
"if",
"not",
"config",
":",
"logging",
".",
"warning",
"(",
"'Using default config: %s'",
",",
"default_config",
")",
"config",
"=",
"default_config",
"try",
":",
"with",
"open",
"(",
"config",
",",
"'r'",
")",
"as",
"config_file",
":",
"return",
"yaml",
".",
"load",
"(",
"config_file",
")",
"except",
"(",
"yaml",
".",
"reader",
".",
"ReaderError",
",",
"yaml",
".",
"parser",
".",
"ParserError",
",",
"yaml",
".",
"scanner",
".",
"ScannerError",
")",
"as",
"e",
":",
"raise",
"ConfigError",
"(",
"'Invalid yaml file: \\n %s'",
"%",
"str",
"(",
"e",
")",
")"
] |
Load configuration from file if in config, else use default
|
[
"Load",
"configuration",
"from",
"file",
"if",
"in",
"config",
"else",
"use",
"default"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/__init__.py#L36-L48
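One caution on the record above: bare yaml.load without an explicit Loader is deprecated since PyYAML 5.1 and can construct arbitrary Python objects from tagged input. For plain config files, yaml.safe_load is the usual drop-in; a sketch (not a change to the record itself):

import yaml

text = 'checks:\n  - has_soma\nthreshold: 0.5\n'
config = yaml.safe_load(text)  # raises the same parser/scanner errors, safer loader
print(config)                  # {'checks': ['has_soma'], 'threshold': 0.5}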
|
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
soma_surface_area
|
def soma_surface_area(nrn, neurite_type=NeuriteType.soma):
'''Get the surface area of a neuron's soma.
Note:
The surface area is calculated by assuming the soma is spherical.
'''
assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
return 4 * math.pi * nrn.soma.radius ** 2
|
python
|
def soma_surface_area(nrn, neurite_type=NeuriteType.soma):
assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
return 4 * math.pi * nrn.soma.radius ** 2
|
[
"def",
"soma_surface_area",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"soma",
")",
":",
"assert",
"neurite_type",
"==",
"NeuriteType",
".",
"soma",
",",
"'Neurite type must be soma'",
"return",
"4",
"*",
"math",
".",
"pi",
"*",
"nrn",
".",
"soma",
".",
"radius",
"**",
"2"
] |
Get the surface area of a neuron's soma.
Note:
The surface area is calculated by assuming the soma is spherical.
|
[
"Get",
"the",
"surface",
"area",
"of",
"a",
"neuron",
"s",
"soma",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L46-L53
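The record assumes a spherical soma, so the surface area is 4 * pi * r ** 2; a worked number:

import math

radius = 2.0
print(4 * math.pi * radius ** 2)  # ~50.2655 for r = 2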
|
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
soma_surface_areas
|
def soma_surface_areas(nrn_pop, neurite_type=NeuriteType.soma):
'''Get the surface areas of the somata in a population of neurons
Note:
The surface area is calculated by assuming the soma is spherical.
Note:
If a single neuron is passed, a single element list with the surface
area of its soma member is returned.
'''
nrns = neuron_population(nrn_pop)
assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
return [soma_surface_area(n) for n in nrns]
|
python
|
def soma_surface_areas(nrn_pop, neurite_type=NeuriteType.soma):
nrns = neuron_population(nrn_pop)
assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
return [soma_surface_area(n) for n in nrns]
|
[
"def",
"soma_surface_areas",
"(",
"nrn_pop",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"soma",
")",
":",
"nrns",
"=",
"neuron_population",
"(",
"nrn_pop",
")",
"assert",
"neurite_type",
"==",
"NeuriteType",
".",
"soma",
",",
"'Neurite type must be soma'",
"return",
"[",
"soma_surface_area",
"(",
"n",
")",
"for",
"n",
"in",
"nrns",
"]"
] |
Get the surface areas of the somata in a population of neurons
Note:
The surface area is calculated by assuming the soma is spherical.
Note:
If a single neuron is passed, a single element list with the surface
area of its soma member is returned.
|
[
"Get",
"the",
"surface",
"areas",
"of",
"the",
"somata",
"in",
"a",
"population",
"of",
"neurons"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L56-L67
|
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
soma_radii
|
def soma_radii(nrn_pop, neurite_type=NeuriteType.soma):
''' Get the radii of the somata of a population of neurons
Note:
If a single neuron is passed, a single element list with the
radius of its soma member is returned.
'''
assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
nrns = neuron_population(nrn_pop)
return [n.soma.radius for n in nrns]
|
python
|
def soma_radii(nrn_pop, neurite_type=NeuriteType.soma):
assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
nrns = neuron_population(nrn_pop)
return [n.soma.radius for n in nrns]
|
[
"def",
"soma_radii",
"(",
"nrn_pop",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"soma",
")",
":",
"assert",
"neurite_type",
"==",
"NeuriteType",
".",
"soma",
",",
"'Neurite type must be soma'",
"nrns",
"=",
"neuron_population",
"(",
"nrn_pop",
")",
"return",
"[",
"n",
".",
"soma",
".",
"radius",
"for",
"n",
"in",
"nrns",
"]"
] |
Get the radii of the somata of a population of neurons
Note:
If a single neuron is passed, a single element list with the
radius of its soma member is returned.
|
[
"Get",
"the",
"radii",
"of",
"the",
"somata",
"of",
"a",
"population",
"of",
"neurons"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L70-L79
|
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
trunk_section_lengths
|
def trunk_section_lengths(nrn, neurite_type=NeuriteType.all):
'''list of lengths of trunk sections of neurites in a neuron'''
neurite_filter = is_type(neurite_type)
return [morphmath.section_length(s.root_node.points)
for s in nrn.neurites if neurite_filter(s)]
|
python
|
def trunk_section_lengths(nrn, neurite_type=NeuriteType.all):
neurite_filter = is_type(neurite_type)
return [morphmath.section_length(s.root_node.points)
for s in nrn.neurites if neurite_filter(s)]
|
[
"def",
"trunk_section_lengths",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"neurite_filter",
"=",
"is_type",
"(",
"neurite_type",
")",
"return",
"[",
"morphmath",
".",
"section_length",
"(",
"s",
".",
"root_node",
".",
"points",
")",
"for",
"s",
"in",
"nrn",
".",
"neurites",
"if",
"neurite_filter",
"(",
"s",
")",
"]"
] |
list of lengths of trunk sections of neurites in a neuron
|
[
"list",
"of",
"lengths",
"of",
"trunk",
"sections",
"of",
"neurites",
"in",
"a",
"neuron"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L82-L86
|
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
trunk_origin_radii
|
def trunk_origin_radii(nrn, neurite_type=NeuriteType.all):
'''radii of the trunk sections of neurites in a neuron'''
neurite_filter = is_type(neurite_type)
return [s.root_node.points[0][COLS.R] for s in nrn.neurites if neurite_filter(s)]
|
python
|
def trunk_origin_radii(nrn, neurite_type=NeuriteType.all):
neurite_filter = is_type(neurite_type)
return [s.root_node.points[0][COLS.R] for s in nrn.neurites if neurite_filter(s)]
|
[
"def",
"trunk_origin_radii",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"neurite_filter",
"=",
"is_type",
"(",
"neurite_type",
")",
"return",
"[",
"s",
".",
"root_node",
".",
"points",
"[",
"0",
"]",
"[",
"COLS",
".",
"R",
"]",
"for",
"s",
"in",
"nrn",
".",
"neurites",
"if",
"neurite_filter",
"(",
"s",
")",
"]"
] |
radii of the trunk sections of neurites in a neuron
|
[
"radii",
"of",
"the",
"trunk",
"sections",
"of",
"neurites",
"in",
"a",
"neuron"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L89-L92
|
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
trunk_origin_azimuths
|
def trunk_origin_azimuths(nrn, neurite_type=NeuriteType.all):
'''Get a list of all the trunk origin azimuths of a neuron or population
The azimuth is defined as the angle between the x-axis and the vector
defined by (initial tree point - soma center) on the x-z plane.
The range of the azimuth angle is [-pi, pi] radians.
'''
neurite_filter = is_type(neurite_type)
nrns = neuron_population(nrn)
def _azimuth(section, soma):
'''Azimuth of a section'''
vector = morphmath.vector(section[0], soma.center)
return np.arctan2(vector[COLS.Z], vector[COLS.X])
return [_azimuth(s.root_node.points, n.soma)
for n in nrns
for s in n.neurites if neurite_filter(s)]
|
python
|
def trunk_origin_azimuths(nrn, neurite_type=NeuriteType.all):
neurite_filter = is_type(neurite_type)
nrns = neuron_population(nrn)
def _azimuth(section, soma):
vector = morphmath.vector(section[0], soma.center)
return np.arctan2(vector[COLS.Z], vector[COLS.X])
return [_azimuth(s.root_node.points, n.soma)
for n in nrns
for s in n.neurites if neurite_filter(s)]
|
[
"def",
"trunk_origin_azimuths",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"neurite_filter",
"=",
"is_type",
"(",
"neurite_type",
")",
"nrns",
"=",
"neuron_population",
"(",
"nrn",
")",
"def",
"_azimuth",
"(",
"section",
",",
"soma",
")",
":",
"'''Azimuth of a section'''",
"vector",
"=",
"morphmath",
".",
"vector",
"(",
"section",
"[",
"0",
"]",
",",
"soma",
".",
"center",
")",
"return",
"np",
".",
"arctan2",
"(",
"vector",
"[",
"COLS",
".",
"Z",
"]",
",",
"vector",
"[",
"COLS",
".",
"X",
"]",
")",
"return",
"[",
"_azimuth",
"(",
"s",
".",
"root_node",
".",
"points",
",",
"n",
".",
"soma",
")",
"for",
"n",
"in",
"nrns",
"for",
"s",
"in",
"n",
".",
"neurites",
"if",
"neurite_filter",
"(",
"s",
")",
"]"
] |
Get a list of all the trunk origin azimuths of a neuron or population
The azimuth is defined as the angle between the x-axis and the vector
defined by (initial tree point - soma center) on the x-z plane.
The range of the azimuth angle is [-pi, pi] radians.
|
[
"Get",
"a",
"list",
"of",
"all",
"the",
"trunk",
"origin",
"azimuths",
"of",
"a",
"neuron",
"or",
"population"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L95-L113
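np.arctan2(z, x) gives the signed angle of the (x, z) projection, hence the [-pi, pi] range. A sketch with a made-up trunk-minus-soma vector (assuming morphmath.vector(a, b) returns a - b, which the docstring's "initial tree point - soma center" wording suggests):

import numpy as np

vector = np.array([1.0, 0.0, 1.0])       # assumed (trunk root - soma center), columns x, y, z
print(np.arctan2(vector[2], vector[0]))  # pi/4 ~= 0.7854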
|
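To make the azimuth convention above concrete, a standalone numpy sketch with made-up points (the soma center and trunk start are assumptions, and morphmath.vector(p1, p2) is assumed to return p1 - p2):

import numpy as np

soma_center = np.array([0.0, 0.0, 0.0])   # assumed value
trunk_start = np.array([3.0, 1.0, 3.0])   # assumed value

# vector from the soma center to the trunk start
v = trunk_start - soma_center

# azimuth on the x-z plane, in [-pi, pi]
azimuth = np.arctan2(v[2], v[0])
print(azimuth)  # pi/4 for these points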
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
trunk_origin_elevations
|
def trunk_origin_elevations(nrn, neurite_type=NeuriteType.all):
'''Get a list of all the trunk origin elevations of a neuron or population
The elevation is defined as the angle between the x-axis and the
vector defined by (initial tree point - soma center)
on the x-y half-plane.
The range of the elevation angle is [-pi/2, pi/2] radians
'''
neurite_filter = is_type(neurite_type)
nrns = neuron_population(nrn)
def _elevation(section, soma):
'''Elevation of a section'''
vector = morphmath.vector(section[0], soma.center)
norm_vector = np.linalg.norm(vector)
if norm_vector >= np.finfo(type(norm_vector)).eps:
return np.arcsin(vector[COLS.Y] / norm_vector)
raise ValueError("Norm of vector between soma center and section is almost zero.")
return [_elevation(s.root_node.points, n.soma)
for n in nrns
for s in n.neurites if neurite_filter(s)]
|
python
|
def trunk_origin_elevations(nrn, neurite_type=NeuriteType.all):
neurite_filter = is_type(neurite_type)
nrns = neuron_population(nrn)
def _elevation(section, soma):
vector = morphmath.vector(section[0], soma.center)
norm_vector = np.linalg.norm(vector)
if norm_vector >= np.finfo(type(norm_vector)).eps:
return np.arcsin(vector[COLS.Y] / norm_vector)
raise ValueError("Norm of vector between soma center and section is almost zero.")
return [_elevation(s.root_node.points, n.soma)
for n in nrns
for s in n.neurites if neurite_filter(s)]
|
[
"def",
"trunk_origin_elevations",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"neurite_filter",
"=",
"is_type",
"(",
"neurite_type",
")",
"nrns",
"=",
"neuron_population",
"(",
"nrn",
")",
"def",
"_elevation",
"(",
"section",
",",
"soma",
")",
":",
"'''Elevation of a section'''",
"vector",
"=",
"morphmath",
".",
"vector",
"(",
"section",
"[",
"0",
"]",
",",
"soma",
".",
"center",
")",
"norm_vector",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"vector",
")",
"if",
"norm_vector",
">=",
"np",
".",
"finfo",
"(",
"type",
"(",
"norm_vector",
")",
")",
".",
"eps",
":",
"return",
"np",
".",
"arcsin",
"(",
"vector",
"[",
"COLS",
".",
"Y",
"]",
"/",
"norm_vector",
")",
"raise",
"ValueError",
"(",
"\"Norm of vector between soma center and section is almost zero.\"",
")",
"return",
"[",
"_elevation",
"(",
"s",
".",
"root_node",
".",
"points",
",",
"n",
".",
"soma",
")",
"for",
"n",
"in",
"nrns",
"for",
"s",
"in",
"n",
".",
"neurites",
"if",
"neurite_filter",
"(",
"s",
")",
"]"
] |
Get a list of all the trunk origin elevations of a neuron or population
The elevation is defined as the angle between the x-axis and the
vector defined by (initial tree point - soma center)
on the x-y half-plane.
The range of the elevation angle is [-pi/2, pi/2] radians
|
[
"Get",
"a",
"list",
"of",
"all",
"the",
"trunk",
"origin",
"elevations",
"of",
"a",
"neuron",
"or",
"population"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L116-L139
|
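The elevation formula above reduces to a small standalone function; this sketch re-implements it with plain numpy, including the near-zero-norm guard:

import numpy as np

def elevation(vector):
    '''Elevation in [-pi/2, pi/2] of a 3D vector, guarding against a zero norm.'''
    norm = np.linalg.norm(vector)
    if norm < np.finfo(float).eps:
        raise ValueError('Norm of vector is almost zero.')
    return np.arcsin(vector[1] / norm)

print(elevation(np.array([1.0, 1.0, 0.0])))  # pi/4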
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
trunk_vectors
|
def trunk_vectors(nrn, neurite_type=NeuriteType.all):
'''Calculates the vectors between all the trunks of the neuron
and the soma center.
'''
neurite_filter = is_type(neurite_type)
nrns = neuron_population(nrn)
return np.array([morphmath.vector(s.root_node.points[0], n.soma.center)
for n in nrns
for s in n.neurites if neurite_filter(s)])
|
python
|
def trunk_vectors(nrn, neurite_type=NeuriteType.all):
neurite_filter = is_type(neurite_type)
nrns = neuron_population(nrn)
return np.array([morphmath.vector(s.root_node.points[0], n.soma.center)
for n in nrns
for s in n.neurites if neurite_filter(s)])
|
[
"def",
"trunk_vectors",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"neurite_filter",
"=",
"is_type",
"(",
"neurite_type",
")",
"nrns",
"=",
"neuron_population",
"(",
"nrn",
")",
"return",
"np",
".",
"array",
"(",
"[",
"morphmath",
".",
"vector",
"(",
"s",
".",
"root_node",
".",
"points",
"[",
"0",
"]",
",",
"n",
".",
"soma",
".",
"center",
")",
"for",
"n",
"in",
"nrns",
"for",
"s",
"in",
"n",
".",
"neurites",
"if",
"neurite_filter",
"(",
"s",
")",
"]",
")"
] |
Calculates the vectors between all the trunks of the neuron
and the soma center.
|
[
"Calculates",
"the",
"vectors",
"between",
"all",
"the",
"trunks",
"of",
"the",
"neuron",
"and",
"the",
"soma",
"center",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L142-L151
|
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
trunk_angles
|
def trunk_angles(nrn, neurite_type=NeuriteType.all):
'''Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted starting from the y-axis, anticlockwise.
'''
vectors = trunk_vectors(nrn, neurite_type=neurite_type)
# In order to avoid the failure of the process in case the neurite_type does not exist
if not vectors.size:
return []
def _sort_angle(p1, p2):
"""Angle between p1-p2 to sort vectors"""
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang1 - ang2)
# Sorting angles according to x-y plane
order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1])
for i in vectors[:, 0:2]]))
ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]]
return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1])
for i, _ in enumerate(ordered_vectors)]
|
python
|
def trunk_angles(nrn, neurite_type=NeuriteType.all):
vectors = trunk_vectors(nrn, neurite_type=neurite_type)
if not vectors.size:
return []
def _sort_angle(p1, p2):
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang1 - ang2)
order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1])
for i in vectors[:, 0:2]]))
ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]]
return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1])
for i, _ in enumerate(ordered_vectors)]
|
[
"def",
"trunk_angles",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"vectors",
"=",
"trunk_vectors",
"(",
"nrn",
",",
"neurite_type",
"=",
"neurite_type",
")",
"# In order to avoid the failure of the process in case the neurite_type does not exist",
"if",
"not",
"vectors",
".",
"size",
":",
"return",
"[",
"]",
"def",
"_sort_angle",
"(",
"p1",
",",
"p2",
")",
":",
"\"\"\"Angle between p1-p2 to sort vectors\"\"\"",
"ang1",
"=",
"np",
".",
"arctan2",
"(",
"*",
"p1",
"[",
":",
":",
"-",
"1",
"]",
")",
"ang2",
"=",
"np",
".",
"arctan2",
"(",
"*",
"p2",
"[",
":",
":",
"-",
"1",
"]",
")",
"return",
"(",
"ang1",
"-",
"ang2",
")",
"# Sorting angles according to x-y plane",
"order",
"=",
"np",
".",
"argsort",
"(",
"np",
".",
"array",
"(",
"[",
"_sort_angle",
"(",
"i",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"i",
")",
",",
"[",
"0",
",",
"1",
"]",
")",
"for",
"i",
"in",
"vectors",
"[",
":",
",",
"0",
":",
"2",
"]",
"]",
")",
")",
"ordered_vectors",
"=",
"vectors",
"[",
"order",
"]",
"[",
":",
",",
"[",
"COLS",
".",
"X",
",",
"COLS",
".",
"Y",
"]",
"]",
"return",
"[",
"morphmath",
".",
"angle_between_vectors",
"(",
"ordered_vectors",
"[",
"i",
"]",
",",
"ordered_vectors",
"[",
"i",
"-",
"1",
"]",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"ordered_vectors",
")",
"]"
] |
Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted starting from the y-axis, anticlockwise.
|
[
"Calculates",
"the",
"angles",
"between",
"all",
"the",
"trunks",
"of",
"the",
"neuron",
".",
"The",
"angles",
"are",
"defined",
"on",
"the",
"x",
"-",
"y",
"plane",
"and",
"the",
"trees",
"are",
"sorted",
"from",
"the",
"y",
"axis",
"and",
"anticlock",
"-",
"wise",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L154-L177
|
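A standalone sketch of the sorting step in trunk_angles: 2D vectors are ordered by their signed angle from the y-axis, then consecutive unsigned angles are taken (angle_between is a stand-in for morphmath.angle_between_vectors; the input vectors are assumed values):

import numpy as np

vectors = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, -1.0]])  # assumed trunk vectors

def sort_angle(p):
    '''Signed angle of p relative to the y-axis, used only for ordering.'''
    return np.arctan2(*p[::-1]) - np.arctan2(1, 0)

order = np.argsort([sort_angle(v / np.linalg.norm(v)) for v in vectors])
ordered = vectors[order]

def angle_between(u, v):
    '''Unsigned angle between two 2D vectors.'''
    cos = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.arccos(np.clip(cos, -1.0, 1.0))

print([angle_between(ordered[i], ordered[i - 1]) for i in range(len(ordered))])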
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
sholl_crossings
|
def sholl_crossings(neurites, center, radii):
'''calculate crossings of neurites
Args:
neurites(morph): morphology or neurites on which to perform Sholl analysis
center(point): center of the concentric Sholl circles
radii(iterable of floats): radii for which crossings will be counted
Returns:
Array of same length as radii, with a count of the number of crossings
for the respective radius
'''
def _count_crossings(neurite, radius):
'''count_crossings of segments in neurite with radius'''
r2 = radius ** 2
count = 0
for start, end in iter_segments(neurite):
start_dist2, end_dist2 = (morphmath.point_dist2(center, start),
morphmath.point_dist2(center, end))
count += int(start_dist2 <= r2 <= end_dist2 or
end_dist2 <= r2 <= start_dist2)
return count
return np.array([sum(_count_crossings(neurite, r)
for neurite in iter_neurites(neurites))
for r in radii])
|
python
|
def sholl_crossings(neurites, center, radii):
def _count_crossings(neurite, radius):
r2 = radius ** 2
count = 0
for start, end in iter_segments(neurite):
start_dist2, end_dist2 = (morphmath.point_dist2(center, start),
morphmath.point_dist2(center, end))
count += int(start_dist2 <= r2 <= end_dist2 or
end_dist2 <= r2 <= start_dist2)
return count
return np.array([sum(_count_crossings(neurite, r)
for neurite in iter_neurites(neurites))
for r in radii])
|
[
"def",
"sholl_crossings",
"(",
"neurites",
",",
"center",
",",
"radii",
")",
":",
"def",
"_count_crossings",
"(",
"neurite",
",",
"radius",
")",
":",
"'''count_crossings of segments in neurite with radius'''",
"r2",
"=",
"radius",
"**",
"2",
"count",
"=",
"0",
"for",
"start",
",",
"end",
"in",
"iter_segments",
"(",
"neurite",
")",
":",
"start_dist2",
",",
"end_dist2",
"=",
"(",
"morphmath",
".",
"point_dist2",
"(",
"center",
",",
"start",
")",
",",
"morphmath",
".",
"point_dist2",
"(",
"center",
",",
"end",
")",
")",
"count",
"+=",
"int",
"(",
"start_dist2",
"<=",
"r2",
"<=",
"end_dist2",
"or",
"end_dist2",
"<=",
"r2",
"<=",
"start_dist2",
")",
"return",
"count",
"return",
"np",
".",
"array",
"(",
"[",
"sum",
"(",
"_count_crossings",
"(",
"neurite",
",",
"r",
")",
"for",
"neurite",
"in",
"iter_neurites",
"(",
"neurites",
")",
")",
"for",
"r",
"in",
"radii",
"]",
")"
] |
calculate crossings of neurites
Args:
neurites(morph): morphology or neurites on which to perform Sholl analysis
center(point): center of the concentric Sholl circles
radii(iterable of floats): radii for which crossings will be counted
Returns:
Array of same length as radii, with a count of the number of crossings
for the respective radius
|
[
"calculate",
"crossings",
"of",
"neurites"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L180-L206
|
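The crossing test above only compares squared endpoint distances against the squared radius; a self-contained sketch with synthetic segments (all values assumed):

import numpy as np

def count_crossings(segments, center, radius):
    '''Count segments whose endpoints straddle a sphere of the given radius.'''
    r2 = radius ** 2
    count = 0
    for start, end in segments:
        d_start = np.sum((np.asarray(start) - center) ** 2)
        d_end = np.sum((np.asarray(end) - center) ** 2)
        count += int(d_start <= r2 <= d_end or d_end <= r2 <= d_start)
    return count

center = np.zeros(3)
segments = [([0.5, 0, 0], [1.5, 0, 0]), ([2.0, 0, 0], [3.0, 0, 0])]
print(count_crossings(segments, center, 1.0))  # 1: only the first segment crosses r=1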
BlueBrain/NeuroM
|
neurom/fst/_neuronfunc.py
|
sholl_frequency
|
def sholl_frequency(nrn, neurite_type=NeuriteType.all, step_size=10):
'''perform Sholl frequency calculations on a population of neurites
Args:
nrn(morph): nrn or population
neurite_type(NeuriteType): which neurites to operate on
step_size(float): step size between Sholl radii
Note:
Given a neuron, the soma center is used for the concentric circles,
which range from the soma radius to the maximum radial distance
in steps of `step_size`. When a population is given, the concentric
circles range from the smallest soma radius to the largest radial neurite
distance. Finally, each segment of the neuron is tested, so a neurite that
bends back on itself, and crosses the same Sholl radius will get counted as
having crossed multiple times.
'''
nrns = neuron_population(nrn)
neurite_filter = is_type(neurite_type)
min_soma_edge = float('Inf')
max_radii = 0
neurites_list = []
for neuron in nrns:
neurites_list.extend(((neurites, neuron.soma.center)
for neurites in neuron.neurites
if neurite_filter(neurites)))
min_soma_edge = min(min_soma_edge, neuron.soma.radius)
max_radii = max(max_radii, np.max(np.abs(bounding_box(neuron))))
radii = np.arange(min_soma_edge, max_radii + step_size, step_size)
ret = np.zeros_like(radii)
for neurites, center in neurites_list:
ret += sholl_crossings(neurites, center, radii)
return ret
|
python
|
def sholl_frequency(nrn, neurite_type=NeuriteType.all, step_size=10):
nrns = neuron_population(nrn)
neurite_filter = is_type(neurite_type)
min_soma_edge = float('Inf')
max_radii = 0
neurites_list = []
for neuron in nrns:
neurites_list.extend(((neurites, neuron.soma.center)
for neurites in neuron.neurites
if neurite_filter(neurites)))
min_soma_edge = min(min_soma_edge, neuron.soma.radius)
max_radii = max(max_radii, np.max(np.abs(bounding_box(neuron))))
radii = np.arange(min_soma_edge, max_radii + step_size, step_size)
ret = np.zeros_like(radii)
for neurites, center in neurites_list:
ret += sholl_crossings(neurites, center, radii)
return ret
|
[
"def",
"sholl_frequency",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
",",
"step_size",
"=",
"10",
")",
":",
"nrns",
"=",
"neuron_population",
"(",
"nrn",
")",
"neurite_filter",
"=",
"is_type",
"(",
"neurite_type",
")",
"min_soma_edge",
"=",
"float",
"(",
"'Inf'",
")",
"max_radii",
"=",
"0",
"neurites_list",
"=",
"[",
"]",
"for",
"neuron",
"in",
"nrns",
":",
"neurites_list",
".",
"extend",
"(",
"(",
"(",
"neurites",
",",
"neuron",
".",
"soma",
".",
"center",
")",
"for",
"neurites",
"in",
"neuron",
".",
"neurites",
"if",
"neurite_filter",
"(",
"neurites",
")",
")",
")",
"min_soma_edge",
"=",
"min",
"(",
"min_soma_edge",
",",
"neuron",
".",
"soma",
".",
"radius",
")",
"max_radii",
"=",
"max",
"(",
"max_radii",
",",
"np",
".",
"max",
"(",
"np",
".",
"abs",
"(",
"bounding_box",
"(",
"neuron",
")",
")",
")",
")",
"radii",
"=",
"np",
".",
"arange",
"(",
"min_soma_edge",
",",
"max_radii",
"+",
"step_size",
",",
"step_size",
")",
"ret",
"=",
"np",
".",
"zeros_like",
"(",
"radii",
")",
"for",
"neurites",
",",
"center",
"in",
"neurites_list",
":",
"ret",
"+=",
"sholl_crossings",
"(",
"neurites",
",",
"center",
",",
"radii",
")",
"return",
"ret"
] |
perform Sholl frequency calculations on a population of neurites
Args:
nrn(morph): nrn or population
neurite_type(NeuriteType): which neurites to operate on
step_size(float): step size between Sholl radii
Note:
Given a neuron, the soma center is used for the concentric circles,
which range from the soma radius to the maximum radial distance
in steps of `step_size`. When a population is given, the concentric
circles range from the smallest soma radius to the largest radial neurite
distance. Finally, each segment of the neuron is tested, so a neurite that
bends back on itself, and crosses the same Sholl radius will get counted as
having crossed multiple times.
|
[
"perform",
"Sholl",
"frequency",
"calculations",
"on",
"a",
"population",
"of",
"neurites"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L209-L245
|
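A hedged usage sketch; 'some_morphology.swc' is a placeholder path and 'sholl_frequency' is assumed to be a registered neuron feature name in this NeuroM version:

import neurom as nm

nrn = nm.load_neuron('some_morphology.swc')  # placeholder path
freq = nm.get('sholl_frequency', nrn,
              neurite_type=nm.NeuriteType.basal_dendrite, step_size=10)
print(freq)  # one crossing count per concentric radius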
BlueBrain/NeuroM
|
examples/plot_features.py
|
dist_points
|
def dist_points(bin_edges, d):
"""Return an array of values according to a distribution
Points are calculated at the center of each bin
"""
bc = bin_centers(bin_edges)
if d is not None:
d = DISTS[d['type']](d, bc)
return d, bc
|
python
|
def dist_points(bin_edges, d):
bc = bin_centers(bin_edges)
if d is not None:
d = DISTS[d['type']](d, bc)
return d, bc
|
[
"def",
"dist_points",
"(",
"bin_edges",
",",
"d",
")",
":",
"bc",
"=",
"bin_centers",
"(",
"bin_edges",
")",
"if",
"d",
"is",
"not",
"None",
":",
"d",
"=",
"DISTS",
"[",
"d",
"[",
"'type'",
"]",
"]",
"(",
"d",
",",
"bc",
")",
"return",
"d",
",",
"bc"
] |
Return an array of values according to a distribution.
Points are calculated at the center of each bin
|
[
"Return",
"an",
"array",
"of",
"values",
"according",
"to",
"a",
"distribution"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L70-L78
|
BlueBrain/NeuroM
|
examples/plot_features.py
|
calc_limits
|
def calc_limits(data, dist=None, padding=0.25):
"""Calculate a suitable range for a histogram
Returns:
tuple of (min, max)
"""
dmin = sys.float_info.max if dist is None else dist.get('min',
sys.float_info.max)
dmax = sys.float_info.min if dist is None else dist.get('max',
sys.float_info.min)
_min = min(min(data), dmin)
_max = max(max(data), dmax)
padding = padding * (_max - _min)
return _min - padding, _max + padding
|
python
|
def calc_limits(data, dist=None, padding=0.25):
dmin = sys.float_info.max if dist is None else dist.get('min',
sys.float_info.max)
dmax = sys.float_info.min if dist is None else dist.get('max',
sys.float_info.min)
_min = min(min(data), dmin)
_max = max(max(data), dmax)
padding = padding * (_max - _min)
return _min - padding, _max + padding
|
[
"def",
"calc_limits",
"(",
"data",
",",
"dist",
"=",
"None",
",",
"padding",
"=",
"0.25",
")",
":",
"dmin",
"=",
"sys",
".",
"float_info",
".",
"max",
"if",
"dist",
"is",
"None",
"else",
"dist",
".",
"get",
"(",
"'min'",
",",
"sys",
".",
"float_info",
".",
"max",
")",
"dmax",
"=",
"sys",
".",
"float_info",
".",
"min",
"if",
"dist",
"is",
"None",
"else",
"dist",
".",
"get",
"(",
"'max'",
",",
"sys",
".",
"float_info",
".",
"min",
")",
"_min",
"=",
"min",
"(",
"min",
"(",
"data",
")",
",",
"dmin",
")",
"_max",
"=",
"max",
"(",
"max",
"(",
"data",
")",
",",
"dmax",
")",
"padding",
"=",
"padding",
"*",
"(",
"_max",
"-",
"_min",
")",
"return",
"_min",
"-",
"padding",
",",
"_max",
"+",
"padding"
] |
Calculate a suitable range for a histogram
Returns:
tuple of (min, max)
|
[
"Calculate",
"a",
"suitable",
"range",
"for",
"a",
"histogram"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L81-L95
|
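The padding arithmetic is easiest to see on a toy input; this simplified sketch drops the optional distribution bounds of the original:

def calc_limits_simple(data, padding=0.25):
    '''Padded (min, max) range for a histogram.'''
    lo, hi = min(data), max(data)
    pad = padding * (hi - lo)
    return lo - pad, hi + pad

print(calc_limits_simple([1.0, 2.0, 3.0]))  # (0.5, 3.5): [1, 3] padded by 25% each side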
BlueBrain/NeuroM
|
examples/plot_features.py
|
load_neurite_features
|
def load_neurite_features(filepath):
'''Unpack relevant data into megadict'''
stuff = defaultdict(lambda: defaultdict(list))
nrns = nm.load_neurons(filepath)
# unpack data into arrays
for nrn in nrns:
for t in NEURITES_:
for feat in FEATURES:
stuff[feat][str(t).split('.')[1]].extend(
nm.get(feat, nrn, neurite_type=t)
)
return stuff
|
python
|
def load_neurite_features(filepath):
stuff = defaultdict(lambda: defaultdict(list))
nrns = nm.load_neurons(filepath)
for nrn in nrns:
for t in NEURITES_:
for feat in FEATURES:
stuff[feat][str(t).split('.')[1]].extend(
nm.get(feat, nrn, neurite_type=t)
)
return stuff
|
[
"def",
"load_neurite_features",
"(",
"filepath",
")",
":",
"stuff",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"list",
")",
")",
"nrns",
"=",
"nm",
".",
"load_neurons",
"(",
"filepath",
")",
"# unpack data into arrays",
"for",
"nrn",
"in",
"nrns",
":",
"for",
"t",
"in",
"NEURITES_",
":",
"for",
"feat",
"in",
"FEATURES",
":",
"stuff",
"[",
"feat",
"]",
"[",
"str",
"(",
"t",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"]",
".",
"extend",
"(",
"nm",
".",
"get",
"(",
"feat",
",",
"nrn",
",",
"neurite_type",
"=",
"t",
")",
")",
"return",
"stuff"
] |
Unpack relevant data into megadict
|
[
"Unpack",
"relevant",
"data",
"into",
"megadict"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L112-L123
|
BlueBrain/NeuroM
|
examples/plot_features.py
|
parse_args
|
def parse_args():
'''Parse command line arguments'''
parser = argparse.ArgumentParser(
description='Morphology feature plotter',
epilog='Note: Makes plots of various features and superimposes\
input distributions. Plots are saved to PDF file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('datapath',
help='Morphology data directory path')
parser.add_argument('--mtypeconfig',
required=True,
help='Get mtype JSON configuration file')
parser.add_argument('--output',
default='plots.pdf',
help='Output PDF file name')
return parser.parse_args()
|
python
|
def parse_args():
parser = argparse.ArgumentParser(
description='Morphology feature plotter',
epilog='Note: Makes plots of various features and superimposes\
input distributions. Plots are saved to PDF file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('datapath',
help='Morphology data directory path')
parser.add_argument('--mtypeconfig',
required=True,
help='Get mtype JSON configuration file')
parser.add_argument('--output',
default='plots.pdf',
help='Output PDF file name')
return parser.parse_args()
|
[
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Morphology feature plotter'",
",",
"epilog",
"=",
"'Note: Makes plots of various features and superimposes\\\n input distributions. Plots are saved to PDF file.'",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'datapath'",
",",
"help",
"=",
"'Morphology data directory path'",
")",
"parser",
".",
"add_argument",
"(",
"'--mtypeconfig'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Get mtype JSON configuration file'",
")",
"parser",
".",
"add_argument",
"(",
"'--output'",
",",
"default",
"=",
"'plots.pdf'",
",",
"help",
"=",
"'Output PDF file name'",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] |
Parse command line arguments
|
[
"Parse",
"command",
"line",
"arguments"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L129-L147
|
BlueBrain/NeuroM
|
examples/plot_features.py
|
main
|
def main(data_dir, mtype_file): # pylint: disable=too-many-locals
'''Run the stuff'''
# data structure to store results
stuff = load_neurite_features(data_dir)
sim_params = json.load(open(mtype_file))
# load histograms, distribution parameter sets and figures into arrays.
# To plot figures, do
# plots[i].fig.show()
# To modify an axis, do
# plots[i].ax.something()
_plots = []
for feat, d in stuff.items():
for typ, data in d.items():
dist = sim_params['components'][typ].get(feat, None)
print('Type = %s, Feature = %s, Distribution = %s' % (typ, feat, dist))
# if no data available, skip this feature
if not data:
print("No data found for feature %s (%s)" % (feat, typ))
continue
# print 'DATA', data
num_bins = 100
limits = calc_limits(data, dist)
bin_edges = np.linspace(limits[0], limits[1], num_bins + 1)
histo = np.histogram(data, bin_edges, normed=True)
print('PLOT LIMITS:', limits)
# print 'DATA:', data
# print 'BIN HEIGHT', histo[0]
plot = Plot(*view_utils.get_figure(new_fig=True, subplot=111))
plot.ax.set_xlim(*limits)
plot.ax.bar(histo[1][:-1], histo[0], width=bin_widths(histo[1]))
dp, bc = dist_points(histo[1], dist)
# print 'BIN CENTERS:', bc, len(bc)
if dp is not None:
# print 'DIST POINTS:', dp, len(dp)
plot.ax.plot(bc, dp, 'r*')
plot.ax.set_title('%s (%s)' % (feat, typ))
_plots.append(plot)
return _plots
|
python
|
def main(data_dir, mtype_file):
stuff = load_neurite_features(data_dir)
sim_params = json.load(open(mtype_file))
_plots = []
for feat, d in stuff.items():
for typ, data in d.items():
dist = sim_params['components'][typ].get(feat, None)
print('Type = %s, Feature = %s, Distribution = %s' % (typ, feat, dist))
if not data:
print("No data found for feature %s (%s)" % (feat, typ))
continue
num_bins = 100
limits = calc_limits(data, dist)
bin_edges = np.linspace(limits[0], limits[1], num_bins + 1)
histo = np.histogram(data, bin_edges, normed=True)
print('PLOT LIMITS:', limits)
plot = Plot(*view_utils.get_figure(new_fig=True, subplot=111))
plot.ax.set_xlim(*limits)
plot.ax.bar(histo[1][:-1], histo[0], width=bin_widths(histo[1]))
dp, bc = dist_points(histo[1], dist)
if dp is not None:
plot.ax.plot(bc, dp, 'r*')
plot.ax.set_title('%s (%s)' % (feat, typ))
_plots.append(plot)
return _plots
|
[
"def",
"main",
"(",
"data_dir",
",",
"mtype_file",
")",
":",
"# pylint: disable=too-many-locals",
"# data structure to store results",
"stuff",
"=",
"load_neurite_features",
"(",
"data_dir",
")",
"sim_params",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"mtype_file",
")",
")",
"# load histograms, distribution parameter sets and figures into arrays.",
"# To plot figures, do",
"# plots[i].fig.show()",
"# To modify an axis, do",
"# plots[i].ax.something()",
"_plots",
"=",
"[",
"]",
"for",
"feat",
",",
"d",
"in",
"stuff",
".",
"items",
"(",
")",
":",
"for",
"typ",
",",
"data",
"in",
"d",
".",
"items",
"(",
")",
":",
"dist",
"=",
"sim_params",
"[",
"'components'",
"]",
"[",
"typ",
"]",
".",
"get",
"(",
"feat",
",",
"None",
")",
"print",
"(",
"'Type = %s, Feature = %s, Distribution = %s'",
"%",
"(",
"typ",
",",
"feat",
",",
"dist",
")",
")",
"# if no data available, skip this feature",
"if",
"not",
"data",
":",
"print",
"(",
"\"No data found for feature %s (%s)\"",
"%",
"(",
"feat",
",",
"typ",
")",
")",
"continue",
"# print 'DATA', data",
"num_bins",
"=",
"100",
"limits",
"=",
"calc_limits",
"(",
"data",
",",
"dist",
")",
"bin_edges",
"=",
"np",
".",
"linspace",
"(",
"limits",
"[",
"0",
"]",
",",
"limits",
"[",
"1",
"]",
",",
"num_bins",
"+",
"1",
")",
"histo",
"=",
"np",
".",
"histogram",
"(",
"data",
",",
"bin_edges",
",",
"normed",
"=",
"True",
")",
"print",
"(",
"'PLOT LIMITS:'",
",",
"limits",
")",
"# print 'DATA:', data",
"# print 'BIN HEIGHT', histo[0]",
"plot",
"=",
"Plot",
"(",
"*",
"view_utils",
".",
"get_figure",
"(",
"new_fig",
"=",
"True",
",",
"subplot",
"=",
"111",
")",
")",
"plot",
".",
"ax",
".",
"set_xlim",
"(",
"*",
"limits",
")",
"plot",
".",
"ax",
".",
"bar",
"(",
"histo",
"[",
"1",
"]",
"[",
":",
"-",
"1",
"]",
",",
"histo",
"[",
"0",
"]",
",",
"width",
"=",
"bin_widths",
"(",
"histo",
"[",
"1",
"]",
")",
")",
"dp",
",",
"bc",
"=",
"dist_points",
"(",
"histo",
"[",
"1",
"]",
",",
"dist",
")",
"# print 'BIN CENTERS:', bc, len(bc)",
"if",
"dp",
"is",
"not",
"None",
":",
"# print 'DIST POINTS:', dp, len(dp)",
"plot",
".",
"ax",
".",
"plot",
"(",
"bc",
",",
"dp",
",",
"'r*'",
")",
"plot",
".",
"ax",
".",
"set_title",
"(",
"'%s (%s)'",
"%",
"(",
"feat",
",",
"typ",
")",
")",
"_plots",
".",
"append",
"(",
"plot",
")",
"return",
"_plots"
] |
Run the stuff
|
[
"Run",
"the",
"stuff"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_features.py#L150-L191
|
BlueBrain/NeuroM
|
examples/density_plot.py
|
extract_density
|
def extract_density(population, plane='xy', bins=100, neurite_type=NeuriteType.basal_dendrite):
'''Extracts the 2d histogram of the center
coordinates of segments in the selected plane.
'''
segment_midpoints = get_feat('segment_midpoints', population, neurite_type=neurite_type)
horiz = segment_midpoints[:, 'xyz'.index(plane[0])]
vert = segment_midpoints[:, 'xyz'.index(plane[1])]
return np.histogram2d(np.array(horiz), np.array(vert), bins=(bins, bins))
|
python
|
def extract_density(population, plane='xy', bins=100, neurite_type=NeuriteType.basal_dendrite):
segment_midpoints = get_feat('segment_midpoints', population, neurite_type=neurite_type)
horiz = segment_midpoints[:, 'xyz'.index(plane[0])]
vert = segment_midpoints[:, 'xyz'.index(plane[1])]
return np.histogram2d(np.array(horiz), np.array(vert), bins=(bins, bins))
|
[
"def",
"extract_density",
"(",
"population",
",",
"plane",
"=",
"'xy'",
",",
"bins",
"=",
"100",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"basal_dendrite",
")",
":",
"segment_midpoints",
"=",
"get_feat",
"(",
"'segment_midpoints'",
",",
"population",
",",
"neurite_type",
"=",
"neurite_type",
")",
"horiz",
"=",
"segment_midpoints",
"[",
":",
",",
"'xyz'",
".",
"index",
"(",
"plane",
"[",
"0",
"]",
")",
"]",
"vert",
"=",
"segment_midpoints",
"[",
":",
",",
"'xyz'",
".",
"index",
"(",
"plane",
"[",
"1",
"]",
")",
"]",
"return",
"np",
".",
"histogram2d",
"(",
"np",
".",
"array",
"(",
"horiz",
")",
",",
"np",
".",
"array",
"(",
"vert",
")",
",",
"bins",
"=",
"(",
"bins",
",",
"bins",
")",
")"
] |
Extracts the 2d histogram of the center
coordinates of segments in the selected plane.
|
[
"Extracts",
"the",
"2d",
"histogram",
"of",
"the",
"center",
"coordinates",
"of",
"segments",
"in",
"the",
"selected",
"plane",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/density_plot.py#L39-L46
|
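The plane selection and binning above amount to one numpy call; a standalone sketch with random midpoints standing in for the feature extraction:

import numpy as np

midpoints = np.random.uniform(-100, 100, size=(1000, 3))  # assumed stand-in data

plane = 'xy'
horiz = midpoints[:, 'xyz'.index(plane[0])]
vert = midpoints[:, 'xyz'.index(plane[1])]

H, xedges, yedges = np.histogram2d(horiz, vert, bins=(100, 100))
print(H.shape, xedges.shape)  # (100, 100) (101,)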
BlueBrain/NeuroM
|
examples/density_plot.py
|
plot_density
|
def plot_density(population, # pylint: disable=too-many-arguments, too-many-locals
bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
colorlabel='Nodes per unit area', labelfontsize=16,
color_map='Reds', no_colorbar=False, threshold=0.01,
neurite_type=NeuriteType.basal_dendrite, **kwargs):
'''Plots the 2d histogram of the center
coordinates of segments in the selected plane.
'''
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
H1, xedges1, yedges1 = extract_density(population, plane=plane, bins=bins,
neurite_type=neurite_type)
mask = H1 < threshold # mask = H1==0
H2 = np.ma.masked_array(H1, mask)
getattr(plt.cm, color_map).set_bad(color='white', alpha=None)
plots = ax.contourf((xedges1[:-1] + xedges1[1:]) / 2,
(yedges1[:-1] + yedges1[1:]) / 2,
np.transpose(H2), # / np.max(H2),
cmap=getattr(plt.cm, color_map), levels=levels)
if not no_colorbar:
cbar = plt.colorbar(plots)
cbar.ax.set_ylabel(colorlabel, fontsize=labelfontsize)
kwargs['title'] = kwargs.get('title', '')
kwargs['xlabel'] = kwargs.get('xlabel', plane[0])
kwargs['ylabel'] = kwargs.get('ylabel', plane[1])
return common.plot_style(fig=fig, ax=ax, **kwargs)
|
python
|
def plot_density(population,
bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
colorlabel='Nodes per unit area', labelfontsize=16,
color_map='Reds', no_colorbar=False, threshold=0.01,
neurite_type=NeuriteType.basal_dendrite, **kwargs):
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
H1, xedges1, yedges1 = extract_density(population, plane=plane, bins=bins,
neurite_type=neurite_type)
mask = H1 < threshold
H2 = np.ma.masked_array(H1, mask)
getattr(plt.cm, color_map).set_bad(color='white', alpha=None)
plots = ax.contourf((xedges1[:-1] + xedges1[1:]) / 2,
(yedges1[:-1] + yedges1[1:]) / 2,
np.transpose(H2),
cmap=getattr(plt.cm, color_map), levels=levels)
if not no_colorbar:
cbar = plt.colorbar(plots)
cbar.ax.set_ylabel(colorlabel, fontsize=labelfontsize)
kwargs['title'] = kwargs.get('title', '')
kwargs['xlabel'] = kwargs.get('xlabel', plane[0])
kwargs['ylabel'] = kwargs.get('ylabel', plane[1])
return common.plot_style(fig=fig, ax=ax, **kwargs)
|
[
"def",
"plot_density",
"(",
"population",
",",
"# pylint: disable=too-many-arguments, too-many-locals",
"bins",
"=",
"100",
",",
"new_fig",
"=",
"True",
",",
"subplot",
"=",
"111",
",",
"levels",
"=",
"None",
",",
"plane",
"=",
"'xy'",
",",
"colorlabel",
"=",
"'Nodes per unit area'",
",",
"labelfontsize",
"=",
"16",
",",
"color_map",
"=",
"'Reds'",
",",
"no_colorbar",
"=",
"False",
",",
"threshold",
"=",
"0.01",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"basal_dendrite",
",",
"*",
"*",
"kwargs",
")",
":",
"fig",
",",
"ax",
"=",
"common",
".",
"get_figure",
"(",
"new_fig",
"=",
"new_fig",
",",
"subplot",
"=",
"subplot",
")",
"H1",
",",
"xedges1",
",",
"yedges1",
"=",
"extract_density",
"(",
"population",
",",
"plane",
"=",
"plane",
",",
"bins",
"=",
"bins",
",",
"neurite_type",
"=",
"neurite_type",
")",
"mask",
"=",
"H1",
"<",
"threshold",
"# mask = H1==0",
"H2",
"=",
"np",
".",
"ma",
".",
"masked_array",
"(",
"H1",
",",
"mask",
")",
"getattr",
"(",
"plt",
".",
"cm",
",",
"color_map",
")",
".",
"set_bad",
"(",
"color",
"=",
"'white'",
",",
"alpha",
"=",
"None",
")",
"plots",
"=",
"ax",
".",
"contourf",
"(",
"(",
"xedges1",
"[",
":",
"-",
"1",
"]",
"+",
"xedges1",
"[",
"1",
":",
"]",
")",
"/",
"2",
",",
"(",
"yedges1",
"[",
":",
"-",
"1",
"]",
"+",
"yedges1",
"[",
"1",
":",
"]",
")",
"/",
"2",
",",
"np",
".",
"transpose",
"(",
"H2",
")",
",",
"# / np.max(H2),",
"cmap",
"=",
"getattr",
"(",
"plt",
".",
"cm",
",",
"color_map",
")",
",",
"levels",
"=",
"levels",
")",
"if",
"not",
"no_colorbar",
":",
"cbar",
"=",
"plt",
".",
"colorbar",
"(",
"plots",
")",
"cbar",
".",
"ax",
".",
"set_ylabel",
"(",
"colorlabel",
",",
"fontsize",
"=",
"labelfontsize",
")",
"kwargs",
"[",
"'title'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'title'",
",",
"''",
")",
"kwargs",
"[",
"'xlabel'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'xlabel'",
",",
"plane",
"[",
"0",
"]",
")",
"kwargs",
"[",
"'ylabel'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'ylabel'",
",",
"plane",
"[",
"1",
"]",
")",
"return",
"common",
".",
"plot_style",
"(",
"fig",
"=",
"fig",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")"
] |
Plots the 2d histogram of the center
coordinates of segments in the selected plane.
|
[
"Plots",
"the",
"2d",
"histogram",
"of",
"the",
"center",
"coordinates",
"of",
"segments",
"in",
"the",
"selected",
"plane",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/density_plot.py#L49-L80
|
BlueBrain/NeuroM
|
examples/density_plot.py
|
plot_neuron_on_density
|
def plot_neuron_on_density(population, # pylint: disable=too-many-arguments
bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
colorlabel='Nodes per unit area', labelfontsize=16,
color_map='Reds', no_colorbar=False, threshold=0.01,
neurite_type=NeuriteType.basal_dendrite, **kwargs):
'''Plots the 2d histogram of the center
coordinates of segments in the selected plane
and superimposes the view of the first neurite of the collection.
'''
_, ax = common.get_figure(new_fig=new_fig)
view.plot_tree(ax, population.neurites[0])
return plot_density(population, plane=plane, bins=bins, new_fig=False, subplot=subplot,
colorlabel=colorlabel, labelfontsize=labelfontsize, levels=levels,
color_map=color_map, no_colorbar=no_colorbar, threshold=threshold,
neurite_type=neurite_type, **kwargs)
|
python
|
def plot_neuron_on_density(population,
bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
colorlabel='Nodes per unit area', labelfontsize=16,
color_map='Reds', no_colorbar=False, threshold=0.01,
neurite_type=NeuriteType.basal_dendrite, **kwargs):
_, ax = common.get_figure(new_fig=new_fig)
view.plot_tree(ax, population.neurites[0])
return plot_density(population, plane=plane, bins=bins, new_fig=False, subplot=subplot,
colorlabel=colorlabel, labelfontsize=labelfontsize, levels=levels,
color_map=color_map, no_colorbar=no_colorbar, threshold=threshold,
neurite_type=neurite_type, **kwargs)
|
[
"def",
"plot_neuron_on_density",
"(",
"population",
",",
"# pylint: disable=too-many-arguments",
"bins",
"=",
"100",
",",
"new_fig",
"=",
"True",
",",
"subplot",
"=",
"111",
",",
"levels",
"=",
"None",
",",
"plane",
"=",
"'xy'",
",",
"colorlabel",
"=",
"'Nodes per unit area'",
",",
"labelfontsize",
"=",
"16",
",",
"color_map",
"=",
"'Reds'",
",",
"no_colorbar",
"=",
"False",
",",
"threshold",
"=",
"0.01",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"basal_dendrite",
",",
"*",
"*",
"kwargs",
")",
":",
"_",
",",
"ax",
"=",
"common",
".",
"get_figure",
"(",
"new_fig",
"=",
"new_fig",
")",
"view",
".",
"plot_tree",
"(",
"ax",
",",
"population",
".",
"neurites",
"[",
"0",
"]",
")",
"return",
"plot_density",
"(",
"population",
",",
"plane",
"=",
"plane",
",",
"bins",
"=",
"bins",
",",
"new_fig",
"=",
"False",
",",
"subplot",
"=",
"subplot",
",",
"colorlabel",
"=",
"colorlabel",
",",
"labelfontsize",
"=",
"labelfontsize",
",",
"levels",
"=",
"levels",
",",
"color_map",
"=",
"color_map",
",",
"no_colorbar",
"=",
"no_colorbar",
",",
"threshold",
"=",
"threshold",
",",
"neurite_type",
"=",
"neurite_type",
",",
"*",
"*",
"kwargs",
")"
] |
Plots the 2d histogram of the center
coordinates of segments in the selected plane
and superimposes the view of the first neurite of the collection.
|
[
"Plots",
"the",
"2d",
"histogram",
"of",
"the",
"center",
"coordinates",
"of",
"segments",
"in",
"the",
"selected",
"plane",
"and",
"superimposes",
"the",
"view",
"of",
"the",
"first",
"neurite",
"of",
"the",
"collection",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/density_plot.py#L83-L99
|
BlueBrain/NeuroM
|
neurom/geom/__init__.py
|
bounding_box
|
def bounding_box(obj):
'''Get the (x, y, z) bounding box of an object containing points
Returns:
2D numpy array of [[min_x, min_y, min_z], [max_x, max_y, max_z]]
'''
return np.array([np.min(obj.points[:, 0:3], axis=0),
np.max(obj.points[:, 0:3], axis=0)])
|
python
|
def bounding_box(obj):
return np.array([np.min(obj.points[:, 0:3], axis=0),
np.max(obj.points[:, 0:3], axis=0)])
|
[
"def",
"bounding_box",
"(",
"obj",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"np",
".",
"min",
"(",
"obj",
".",
"points",
"[",
":",
",",
"0",
":",
"3",
"]",
",",
"axis",
"=",
"0",
")",
",",
"np",
".",
"max",
"(",
"obj",
".",
"points",
"[",
":",
",",
"0",
":",
"3",
"]",
",",
"axis",
"=",
"0",
")",
"]",
")"
] |
Get the (x, y, z) bounding box of an object containing points
Returns:
2D numpy array of [[min_x, min_y, min_z], [max_x, max_y, max_z]]
|
[
"Get",
"the",
"(",
"x",
"y",
"z",
")",
"bounding",
"box",
"of",
"an",
"object",
"containing",
"points"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/geom/__init__.py#L36-L43
|
BlueBrain/NeuroM
|
neurom/check/morphtree.py
|
is_monotonic
|
def is_monotonic(neurite, tol):
'''Check if neurite tree is monotonic
i.e. each child has a diameter smaller than or equal to its parent's
Args:
neurite(Neurite): neurite to operate on
tol(float): tolerance
Returns:
True if neurite monotonic
'''
for node in neurite.iter_sections():
# check that points in section satisfy monotonicity
sec = node.points
for point_id in range(len(sec) - 1):
if sec[point_id + 1][COLS.R] > sec[point_id][COLS.R] + tol:
return False
# Check that section boundary points satisfy monotonicity
if(node.parent is not None and
sec[0][COLS.R] > node.parent.points[-1][COLS.R] + tol):
return False
return True
|
python
|
def is_monotonic(neurite, tol):
for node in neurite.iter_sections():
sec = node.points
for point_id in range(len(sec) - 1):
if sec[point_id + 1][COLS.R] > sec[point_id][COLS.R] + tol:
return False
if(node.parent is not None and
sec[0][COLS.R] > node.parent.points[-1][COLS.R] + tol):
return False
return True
|
[
"def",
"is_monotonic",
"(",
"neurite",
",",
"tol",
")",
":",
"for",
"node",
"in",
"neurite",
".",
"iter_sections",
"(",
")",
":",
"# check that points in section satisfy monotonicity",
"sec",
"=",
"node",
".",
"points",
"for",
"point_id",
"in",
"range",
"(",
"len",
"(",
"sec",
")",
"-",
"1",
")",
":",
"if",
"sec",
"[",
"point_id",
"+",
"1",
"]",
"[",
"COLS",
".",
"R",
"]",
">",
"sec",
"[",
"point_id",
"]",
"[",
"COLS",
".",
"R",
"]",
"+",
"tol",
":",
"return",
"False",
"# Check that section boundary points satisfy monotonicity",
"if",
"(",
"node",
".",
"parent",
"is",
"not",
"None",
"and",
"sec",
"[",
"0",
"]",
"[",
"COLS",
".",
"R",
"]",
">",
"node",
".",
"parent",
".",
"points",
"[",
"-",
"1",
"]",
"[",
"COLS",
".",
"R",
"]",
"+",
"tol",
")",
":",
"return",
"False",
"return",
"True"
] |
Check if neurite tree is monotonic
i.e. each child has a diameter smaller than or equal to its parent's
Args:
neurite(Neurite): neurite to operate on
tol(float): tolerance
Returns:
True if neurite monotonic
|
[
"Check",
"if",
"neurite",
"tree",
"is",
"monotonic"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L40-L64
|
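The per-section radius check reduces to a vectorised comparison; a standalone sketch on plain radius sequences (assumed values):

import numpy as np

def radii_monotonic(radii, tol=1e-6):
    '''True if radii never increase by more than tol along the sequence.'''
    radii = np.asarray(radii)
    return bool(np.all(radii[1:] <= radii[:-1] + tol))

print(radii_monotonic([2.0, 1.5, 1.5, 1.0]))  # True
print(radii_monotonic([2.0, 1.5, 1.8, 1.0]))  # False: radius grows mid-sequence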
BlueBrain/NeuroM
|
neurom/check/morphtree.py
|
is_flat
|
def is_flat(neurite, tol, method='tolerance'):
'''Check if neurite is flat using the given method
Args:
neurite(Neurite): neurite to operate on
tol(float): tolerance
method(string): the method of flatness estimation:
'tolerance' returns true if any extent of the tree is smaller
than the given tolerance
'ratio' returns true if the ratio of the smallest directions
is smaller than tol. e.g. [1,2,3] -> 1/2 < tol
Returns:
True if neurite is flat
'''
ext = principal_direction_extent(neurite.points[:, COLS.XYZ])
assert method in ('tolerance', 'ratio'), "Method must be one of 'tolerance', 'ratio'"
if method == 'ratio':
sorted_ext = np.sort(ext)
return sorted_ext[0] / sorted_ext[1] < float(tol)
return any(ext < float(tol))
|
python
|
def is_flat(neurite, tol, method='tolerance'):
ext = principal_direction_extent(neurite.points[:, COLS.XYZ])
assert method in ('tolerance', 'ratio'), "Method must be one of 'tolerance', 'ratio'"
if method == 'ratio':
sorted_ext = np.sort(ext)
return sorted_ext[0] / sorted_ext[1] < float(tol)
return any(ext < float(tol))
|
[
"def",
"is_flat",
"(",
"neurite",
",",
"tol",
",",
"method",
"=",
"'tolerance'",
")",
":",
"ext",
"=",
"principal_direction_extent",
"(",
"neurite",
".",
"points",
"[",
":",
",",
"COLS",
".",
"XYZ",
"]",
")",
"assert",
"method",
"in",
"(",
"'tolerance'",
",",
"'ratio'",
")",
",",
"\"Method must be one of 'tolerance', 'ratio'\"",
"if",
"method",
"==",
"'ratio'",
":",
"sorted_ext",
"=",
"np",
".",
"sort",
"(",
"ext",
")",
"return",
"sorted_ext",
"[",
"0",
"]",
"/",
"sorted_ext",
"[",
"1",
"]",
"<",
"float",
"(",
"tol",
")",
"return",
"any",
"(",
"ext",
"<",
"float",
"(",
"tol",
")",
")"
] |
Check if neurite is flat using the given method
Args:
neurite(Neurite): neurite to operate on
tol(float): tolerance
method(string): the method of flatness estimation:
'tolerance' returns true if any extent of the tree is smaller
than the given tolerance
'ratio' returns true if the ratio of the smallest directions
is smaller than tol. e.g. [1,2,3] -> 1/2 < tol
Returns:
True if neurite is flat
|
[
"Check",
"if",
"neurite",
"is",
"flat",
"using",
"the",
"given",
"method"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L67-L88
|
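The flatness test relies on extents along principal directions; this sketch re-implements that idea with a plain eigendecomposition as a stand-in for NeuroM's principal_direction_extent:

import numpy as np

def principal_extents(points):
    '''Extents of a point cloud along the principal directions of its covariance.'''
    centered = points - points.mean(axis=0)
    _, eigvecs = np.linalg.eigh(np.cov(centered.T))
    projected = centered.dot(eigvecs)
    return projected.max(axis=0) - projected.min(axis=0)

pts = np.random.uniform(-1, 1, size=(500, 3))
pts[:, 2] *= 1e-3  # squash the cloud, so it is nearly flat along one direction
print(np.sort(principal_extents(pts)))  # smallest extent on the order of 1e-3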
BlueBrain/NeuroM
|
neurom/check/morphtree.py
|
is_back_tracking
|
def is_back_tracking(neurite):
''' Check if a neurite process backtracks to a previous node. Back-tracking takes place
when a daughter of a branching process goes back and either overlaps with a previous point, or
lies inside the cylindrical volume of the latter.
Args:
neurite(Neurite): neurite to operate on
Returns:
True under the following scenarios:
1. A segment endpoint falls back and overlaps with a previous segment's point
2. The geometry of a segment overlaps with a previous one in the section
'''
def pair(segs):
''' Pairs consecutive elements of the input list'''
return zip(segs, segs[1:])
def coords(node):
''' Returns the first three values of the tree that correspond to the x, y, z coordinates'''
return node[COLS.XYZ]
def max_radius(seg):
''' Returns maximum radius from the two segment endpoints'''
return max(seg[0][COLS.R], seg[1][COLS.R])
def is_not_zero_seg(seg):
''' Returns True if segment has zero length'''
return not np.allclose(coords(seg[0]), coords(seg[1]))
def is_in_the_same_verse(seg1, seg2):
''' Checks if the vectors face the same direction. This
is true if their dot product is greater than zero.
'''
v1 = coords(seg2[1]) - coords(seg2[0])
v2 = coords(seg1[1]) - coords(seg1[0])
return np.dot(v1, v2) >= 0
def is_seg2_within_seg1_radius(dist, seg1, seg2):
''' Checks whether the orthogonal distance from the point at the end of
seg1 to the body of segment seg2 is smaller than the sum of their radii
'''
return dist <= max_radius(seg1) + max_radius(seg2)
def is_seg1_overlapping_with_seg2(seg1, seg2):
'''Checks if a segment is in proximity of another one upstream'''
# get the coordinates of seg2 (from the origin)
s1 = coords(seg2[0])
s2 = coords(seg2[1])
# vector of the center of seg2 (from the origin)
C = 0.5 * (s1 + s2)
# endpoint of seg1 (from the origin)
P = coords(seg1[1])
# vector from the center C of seg2 to the endpoint P of seg1
CP = P - C
# vector of seg2
S1S2 = s2 - s1
# projection of CP upon seg2
prj = mm.vector_projection(CP, S1S2)
# check if the distance of the orthogonal complement of CP projection on S1S2
# (vertical distance from P to seg2) is smaller than the sum of the radii. (overlap)
# If not, exit early, because backtracking is not feasible
if not is_seg2_within_seg1_radius(np.linalg.norm(CP - prj), seg1, seg2):
return False
# projection lies within the length of the cylinder. Check if the distance between
# the center C of seg2 and the projection of the end point of seg1, P is smaller than
# half of the other's length plus a 5% tolerance
return np.linalg.norm(prj) < 0.55 * np.linalg.norm(S1S2)
def is_inside_cylinder(seg1, seg2):
''' Checks if seg2 approximately lies within a cylindrical volume of seg1.
Two conditions must be satisfied:
1. The two segments are not facing the same direction (seg2 comes back to seg1)
2. seg2 is overlapping with seg1
'''
return not is_in_the_same_verse(seg1, seg2) and is_seg1_overlapping_with_seg2(seg1, seg2)
# filter out single segment sections
section_itr = (snode for snode in neurite.iter_sections() if snode.points.shape[0] > 2)
for snode in section_itr:
# pair consecutive points and filter out zero-length segments
segment_pairs = list(filter(is_not_zero_seg, pair(snode.points)))
for i, seg1 in enumerate(segment_pairs[1:]):
# check if the end point of the segment lies within the previous
# ones in the current section
for seg2 in segment_pairs[0: i + 1]:
if is_inside_cylinder(seg1, seg2):
return True
return False
|
python
|
def is_back_tracking(neurite):
def pair(segs):
return zip(segs, segs[1:])
def coords(node):
return node[COLS.XYZ]
def max_radius(seg):
return max(seg[0][COLS.R], seg[1][COLS.R])
def is_not_zero_seg(seg):
return not np.allclose(coords(seg[0]), coords(seg[1]))
def is_in_the_same_verse(seg1, seg2):
v1 = coords(seg2[1]) - coords(seg2[0])
v2 = coords(seg1[1]) - coords(seg1[0])
return np.dot(v1, v2) >= 0
def is_seg2_within_seg1_radius(dist, seg1, seg2):
return dist <= max_radius(seg1) + max_radius(seg2)
def is_seg1_overlapping_with_seg2(seg1, seg2):
s1 = coords(seg2[0])
s2 = coords(seg2[1])
C = 0.5 * (s1 + s2)
P = coords(seg1[1])
CP = P - C
S1S2 = s2 - s1
prj = mm.vector_projection(CP, S1S2)
if not is_seg2_within_seg1_radius(np.linalg.norm(CP - prj), seg1, seg2):
return False
return np.linalg.norm(prj) < 0.55 * np.linalg.norm(S1S2)
def is_inside_cylinder(seg1, seg2):
return not is_in_the_same_verse(seg1, seg2) and is_seg1_overlapping_with_seg2(seg1, seg2)
section_itr = (snode for snode in neurite.iter_sections() if snode.points.shape[0] > 2)
for snode in section_itr:
segment_pairs = list(filter(is_not_zero_seg, pair(snode.points)))
for i, seg1 in enumerate(segment_pairs[1:]):
for seg2 in segment_pairs[0: i + 1]:
if is_inside_cylinder(seg1, seg2):
return True
return False
|
[
"def",
"is_back_tracking",
"(",
"neurite",
")",
":",
"def",
"pair",
"(",
"segs",
")",
":",
"''' Pairs the input list into triplets'''",
"return",
"zip",
"(",
"segs",
",",
"segs",
"[",
"1",
":",
"]",
")",
"def",
"coords",
"(",
"node",
")",
":",
"''' Returns the first three values of the tree that correspond to the x, y, z coordinates'''",
"return",
"node",
"[",
"COLS",
".",
"XYZ",
"]",
"def",
"max_radius",
"(",
"seg",
")",
":",
"''' Returns maximum radius from the two segment endpoints'''",
"return",
"max",
"(",
"seg",
"[",
"0",
"]",
"[",
"COLS",
".",
"R",
"]",
",",
"seg",
"[",
"1",
"]",
"[",
"COLS",
".",
"R",
"]",
")",
"def",
"is_not_zero_seg",
"(",
"seg",
")",
":",
"''' Returns True if segment has zero length'''",
"return",
"not",
"np",
".",
"allclose",
"(",
"coords",
"(",
"seg",
"[",
"0",
"]",
")",
",",
"coords",
"(",
"seg",
"[",
"1",
"]",
")",
")",
"def",
"is_in_the_same_verse",
"(",
"seg1",
",",
"seg2",
")",
":",
"''' Checks if the vectors face the same direction. This\n is true if their dot product is greater than zero.\n '''",
"v1",
"=",
"coords",
"(",
"seg2",
"[",
"1",
"]",
")",
"-",
"coords",
"(",
"seg2",
"[",
"0",
"]",
")",
"v2",
"=",
"coords",
"(",
"seg1",
"[",
"1",
"]",
")",
"-",
"coords",
"(",
"seg1",
"[",
"0",
"]",
")",
"return",
"np",
".",
"dot",
"(",
"v1",
",",
"v2",
")",
">=",
"0",
"def",
"is_seg2_within_seg1_radius",
"(",
"dist",
",",
"seg1",
",",
"seg2",
")",
":",
"''' Checks whether the orthogonal distance from the point at the end of\n seg1 to seg2 segment body is smaller than the sum of their radii\n '''",
"return",
"dist",
"<=",
"max_radius",
"(",
"seg1",
")",
"+",
"max_radius",
"(",
"seg2",
")",
"def",
"is_seg1_overlapping_with_seg2",
"(",
"seg1",
",",
"seg2",
")",
":",
"'''Checks if a segment is in proximity of another one upstream'''",
"# get the coordinates of seg2 (from the origin)",
"s1",
"=",
"coords",
"(",
"seg2",
"[",
"0",
"]",
")",
"s2",
"=",
"coords",
"(",
"seg2",
"[",
"1",
"]",
")",
"# vector of the center of seg2 (from the origin)",
"C",
"=",
"0.5",
"*",
"(",
"s1",
"+",
"s2",
")",
"# endpoint of seg1 (from the origin)",
"P",
"=",
"coords",
"(",
"seg1",
"[",
"1",
"]",
")",
"# vector from the center C of seg2 to the endpoint P of seg1",
"CP",
"=",
"P",
"-",
"C",
"# vector of seg2",
"S1S2",
"=",
"s2",
"-",
"s1",
"# projection of CP upon seg2",
"prj",
"=",
"mm",
".",
"vector_projection",
"(",
"CP",
",",
"S1S2",
")",
"# check if the distance of the orthogonal complement of CP projection on S1S2",
"# (vertical distance from P to seg2) is smaller than the sum of the radii. (overlap)",
"# If not exit early, because there is no way that backtracking can feasible",
"if",
"not",
"is_seg2_within_seg1_radius",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"CP",
"-",
"prj",
")",
",",
"seg1",
",",
"seg2",
")",
":",
"return",
"False",
"# projection lies within the length of the cylinder. Check if the distance between",
"# the center C of seg2 and the projection of the end point of seg1, P is smaller than",
"# half of the others length plus a 5% tolerance",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"prj",
")",
"<",
"0.55",
"*",
"np",
".",
"linalg",
".",
"norm",
"(",
"S1S2",
")",
"def",
"is_inside_cylinder",
"(",
"seg1",
",",
"seg2",
")",
":",
"''' Checks if seg2 approximately lies within a cylindrical volume of seg1.\n Two conditions must be satisfied:\n 1. The two segments are not facing the same direction (seg2 comes back to seg1)\n 2. seg2 is overlaping with seg1\n '''",
"return",
"not",
"is_in_the_same_verse",
"(",
"seg1",
",",
"seg2",
")",
"and",
"is_seg1_overlapping_with_seg2",
"(",
"seg1",
",",
"seg2",
")",
"# filter out single segment sections",
"section_itr",
"=",
"(",
"snode",
"for",
"snode",
"in",
"neurite",
".",
"iter_sections",
"(",
")",
"if",
"snode",
".",
"points",
".",
"shape",
"[",
"0",
"]",
">",
"2",
")",
"for",
"snode",
"in",
"section_itr",
":",
"# group each section's points intro triplets",
"segment_pairs",
"=",
"list",
"(",
"filter",
"(",
"is_not_zero_seg",
",",
"pair",
"(",
"snode",
".",
"points",
")",
")",
")",
"# filter out zero length segments",
"for",
"i",
",",
"seg1",
"in",
"enumerate",
"(",
"segment_pairs",
"[",
"1",
":",
"]",
")",
":",
"# check if the end point of the segment lies within the previous",
"# ones in the current sectionmake",
"for",
"seg2",
"in",
"segment_pairs",
"[",
"0",
":",
"i",
"+",
"1",
"]",
":",
"if",
"is_inside_cylinder",
"(",
"seg1",
",",
"seg2",
")",
":",
"return",
"True",
"return",
"False"
] |
Check if a neurite process backtracks to a previous node. Back-tracking takes place
when a daughter of a branching process goes back and either overlaps with a previous point, or
lies inside the cylindrical volume of the latter.
Args:
neurite(Neurite): neurite to operate on
Returns:
True under the following scenarios:
1. A segment endpoint falls back and overlaps with a previous segment's point
2. The geometry of a segment overlaps with a previous one in the section
|
[
"Check",
"if",
"a",
"neurite",
"process",
"backtracks",
"to",
"a",
"previous",
"node",
".",
"Back",
"-",
"tracking",
"takes",
"place",
"when",
"a",
"daughter",
"of",
"a",
"branching",
"process",
"goes",
"back",
"and",
"either",
"overlaps",
"with",
"a",
"previous",
"point",
"or",
"lies",
"inside",
"the",
"cylindrical",
"volume",
"of",
"the",
"latter",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L91-L187
|
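The overlap test hinges on the residual of a vector projection; a self-contained sketch (vector_projection is a stand-in for morphmath.vector_projection, all points assumed):

import numpy as np

def vector_projection(u, v):
    '''Projection of u onto v.'''
    v = np.asarray(v, dtype=float)
    return np.dot(u, v) / np.dot(v, v) * v

s1, s2 = np.array([0.0, 0.0, 0.0]), np.array([2.0, 0.0, 0.0])  # a segment
P = np.array([1.0, 0.5, 0.0])                                  # endpoint of another segment
C = 0.5 * (s1 + s2)
CP = P - C
prj = vector_projection(CP, s2 - s1)
print(np.linalg.norm(CP - prj))  # 0.5: the orthogonal distance used in the overlap test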
BlueBrain/NeuroM
|
neurom/check/morphtree.py
|
get_flat_neurites
|
def get_flat_neurites(neuron, tol=0.1, method='ratio'):
'''Check if a neuron has neurites that are flat within a tolerance
Args:
neurite(Neurite): neurite to operate on
tol(float): the tolerance or the ratio
method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`
Returns:
Bool list corresponding to the flatness check for each neurite
in neuron neurites with respect to the given criteria
'''
return [n for n in neuron.neurites if is_flat(n, tol, method)]
|
python
|
def get_flat_neurites(neuron, tol=0.1, method='ratio'):
return [n for n in neuron.neurites if is_flat(n, tol, method)]
|
[
"def",
"get_flat_neurites",
"(",
"neuron",
",",
"tol",
"=",
"0.1",
",",
"method",
"=",
"'ratio'",
")",
":",
"return",
"[",
"n",
"for",
"n",
"in",
"neuron",
".",
"neurites",
"if",
"is_flat",
"(",
"n",
",",
"tol",
",",
"method",
")",
"]"
] |
Check if a neuron has neurites that are flat within a tolerance
Args:
neurite(Neurite): neurite to operate on
tol(float): the tolerance or the ratio
method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`
Returns:
Bool list corresponding to the flatness check for each neurite
in neuron neurites with respect to the given criteria
|
[
"Check",
"if",
"a",
"neuron",
"has",
"neurites",
"that",
"are",
"flat",
"within",
"a",
"tolerance"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L190-L202
|
BlueBrain/NeuroM
|
neurom/check/morphtree.py
|
get_nonmonotonic_neurites
|
def get_nonmonotonic_neurites(neuron, tol=1e-6):
'''Get neurites that are not monotonic
Args:
neurite(Neurite): neurite to operate on
tol(float): the tolerance or the ratio
Returns:
list of neurites that do not satisfy monotonicity test
'''
return [n for n in neuron.neurites if not is_monotonic(n, tol)]
|
python
|
def get_nonmonotonic_neurites(neuron, tol=1e-6):
return [n for n in neuron.neurites if not is_monotonic(n, tol)]
|
[
"def",
"get_nonmonotonic_neurites",
"(",
"neuron",
",",
"tol",
"=",
"1e-6",
")",
":",
"return",
"[",
"n",
"for",
"n",
"in",
"neuron",
".",
"neurites",
"if",
"not",
"is_monotonic",
"(",
"n",
",",
"tol",
")",
"]"
] |
Get neurites that are not monotonic
Args:
neurite(Neurite): neurite to operate on
tol(float): the tolerance or the ratio
Returns:
list of neurites that do not satisfy monotonicity test
|
[
"Get",
"neurites",
"that",
"are",
"not",
"monotonic"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L205-L215
|
BlueBrain/NeuroM
|
examples/radius_of_gyration.py
|
segment_centre_of_mass
|
def segment_centre_of_mass(seg):
'''Calculate and return centre of mass of a segment.
Calculated as the centre of mass of a conical frustum'''
h = mm.segment_length(seg)
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
num = r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1
denom = 4 * (r0 * r0 + r0 * r1 + r1 * r1)
centre_of_mass_z_loc = num / denom
return seg[0][COLS.XYZ] + (centre_of_mass_z_loc / h) * (seg[1][COLS.XYZ] - seg[0][COLS.XYZ])
|
python
|
def segment_centre_of_mass(seg):
h = mm.segment_length(seg)
r0 = seg[0][COLS.R]
r1 = seg[1][COLS.R]
num = r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1
denom = 4 * (r0 * r0 + r0 * r1 + r1 * r1)
centre_of_mass_z_loc = num / denom
return seg[0][COLS.XYZ] + (centre_of_mass_z_loc / h) * (seg[1][COLS.XYZ] - seg[0][COLS.XYZ])
|
[
"def",
"segment_centre_of_mass",
"(",
"seg",
")",
":",
"h",
"=",
"mm",
".",
"segment_length",
"(",
"seg",
")",
"r0",
"=",
"seg",
"[",
"0",
"]",
"[",
"COLS",
".",
"R",
"]",
"r1",
"=",
"seg",
"[",
"1",
"]",
"[",
"COLS",
".",
"R",
"]",
"num",
"=",
"r0",
"*",
"r0",
"+",
"2",
"*",
"r0",
"*",
"r1",
"+",
"3",
"*",
"r1",
"*",
"r1",
"denom",
"=",
"4",
"*",
"(",
"r0",
"*",
"r0",
"+",
"r0",
"*",
"r1",
"+",
"r1",
"*",
"r1",
")",
"centre_of_mass_z_loc",
"=",
"num",
"/",
"denom",
"return",
"seg",
"[",
"0",
"]",
"[",
"COLS",
".",
"XYZ",
"]",
"+",
"(",
"centre_of_mass_z_loc",
"/",
"h",
")",
"*",
"(",
"seg",
"[",
"1",
"]",
"[",
"COLS",
".",
"XYZ",
"]",
"-",
"seg",
"[",
"0",
"]",
"[",
"COLS",
".",
"XYZ",
"]",
")"
] |
Calculate and return centre of mass of a segment.
Calculated as the centre of mass of a conical frustum
|
[
"Calculate",
"and",
"return",
"centre",
"of",
"mass",
"of",
"a",
"segment",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L38-L48
|
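For reference, the dimensionless fraction in the code matches the standard centroid of a conical frustum, z/h = (r0^2 + 2*r0*r1 + 3*r1^2) / (4*(r0^2 + r0*r1 + r1^2)) measured from the r0 face; a quick numeric check:

def frustum_centroid_fraction(r0, r1):
    '''Fractional height of a conical frustum's centre of mass from the r0 face.'''
    return (r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1) / (4 * (r0 * r0 + r0 * r1 + r1 * r1))

print(frustum_centroid_fraction(1.0, 1.0))  # 0.5: a cylinder balances at mid-height
print(frustum_centroid_fraction(1.0, 0.0))  # 0.25: a cone balances a quarter up from its base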
BlueBrain/NeuroM
|
examples/radius_of_gyration.py
|
neurite_centre_of_mass
|
def neurite_centre_of_mass(neurite):
'''Calculate and return centre of mass of a neurite.'''
centre_of_mass = np.zeros(3)
total_volume = 0
seg_vol = np.array(map(mm.segment_volume, nm.iter_segments(neurite)))
seg_centre_of_mass = np.array(map(segment_centre_of_mass, nm.iter_segments(neurite)))
# multiply array of scalars with array of arrays
# http://stackoverflow.com/questions/5795700/multiply-numpy-array-of-scalars-by-array-of-vectors
seg_centre_of_mass = seg_centre_of_mass * seg_vol[:, np.newaxis]
centre_of_mass = np.sum(seg_centre_of_mass, axis=0)
total_volume = np.sum(seg_vol)
return centre_of_mass / total_volume
|
python
|
def neurite_centre_of_mass(neurite):
centre_of_mass = np.zeros(3)
total_volume = 0
seg_vol = np.array(map(mm.segment_volume, nm.iter_segments(neurite)))
seg_centre_of_mass = np.array(map(segment_centre_of_mass, nm.iter_segments(neurite)))
seg_centre_of_mass = seg_centre_of_mass * seg_vol[:, np.newaxis]
centre_of_mass = np.sum(seg_centre_of_mass, axis=0)
total_volume = np.sum(seg_vol)
return centre_of_mass / total_volume
|
[
"def",
"neurite_centre_of_mass",
"(",
"neurite",
")",
":",
"centre_of_mass",
"=",
"np",
".",
"zeros",
"(",
"3",
")",
"total_volume",
"=",
"0",
"seg_vol",
"=",
"np",
".",
"array",
"(",
"map",
"(",
"mm",
".",
"segment_volume",
",",
"nm",
".",
"iter_segments",
"(",
"neurite",
")",
")",
")",
"seg_centre_of_mass",
"=",
"np",
".",
"array",
"(",
"map",
"(",
"segment_centre_of_mass",
",",
"nm",
".",
"iter_segments",
"(",
"neurite",
")",
")",
")",
"# multiply array of scalars with array of arrays",
"# http://stackoverflow.com/questions/5795700/multiply-numpy-array-of-scalars-by-array-of-vectors",
"seg_centre_of_mass",
"=",
"seg_centre_of_mass",
"*",
"seg_vol",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"centre_of_mass",
"=",
"np",
".",
"sum",
"(",
"seg_centre_of_mass",
",",
"axis",
"=",
"0",
")",
"total_volume",
"=",
"np",
".",
"sum",
"(",
"seg_vol",
")",
"return",
"centre_of_mass",
"/",
"total_volume"
] |
Calculate and return centre of mass of a neurite.
|
[
"Calculate",
"and",
"return",
"centre",
"of",
"mass",
"of",
"a",
"neurite",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L51-L64
|
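One portability note on the snippet above: on Python 3, map() returns an iterator, and np.array over an iterator does not produce a numeric array; a variant that works on both Python 2 and 3 wraps the calls in list():

seg_vol = np.array(list(map(mm.segment_volume, nm.iter_segments(neurite))))
seg_centre_of_mass = np.array(list(map(segment_centre_of_mass, nm.iter_segments(neurite))))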
BlueBrain/NeuroM
|
examples/radius_of_gyration.py
|
distance_sqr
|
def distance_sqr(point, seg):
'''Calculate and return squared Euclidean distance from given point to
centre of mass of given segment.'''
centre_of_mass = segment_centre_of_mass(seg)
return sum(pow(np.subtract(point, centre_of_mass), 2))
|
python
|
def distance_sqr(point, seg):
centre_of_mass = segment_centre_of_mass(seg)
return sum(pow(np.subtract(point, centre_of_mass), 2))
|
[
"def",
"distance_sqr",
"(",
"point",
",",
"seg",
")",
":",
"centre_of_mass",
"=",
"segment_centre_of_mass",
"(",
"seg",
")",
"return",
"sum",
"(",
"pow",
"(",
"np",
".",
"subtract",
"(",
"point",
",",
"centre_of_mass",
")",
",",
"2",
")",
")"
] |
Calculate and return squared Euclidean distance from given point to
centre of mass of given segment.
|
[
"Calculate",
"and",
"return",
"square",
"Euclidian",
"distance",
"from",
"given",
"point",
"to",
"centre",
"of",
"mass",
"of",
"given",
"segment",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L67-L71
|
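An equivalent, vectorised form of the same computation (a sketch, not from the source):

def distance_sqr(point, seg):
    # squared Euclidean distance from the point to the segment's centre of mass
    return float(np.sum((np.asarray(point) - segment_centre_of_mass(seg)) ** 2))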
BlueBrain/NeuroM
|
examples/radius_of_gyration.py
|
radius_of_gyration
|
def radius_of_gyration(neurite):
'''Calculate and return radius of gyration of a given neurite.'''
centre_mass = neurite_centre_of_mass(neurite)
sum_sqr_distance = 0
N = 0
dist_sqr = [distance_sqr(centre_mass, s) for s in nm.iter_segments(neurite)]
sum_sqr_distance = np.sum(dist_sqr)
N = len(dist_sqr)
return np.sqrt(sum_sqr_distance / N)
|
python
|
def radius_of_gyration(neurite):
centre_mass = neurite_centre_of_mass(neurite)
sum_sqr_distance = 0
N = 0
dist_sqr = [distance_sqr(centre_mass, s) for s in nm.iter_segments(neurite)]
sum_sqr_distance = np.sum(dist_sqr)
N = len(dist_sqr)
return np.sqrt(sum_sqr_distance / N)
|
[
"def",
"radius_of_gyration",
"(",
"neurite",
")",
":",
"centre_mass",
"=",
"neurite_centre_of_mass",
"(",
"neurite",
")",
"sum_sqr_distance",
"=",
"0",
"N",
"=",
"0",
"dist_sqr",
"=",
"[",
"distance_sqr",
"(",
"centre_mass",
",",
"s",
")",
"for",
"s",
"in",
"nm",
".",
"iter_segments",
"(",
"neurite",
")",
"]",
"sum_sqr_distance",
"=",
"np",
".",
"sum",
"(",
"dist_sqr",
")",
"N",
"=",
"len",
"(",
"dist_sqr",
")",
"return",
"np",
".",
"sqrt",
"(",
"sum_sqr_distance",
"/",
"N",
")"
] |
Calculate and return radius of gyration of a given neurite.
|
[
"Calculate",
"and",
"return",
"radius",
"of",
"gyration",
"of",
"a",
"given",
"neurite",
"."
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/radius_of_gyration.py#L74-L82
|
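Putting the helpers above together (the input path is a placeholder):

import neurom as nm

nrn = nm.load_neuron('some/path/neuron.swc')  # hypothetical input file
for neurite in nrn.neurites:
    print(neurite.type, radius_of_gyration(neurite))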
BlueBrain/NeuroM
|
apps/__main__.py
|
view
|
def view(input_file, plane, backend):
'''A simple neuron viewer'''
if backend == 'matplotlib':
from neurom.viewer import draw
kwargs = {
'mode': '3d' if plane == '3d' else '2d',
}
if plane != '3d':
kwargs['plane'] = plane
draw(load_neuron(input_file), **kwargs)
else:
from neurom.view.plotly import draw
draw(load_neuron(input_file), plane=plane)
if backend == 'matplotlib':
import matplotlib.pyplot as plt
plt.show()
|
python
|
def view(input_file, plane, backend):
if backend == 'matplotlib':
from neurom.viewer import draw
kwargs = {
'mode': '3d' if plane == '3d' else '2d',
}
if plane != '3d':
kwargs['plane'] = plane
draw(load_neuron(input_file), **kwargs)
else:
from neurom.view.plotly import draw
draw(load_neuron(input_file), plane=plane)
if backend == 'matplotlib':
import matplotlib.pyplot as plt
plt.show()
|
[
"def",
"view",
"(",
"input_file",
",",
"plane",
",",
"backend",
")",
":",
"if",
"backend",
"==",
"'matplotlib'",
":",
"from",
"neurom",
".",
"viewer",
"import",
"draw",
"kwargs",
"=",
"{",
"'mode'",
":",
"'3d'",
"if",
"plane",
"==",
"'3d'",
"else",
"'2d'",
",",
"}",
"if",
"plane",
"!=",
"'3d'",
":",
"kwargs",
"[",
"'plane'",
"]",
"=",
"plane",
"draw",
"(",
"load_neuron",
"(",
"input_file",
")",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"from",
"neurom",
".",
"view",
".",
"plotly",
"import",
"draw",
"draw",
"(",
"load_neuron",
"(",
"input_file",
")",
",",
"plane",
"=",
"plane",
")",
"if",
"backend",
"==",
"'matplotlib'",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"plt",
".",
"show",
"(",
")"
] |
A simple neuron viewer
|
[
"A",
"simple",
"neuron",
"viewer"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/apps/__main__.py#L23-L39
|
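A sketch of calling the viewer directly; the file path and the 'xy' plane value are assumptions:

view('some/path/neuron.swc', plane='xy', backend='matplotlib')  # 2d matplotlib view
view('some/path/neuron.swc', plane='3d', backend='plotly')      # interactive plotly view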
BlueBrain/NeuroM
|
neurom/apps/annotate.py
|
generate_annotation
|
def generate_annotation(result, settings):
'''Generate the annotation for a given checker
Arguments
result(CheckResult): the result of running a single checker
settings(dict): a dictionary of settings for the annotation. It must
contain the keys name, label and color
Returns
An S-expression-like string representing the annotation
'''
if result.status:
return ""
header = ("\n\n"
"({label} ; MUK_ANNOTATION\n"
" (Color {color}) ; MUK_ANNOTATION\n"
" (Name \"{name}\") ; MUK_ANNOTATION").format(**settings)
points = [p for _, _points in result.info for p in _points]
annotations = (" ({0} {1} {2} 0.50) ; MUK_ANNOTATION".format(
p[COLS.X], p[COLS.Y], p[COLS.Z]) for p in points)
footer = ") ; MUK_ANNOTATION\n"
return '\n'.join(chain.from_iterable(([header], annotations, [footer])))
|
python
|
def generate_annotation(result, settings):
if result.status:
return ""
header = ("\n\n"
"({label} ; MUK_ANNOTATION\n"
" (Color {color}) ; MUK_ANNOTATION\n"
" (Name \"{name}\") ; MUK_ANNOTATION").format(**settings)
points = [p for _, _points in result.info for p in _points]
annotations = (" ({0} {1} {2} 0.50) ; MUK_ANNOTATION".format(
p[COLS.X], p[COLS.Y], p[COLS.Z]) for p in points)
footer = ") ; MUK_ANNOTATION\n"
return '\n'.join(chain.from_iterable(([header], annotations, [footer])))
|
[
"def",
"generate_annotation",
"(",
"result",
",",
"settings",
")",
":",
"if",
"result",
".",
"status",
":",
"return",
"\"\"",
"header",
"=",
"(",
"\"\\n\\n\"",
"\"({label} ; MUK_ANNOTATION\\n\"",
"\" (Color {color}) ; MUK_ANNOTATION\\n\"",
"\" (Name \\\"{name}\\\") ; MUK_ANNOTATION\"",
")",
".",
"format",
"(",
"*",
"*",
"settings",
")",
"points",
"=",
"[",
"p",
"for",
"_",
",",
"_points",
"in",
"result",
".",
"info",
"for",
"p",
"in",
"_points",
"]",
"annotations",
"=",
"(",
"\" ({0} {1} {2} 0.50) ; MUK_ANNOTATION\"",
".",
"format",
"(",
"p",
"[",
"COLS",
".",
"X",
"]",
",",
"p",
"[",
"COLS",
".",
"Y",
"]",
",",
"p",
"[",
"COLS",
".",
"Z",
"]",
")",
"for",
"p",
"in",
"points",
")",
"footer",
"=",
"\") ; MUK_ANNOTATION\\n\"",
"return",
"'\\n'",
".",
"join",
"(",
"chain",
".",
"from_iterable",
"(",
"(",
"[",
"header",
"]",
",",
"annotations",
",",
"[",
"footer",
"]",
")",
")",
")"
] |
Generate the annotation for a given checker
Arguments
result(CheckResult): the result of running a single checker
settings(dict): a dictionary of settings for the annotation. It must
contain the keys name, label and color
Returns
An S-expression-like string representing the annotation
|
[
"Generate",
"the",
"annotation",
"for",
"a",
"given",
"checker"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/annotate.py#L37-L62
|
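The format string above consumes the keys label, color and name; an illustrative settings mapping (values are assumptions):

settings = {'name': 'dangling branch', 'label': 'Circle3', 'color': 'Blue'}
print(generate_annotation(result, settings))  # result: a failed checker result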
BlueBrain/NeuroM
|
neurom/apps/annotate.py
|
annotate
|
def annotate(results, settings):
'''Concatenate the annotations of all checkers'''
annotations = (generate_annotation(result, setting)
for result, setting in zip(results, settings))
return '\n'.join(annot for annot in annotations if annot)
|
python
|
def annotate(results, settings):
annotations = (generate_annotation(result, setting)
for result, setting in zip(results, settings))
return '\n'.join(annot for annot in annotations if annot)
|
[
"def",
"annotate",
"(",
"results",
",",
"settings",
")",
":",
"annotations",
"=",
"(",
"generate_annotation",
"(",
"result",
",",
"setting",
")",
"for",
"result",
",",
"setting",
"in",
"zip",
"(",
"results",
",",
"settings",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"annot",
"for",
"annot",
"in",
"annotations",
"if",
"annot",
")"
] |
Concatenate the annotations of all checkers
|
[
"Concatenate",
"the",
"annotations",
"of",
"all",
"checkers"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/apps/annotate.py#L65-L69
|
BlueBrain/NeuroM
|
neurom/core/point.py
|
as_point
|
def as_point(row):
'''Create a Point from a data block row'''
return Point(row[COLS.X], row[COLS.Y], row[COLS.Z],
row[COLS.R], int(row[COLS.TYPE]))
|
python
|
def as_point(row):
return Point(row[COLS.X], row[COLS.Y], row[COLS.Z],
row[COLS.R], int(row[COLS.TYPE]))
|
[
"def",
"as_point",
"(",
"row",
")",
":",
"return",
"Point",
"(",
"row",
"[",
"COLS",
".",
"X",
"]",
",",
"row",
"[",
"COLS",
".",
"Y",
"]",
",",
"row",
"[",
"COLS",
".",
"Z",
"]",
",",
"row",
"[",
"COLS",
".",
"R",
"]",
",",
"int",
"(",
"row",
"[",
"COLS",
".",
"TYPE",
"]",
")",
")"
] |
Create a Point from a data block row
|
[
"Create",
"a",
"Point",
"from",
"a",
"data",
"block",
"row"
] |
train
|
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/point.py#L38-L41
|
jambonsw/django-improved-user
|
src/improved_user/managers.py
|
UserManager.create_superuser
|
def create_superuser(self, email, password, **extra_fields):
"""Save new User with is_staff and is_superuser set to True"""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
|
python
|
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
|
[
"def",
"create_superuser",
"(",
"self",
",",
"email",
",",
"password",
",",
"*",
"*",
"extra_fields",
")",
":",
"extra_fields",
".",
"setdefault",
"(",
"'is_staff'",
",",
"True",
")",
"extra_fields",
".",
"setdefault",
"(",
"'is_superuser'",
",",
"True",
")",
"if",
"extra_fields",
".",
"get",
"(",
"'is_staff'",
")",
"is",
"not",
"True",
":",
"raise",
"ValueError",
"(",
"'Superuser must have is_staff=True.'",
")",
"if",
"extra_fields",
".",
"get",
"(",
"'is_superuser'",
")",
"is",
"not",
"True",
":",
"raise",
"ValueError",
"(",
"'Superuser must have is_superuser=True.'",
")",
"return",
"self",
".",
"_create_user",
"(",
"email",
",",
"password",
",",
"*",
"*",
"extra_fields",
")"
] |
Save new User with is_staff and is_superuser set to True
|
[
"Save",
"new",
"User",
"with",
"is_staff",
"and",
"is_superuser",
"set",
"to",
"True"
] |
train
|
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/managers.py#L43-L51
|
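A usage sketch, assuming the improved_user model is configured as the project's AUTH_USER_MODEL (credentials are placeholders):

from django.contrib.auth import get_user_model

User = get_user_model()
admin = User.objects.create_superuser('admin@example.com', 'a-strong-password')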
jambonsw/django-improved-user
|
setup.py
|
load_file_contents
|
def load_file_contents(file_path, as_list=True):
"""Load file as string or list"""
abs_file_path = join(HERE, file_path)
with open(abs_file_path, encoding='utf-8') as file_pointer:
if as_list:
return file_pointer.read().splitlines()
return file_pointer.read()
|
python
|
def load_file_contents(file_path, as_list=True):
abs_file_path = join(HERE, file_path)
with open(abs_file_path, encoding='utf-8') as file_pointer:
if as_list:
return file_pointer.read().splitlines()
return file_pointer.read()
|
[
"def",
"load_file_contents",
"(",
"file_path",
",",
"as_list",
"=",
"True",
")",
":",
"abs_file_path",
"=",
"join",
"(",
"HERE",
",",
"file_path",
")",
"with",
"open",
"(",
"abs_file_path",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"file_pointer",
":",
"if",
"as_list",
":",
"return",
"file_pointer",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"return",
"file_pointer",
".",
"read",
"(",
")"
] |
Load file as string or list
|
[
"Load",
"file",
"as",
"string",
"or",
"list"
] |
train
|
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/setup.py#L22-L28
|
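Typical setup.py usage of the helper above (file names are illustrative):

requirements = load_file_contents('requirements.txt')                # list of lines
long_description = load_file_contents('README.rst', as_list=False)  # single string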
jambonsw/django-improved-user
|
src/improved_user/forms.py
|
AbstractUserCreationForm.clean_password2
|
def clean_password2(self):
"""
Check whether password 1 and password 2 are equivalent
While ideally this would be done in clean, there is a chance a
superclass could declare clean and forget to call super. We
therefore opt to run this password mismatch check in password2
clean, but to show the error above password1 (as we are unsure
whether password 1 or password 2 contains the typo, and putting
it above password 2 may lead some users to believe the typo is
in just one).
"""
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
self.add_error(
'password1',
forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
))
return password2
|
python
|
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
self.add_error(
'password1',
forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
))
return password2
|
[
"def",
"clean_password2",
"(",
"self",
")",
":",
"password1",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'password1'",
")",
"password2",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'password2'",
")",
"if",
"password1",
"and",
"password2",
"and",
"password1",
"!=",
"password2",
":",
"self",
".",
"add_error",
"(",
"'password1'",
",",
"forms",
".",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'password_mismatch'",
"]",
",",
"code",
"=",
"'password_mismatch'",
",",
")",
")",
"return",
"password2"
] |
Check whether password 1 and password 2 are equivalent
While ideally this would be done in clean, there is a chance a
superclass could declare clean and forget to call super. We
therefore opt to run this password mismatch check in password2
clean, but to show the error above password1 (as we are unsure
whether password 1 or password 2 contains the typo, and putting
it above password 2 may lead some users to believe the typo is
in just one).
|
[
"Check",
"wether",
"password",
"1",
"and",
"password",
"2",
"are",
"equivalent"
] |
train
|
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/forms.py#L62-L84
|
jambonsw/django-improved-user
|
src/improved_user/forms.py
|
AbstractUserCreationForm._post_clean
|
def _post_clean(self):
"""Run password validaton after clean methods
When clean methods are run, the user instance does not yet
exist. To properly compare model values against the password (in
the UserAttributeSimilarityValidator), we wait until we have an
instance to compare against.
https://code.djangoproject.com/ticket/28127
https://github.com/django/django/pull/8408
Has no effect in Django prior to 1.9
May become unnecessary in Django 2.0 (if this superclass changes)
"""
super()._post_clean() # updates self.instance with form data
password = self.cleaned_data.get('password1')
if password:
try:
password_validation.validate_password(password, self.instance)
except ValidationError as error:
self.add_error('password1', error)
|
python
|
def _post_clean(self):
super()._post_clean()
password = self.cleaned_data.get('password1')
if password:
try:
password_validation.validate_password(password, self.instance)
except ValidationError as error:
self.add_error('password1', error)
|
[
"def",
"_post_clean",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"_post_clean",
"(",
")",
"# updates self.instance with form data",
"password",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'password1'",
")",
"if",
"password",
":",
"try",
":",
"password_validation",
".",
"validate_password",
"(",
"password",
",",
"self",
".",
"instance",
")",
"except",
"ValidationError",
"as",
"error",
":",
"self",
".",
"add_error",
"(",
"'password1'",
",",
"error",
")"
] |
Run password validation after clean methods
When clean methods are run, the user instance does not yet
exist. To properly compare model values against the password (in
the UserAttributeSimilarityValidator), we wait until we have an
instance to compare against.
https://code.djangoproject.com/ticket/28127
https://github.com/django/django/pull/8408
Has no effect in Django prior to 1.9
May become unnecessary in Django 2.0 (if this superclass changes)
|
[
"Run",
"password",
"validaton",
"after",
"clean",
"methods"
] |
train
|
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/forms.py#L86-L107
|
jambonsw/django-improved-user
|
src/improved_user/model_mixins.py
|
EmailAuthMixin.clean
|
def clean(self):
"""Override default clean method to normalize email.
Call :code:`super().clean()` if overriding.
"""
super().clean()
self.email = self.__class__.objects.normalize_email(self.email)
|
python
|
def clean(self):
super().clean()
self.email = self.__class__.objects.normalize_email(self.email)
|
[
"def",
"clean",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"clean",
"(",
")",
"self",
".",
"email",
"=",
"self",
".",
"__class__",
".",
"objects",
".",
"normalize_email",
"(",
"self",
".",
"email",
")"
] |
Override default clean method to normalize email.
Call :code:`super().clean()` if overriding.
|
[
"Override",
"default",
"clean",
"method",
"to",
"normalize",
"email",
"."
] |
train
|
https://github.com/jambonsw/django-improved-user/blob/e5fbb4f0d5f7491b9f06f7eb2812127b5e4616d4/src/improved_user/model_mixins.py#L69-L76
|
sfalkner/pynisher
|
pynisher/limit_function_call.py
|
subprocess_func
|
def subprocess_func(func, pipe, logger, mem_in_mb, cpu_time_limit_in_s, wall_time_limit_in_s, num_procs, grace_period_in_s, tmp_dir, *args, **kwargs):
# simple signal handler to catch the signals for time limits
def handler(signum, frame):
# logs message with level debug on this logger
logger.debug("signal handler: %i"%signum)
if (signum == signal.SIGXCPU):
# when process reaches soft limit --> a SIGXCPU signal is sent (it normally terminates the process)
raise(CpuTimeoutException)
elif (signum == signal.SIGALRM):
# SIGALRM is sent to process when the specified time limit to an alarm function elapses (when real or clock time elapses)
logger.debug("timeout")
raise(TimeoutException)
raise AnythingException
# temporary directory to store stdout and stderr
if not tmp_dir is None:
logger.debug('Redirecting output of the function to files. Access them via the stdout and stderr attributes of the wrapped function.')
stdout = open(os.path.join(tmp_dir, 'std.out'), 'a', buffering=1)
sys.stdout=stdout
stderr = open(os.path.join(tmp_dir, 'std.err'), 'a', buffering=1)
sys.stderr=stderr
# catching all signals at this point turned out to interfere with the subprocess (e.g. using ROS)
signal.signal(signal.SIGALRM, handler)
signal.signal(signal.SIGXCPU, handler)
signal.signal(signal.SIGQUIT, handler)
# code to catch EVERY catchable signal (even X11 related ones ... )
# only use for debugging/testing as this seems to be too intrusive.
"""
for i in [x for x in dir(signal) if x.startswith("SIG")]:
try:
signum = getattr(signal,i)
print("register {}, {}".format(signum, i))
signal.signal(signum, handler)
except:
print("Skipping %s"%i)
"""
# set the memory limit
if mem_in_mb is not None:
# byte --> megabyte
mem_in_b = mem_in_mb*1024*1024
# the maximum area (in bytes) of address space which may be taken by the process.
resource.setrlimit(resource.RLIMIT_AS, (mem_in_b, mem_in_b))
# for now: don't allow the function to spawn subprocesses itself.
#resource.setrlimit(resource.RLIMIT_NPROC, (1, 1))
# Turns out, this is quite restrictive, so we don't use this option by default
if num_procs is not None:
resource.setrlimit(resource.RLIMIT_NPROC, (num_procs, num_procs))
# schedule an alarm in specified number of seconds
if wall_time_limit_in_s is not None:
signal.alarm(wall_time_limit_in_s)
if cpu_time_limit_in_s is not None:
# From the Linux man page:
# When the process reaches the soft limit, it is sent a SIGXCPU signal.
# The default action for this signal is to terminate the process.
# However, the signal can be caught, and the handler can return control
# to the main program. If the process continues to consume CPU time,
# it will be sent SIGXCPU once per second until the hard limit is reached,
# at which time it is sent SIGKILL.
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time_limit_in_s,cpu_time_limit_in_s+grace_period_in_s))
# the actual function call
try:
logger.debug("call function")
return_value = ((func(*args, **kwargs), 0))
logger.debug("function returned properly: {}".format(return_value))
except MemoryError:
return_value = (None, MemorylimitException)
except OSError as e:
if (e.errno == 11):
return_value = (None, SubprocessException)
else:
return_value = (None, AnythingException)
except CpuTimeoutException:
return_value = (None, CpuTimeoutException)
except TimeoutException:
return_value = (None, TimeoutException)
except AnythingException as e:
return_value = (None, AnythingException)
except:
raise
logger.debug("Some wired exception occured!")
finally:
try:
logger.debug("return value: {}".format(return_value))
pipe.send(return_value)
pipe.close()
except:
# this part should only fail if the parent process is already dead, so there is not much to do anymore :)
pass
finally:
# recursively kill all children
p = psutil.Process()
for child in p.children(recursive=True):
child.kill()
|
python
|
def subprocess_func(func, pipe, logger, mem_in_mb, cpu_time_limit_in_s, wall_time_limit_in_s, num_procs, grace_period_in_s, tmp_dir, *args, **kwargs):
def handler(signum, frame):
logger.debug("signal handler: %i"%signum)
if (signum == signal.SIGXCPU):
raise(CpuTimeoutException)
elif (signum == signal.SIGALRM):
logger.debug("timeout")
raise(TimeoutException)
raise AnythingException
if not tmp_dir is None:
logger.debug('Redirecting output of the function to files. Access them via the stdout and stderr attributes of the wrapped function.')
stdout = open(os.path.join(tmp_dir, 'std.out'), 'a', buffering=1)
sys.stdout=stdout
stderr = open(os.path.join(tmp_dir, 'std.err'), 'a', buffering=1)
sys.stderr=stderr
signal.signal(signal.SIGALRM, handler)
signal.signal(signal.SIGXCPU, handler)
signal.signal(signal.SIGQUIT, handler)
if mem_in_mb is not None:
mem_in_b = mem_in_mb*1024*1024
resource.setrlimit(resource.RLIMIT_AS, (mem_in_b, mem_in_b))
if num_procs is not None:
resource.setrlimit(resource.RLIMIT_NPROC, (num_procs, num_procs))
if wall_time_limit_in_s is not None:
signal.alarm(wall_time_limit_in_s)
if cpu_time_limit_in_s is not None:
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time_limit_in_s,cpu_time_limit_in_s+grace_period_in_s))
try:
logger.debug("call function")
return_value = ((func(*args, **kwargs), 0))
logger.debug("function returned properly: {}".format(return_value))
except MemoryError:
return_value = (None, MemorylimitException)
except OSError as e:
if (e.errno == 11):
return_value = (None, SubprocessException)
else:
return_value = (None, AnythingException)
except CpuTimeoutException:
return_value = (None, CpuTimeoutException)
except TimeoutException:
return_value = (None, TimeoutException)
except AnythingException as e:
return_value = (None, AnythingException)
except:
raise
logger.debug("Some wired exception occured!")
finally:
try:
logger.debug("return value: {}".format(return_value))
pipe.send(return_value)
pipe.close()
except:
pass
finally:
p = psutil.Process()
for child in p.children(recursive=True):
child.kill()
|
[
"def",
"subprocess_func",
"(",
"func",
",",
"pipe",
",",
"logger",
",",
"mem_in_mb",
",",
"cpu_time_limit_in_s",
",",
"wall_time_limit_in_s",
",",
"num_procs",
",",
"grace_period_in_s",
",",
"tmp_dir",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# simple signal handler to catch the signals for time limits",
"def",
"handler",
"(",
"signum",
",",
"frame",
")",
":",
"# logs message with level debug on this logger ",
"logger",
".",
"debug",
"(",
"\"signal handler: %i\"",
"%",
"signum",
")",
"if",
"(",
"signum",
"==",
"signal",
".",
"SIGXCPU",
")",
":",
"# when process reaches soft limit --> a SIGXCPU signal is sent (it normally terminats the process)",
"raise",
"(",
"CpuTimeoutException",
")",
"elif",
"(",
"signum",
"==",
"signal",
".",
"SIGALRM",
")",
":",
"# SIGALRM is sent to process when the specified time limit to an alarm function elapses (when real or clock time elapses)",
"logger",
".",
"debug",
"(",
"\"timeout\"",
")",
"raise",
"(",
"TimeoutException",
")",
"raise",
"AnythingException",
"# temporary directory to store stdout and stderr",
"if",
"not",
"tmp_dir",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Redirecting output of the function to files. Access them via the stdout and stderr attributes of the wrapped function.'",
")",
"stdout",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'std.out'",
")",
",",
"'a'",
",",
"buffering",
"=",
"1",
")",
"sys",
".",
"stdout",
"=",
"stdout",
"stderr",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'std.err'",
")",
",",
"'a'",
",",
"buffering",
"=",
"1",
")",
"sys",
".",
"stderr",
"=",
"stderr",
"# catching all signals at this point turned out to interfer with the subprocess (e.g. using ROS)",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGALRM",
",",
"handler",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGXCPU",
",",
"handler",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGQUIT",
",",
"handler",
")",
"# code to catch EVERY catchable signal (even X11 related ones ... )",
"# only use for debugging/testing as this seems to be too intrusive.",
"# set the memory limit",
"if",
"mem_in_mb",
"is",
"not",
"None",
":",
"# byte --> megabyte",
"mem_in_b",
"=",
"mem_in_mb",
"*",
"1024",
"*",
"1024",
"# the maximum area (in bytes) of address space which may be taken by the process.",
"resource",
".",
"setrlimit",
"(",
"resource",
".",
"RLIMIT_AS",
",",
"(",
"mem_in_b",
",",
"mem_in_b",
")",
")",
"# for now: don't allow the function to spawn subprocesses itself.",
"#resource.setrlimit(resource.RLIMIT_NPROC, (1, 1))",
"# Turns out, this is quite restrictive, so we don't use this option by default",
"if",
"num_procs",
"is",
"not",
"None",
":",
"resource",
".",
"setrlimit",
"(",
"resource",
".",
"RLIMIT_NPROC",
",",
"(",
"num_procs",
",",
"num_procs",
")",
")",
"# schedule an alarm in specified number of seconds",
"if",
"wall_time_limit_in_s",
"is",
"not",
"None",
":",
"signal",
".",
"alarm",
"(",
"wall_time_limit_in_s",
")",
"if",
"cpu_time_limit_in_s",
"is",
"not",
"None",
":",
"# From the Linux man page:",
"# When the process reaches the soft limit, it is sent a SIGXCPU signal.",
"# The default action for this signal is to terminate the process.",
"# However, the signal can be caught, and the handler can return control ",
"# to the main program. If the process continues to consume CPU time,",
"# it will be sent SIGXCPU once per second until the hard limit is reached,",
"# at which time it is sent SIGKILL.",
"resource",
".",
"setrlimit",
"(",
"resource",
".",
"RLIMIT_CPU",
",",
"(",
"cpu_time_limit_in_s",
",",
"cpu_time_limit_in_s",
"+",
"grace_period_in_s",
")",
")",
"# the actual function call",
"try",
":",
"logger",
".",
"debug",
"(",
"\"call function\"",
")",
"return_value",
"=",
"(",
"(",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"0",
")",
")",
"logger",
".",
"debug",
"(",
"\"function returned properly: {}\"",
".",
"format",
"(",
"return_value",
")",
")",
"except",
"MemoryError",
":",
"return_value",
"=",
"(",
"None",
",",
"MemorylimitException",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"(",
"e",
".",
"errno",
"==",
"11",
")",
":",
"return_value",
"=",
"(",
"None",
",",
"SubprocessException",
")",
"else",
":",
"return_value",
"=",
"(",
"None",
",",
"AnythingException",
")",
"except",
"CpuTimeoutException",
":",
"return_value",
"=",
"(",
"None",
",",
"CpuTimeoutException",
")",
"except",
"TimeoutException",
":",
"return_value",
"=",
"(",
"None",
",",
"TimeoutException",
")",
"except",
"AnythingException",
"as",
"e",
":",
"return_value",
"=",
"(",
"None",
",",
"AnythingException",
")",
"except",
":",
"raise",
"logger",
".",
"debug",
"(",
"\"Some wired exception occured!\"",
")",
"finally",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"return value: {}\"",
".",
"format",
"(",
"return_value",
")",
")",
"pipe",
".",
"send",
"(",
"return_value",
")",
"pipe",
".",
"close",
"(",
")",
"except",
":",
"# this part should only fail if the parent process is alread dead, so there is not much to do anymore :)",
"pass",
"finally",
":",
"# recursively kill all children",
"p",
"=",
"psutil",
".",
"Process",
"(",
")",
"for",
"child",
"in",
"p",
".",
"children",
"(",
"recursive",
"=",
"True",
")",
":",
"child",
".",
"kill",
"(",
")"
] |
for i in [x for x in dir(signal) if x.startswith("SIG")]:
try:
signum = getattr(signal,i)
print("register {}, {}".format(signum, i))
signal.signal(signum, handler)
except:
print("Skipping %s"%i)
|
[
"for",
"i",
"in",
"[",
"x",
"for",
"x",
"in",
"dir",
"(",
"signal",
")",
"if",
"x",
".",
"startswith",
"(",
"SIG",
")",
"]",
":",
"try",
":",
"signum",
"=",
"getattr",
"(",
"signal",
"i",
")",
"print",
"(",
"register",
"{}",
"{}",
".",
"format",
"(",
"signum",
"i",
"))",
"signal",
".",
"signal",
"(",
"signum",
"handler",
")",
"except",
":",
"print",
"(",
"Skipping",
"%s",
"%i",
")"
] |
train
|
https://github.com/sfalkner/pynisher/blob/8e9518e874673bfc0a62a54fa1580cd1df931617/pynisher/limit_function_call.py#L19-L130
|
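A sketch of how a parent process might drive the worker above; some_function and the limit values are placeholders, and pynisher's real wrapper adds more bookkeeping than shown here:

import logging
import multiprocessing

logger = logging.getLogger('pynisher-sketch')
recv_end, send_end = multiprocessing.Pipe(duplex=False)
worker = multiprocessing.Process(
    target=subprocess_func,
    args=(some_function, send_end, logger,
          512,    # mem_in_mb
          5,      # cpu_time_limit_in_s
          10,     # wall_time_limit_in_s
          None,   # num_procs
          1,      # grace_period_in_s
          None))  # tmp_dir
worker.start()
worker.join()
result, exit_status = recv_end.recv()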
mapbox/cligj
|
cligj/features.py
|
normalize_feature_inputs
|
def normalize_feature_inputs(ctx, param, value):
"""Click callback that normalizes feature input values.
Returns a generator over features from the input value.
Parameters
----------
ctx: a Click context
param: the name of the argument or option
value: object
The value argument may be one of the following:
1. A list of paths to files containing GeoJSON feature
collections or feature sequences.
2. A list of string-encoded coordinate pairs of the form
"[lng, lat]", or "lng, lat", or "lng lat".
If no value is provided, features will be read from stdin.
"""
for feature_like in value or ('-',):
try:
with click.open_file(feature_like) as src:
for feature in iter_features(iter(src)):
yield feature
except IOError:
coords = list(coords_from_query(feature_like))
yield {
'type': 'Feature',
'properties': {},
'geometry': {
'type': 'Point',
'coordinates': coords}}
|
python
|
def normalize_feature_inputs(ctx, param, value):
for feature_like in value or ('-',):
try:
with click.open_file(feature_like) as src:
for feature in iter_features(iter(src)):
yield feature
except IOError:
coords = list(coords_from_query(feature_like))
yield {
'type': 'Feature',
'properties': {},
'geometry': {
'type': 'Point',
'coordinates': coords}}
|
[
"def",
"normalize_feature_inputs",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"for",
"feature_like",
"in",
"value",
"or",
"(",
"'-'",
",",
")",
":",
"try",
":",
"with",
"click",
".",
"open_file",
"(",
"feature_like",
")",
"as",
"src",
":",
"for",
"feature",
"in",
"iter_features",
"(",
"iter",
"(",
"src",
")",
")",
":",
"yield",
"feature",
"except",
"IOError",
":",
"coords",
"=",
"list",
"(",
"coords_from_query",
"(",
"feature_like",
")",
")",
"yield",
"{",
"'type'",
":",
"'Feature'",
",",
"'properties'",
":",
"{",
"}",
",",
"'geometry'",
":",
"{",
"'type'",
":",
"'Point'",
",",
"'coordinates'",
":",
"coords",
"}",
"}"
] |
Click callback that normalizes feature input values.
Returns a generator over features from the input value.
Parameters
----------
ctx: a Click context
param: the name of the argument or option
value: object
The value argument may be one of the following:
1. A list of paths to files containing GeoJSON feature
collections or feature sequences.
2. A list of string-encoded coordinate pairs of the form
"[lng, lat]", or "lng, lat", or "lng lat".
If no value is provided, features will be read from stdin.
|
[
"Click",
"callback",
"that",
"normalizes",
"feature",
"input",
"values",
"."
] |
train
|
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L8-L39
|
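The callback above also works outside of click: a value that cannot be opened as a file falls through to the coordinate parser and is wrapped in a Point feature:

for feat in normalize_feature_inputs(None, 'features', ['-122.4, 37.8']):
    print(feat['geometry']['coordinates'])   # -> [-122.4, 37.8]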
mapbox/cligj
|
cligj/features.py
|
iter_features
|
def iter_features(geojsonfile, func=None):
"""Extract GeoJSON features from a text file object.
Given a file-like object containing a single GeoJSON feature
collection text or a sequence of GeoJSON features, iter_features()
iterates over lines of the file and yields GeoJSON features.
Parameters
----------
geojsonfile: a file-like object
The geojsonfile implements the iterator protocol and yields
lines of JSON text.
func: function, optional
A function that will be applied to each extracted feature. It
takes a feature object and may return a replacement feature or
None -- in which case iter_features does not yield.
"""
func = func or (lambda x: x)
first_line = next(geojsonfile)
# Does the geojsonfile contain RS-delimited JSON sequences?
if first_line.startswith(u'\x1e'):
text_buffer = first_line.strip(u'\x1e')
for line in geojsonfile:
if line.startswith(u'\x1e'):
if text_buffer:
obj = json.loads(text_buffer)
if 'coordinates' in obj:
obj = to_feature(obj)
newfeat = func(obj)
if newfeat:
yield newfeat
text_buffer = line.strip(u'\x1e')
else:
text_buffer += line
# complete our parsing with a for-else clause.
else:
obj = json.loads(text_buffer)
if 'coordinates' in obj:
obj = to_feature(obj)
newfeat = func(obj)
if newfeat:
yield newfeat
# If not, it may contain LF-delimited GeoJSON objects or a single
# multi-line pretty-printed GeoJSON object.
else:
# Try to parse LF-delimited sequences of features or feature
# collections produced by, e.g., `jq -c ...`.
try:
obj = json.loads(first_line)
if obj['type'] == 'Feature':
newfeat = func(obj)
if newfeat:
yield newfeat
for line in geojsonfile:
newfeat = func(json.loads(line))
if newfeat:
yield newfeat
elif obj['type'] == 'FeatureCollection':
for feat in obj['features']:
newfeat = func(feat)
if newfeat:
yield newfeat
elif 'coordinates' in obj:
newfeat = func(to_feature(obj))
if newfeat:
yield newfeat
for line in geojsonfile:
newfeat = func(to_feature(json.loads(line)))
if newfeat:
yield newfeat
# Indented or pretty-printed GeoJSON features or feature
# collections will fail out of the try clause above since
# they'll have no complete JSON object on their first line.
# To handle these, we slurp in the entire file and parse its
# text.
except ValueError:
text = "".join(chain([first_line], geojsonfile))
obj = json.loads(text)
if obj['type'] == 'Feature':
newfeat = func(obj)
if newfeat:
yield newfeat
elif obj['type'] == 'FeatureCollection':
for feat in obj['features']:
newfeat = func(feat)
if newfeat:
yield newfeat
elif 'coordinates' in obj:
newfeat = func(to_feature(obj))
if newfeat:
yield newfeat
|
python
|
def iter_features(geojsonfile, func=None):
func = func or (lambda x: x)
first_line = next(geojsonfile)
if first_line.startswith(u'\x1e'):
text_buffer = first_line.strip(u'\x1e')
for line in geojsonfile:
if line.startswith(u'\x1e'):
if text_buffer:
obj = json.loads(text_buffer)
if 'coordinates' in obj:
obj = to_feature(obj)
newfeat = func(obj)
if newfeat:
yield newfeat
text_buffer = line.strip(u'\x1e')
else:
text_buffer += line
else:
obj = json.loads(text_buffer)
if 'coordinates' in obj:
obj = to_feature(obj)
newfeat = func(obj)
if newfeat:
yield newfeat
else:
try:
obj = json.loads(first_line)
if obj['type'] == 'Feature':
newfeat = func(obj)
if newfeat:
yield newfeat
for line in geojsonfile:
newfeat = func(json.loads(line))
if newfeat:
yield newfeat
elif obj['type'] == 'FeatureCollection':
for feat in obj['features']:
newfeat = func(feat)
if newfeat:
yield newfeat
elif 'coordinates' in obj:
newfeat = func(to_feature(obj))
if newfeat:
yield newfeat
for line in geojsonfile:
newfeat = func(to_feature(json.loads(line)))
if newfeat:
yield newfeat
except ValueError:
text = "".join(chain([first_line], geojsonfile))
obj = json.loads(text)
if obj['type'] == 'Feature':
newfeat = func(obj)
if newfeat:
yield newfeat
elif obj['type'] == 'FeatureCollection':
for feat in obj['features']:
newfeat = func(feat)
if newfeat:
yield newfeat
elif 'coordinates' in obj:
newfeat = func(to_feature(obj))
if newfeat:
yield newfeat
|
[
"def",
"iter_features",
"(",
"geojsonfile",
",",
"func",
"=",
"None",
")",
":",
"func",
"=",
"func",
"or",
"(",
"lambda",
"x",
":",
"x",
")",
"first_line",
"=",
"next",
"(",
"geojsonfile",
")",
"# Does the geojsonfile contain RS-delimited JSON sequences?",
"if",
"first_line",
".",
"startswith",
"(",
"u'\\x1e'",
")",
":",
"text_buffer",
"=",
"first_line",
".",
"strip",
"(",
"u'\\x1e'",
")",
"for",
"line",
"in",
"geojsonfile",
":",
"if",
"line",
".",
"startswith",
"(",
"u'\\x1e'",
")",
":",
"if",
"text_buffer",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"text_buffer",
")",
"if",
"'coordinates'",
"in",
"obj",
":",
"obj",
"=",
"to_feature",
"(",
"obj",
")",
"newfeat",
"=",
"func",
"(",
"obj",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"text_buffer",
"=",
"line",
".",
"strip",
"(",
"u'\\x1e'",
")",
"else",
":",
"text_buffer",
"+=",
"line",
"# complete our parsing with a for-else clause.",
"else",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"text_buffer",
")",
"if",
"'coordinates'",
"in",
"obj",
":",
"obj",
"=",
"to_feature",
"(",
"obj",
")",
"newfeat",
"=",
"func",
"(",
"obj",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"# If not, it may contains LF-delimited GeoJSON objects or a single",
"# multi-line pretty-printed GeoJSON object.",
"else",
":",
"# Try to parse LF-delimited sequences of features or feature",
"# collections produced by, e.g., `jq -c ...`.",
"try",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"first_line",
")",
"if",
"obj",
"[",
"'type'",
"]",
"==",
"'Feature'",
":",
"newfeat",
"=",
"func",
"(",
"obj",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"for",
"line",
"in",
"geojsonfile",
":",
"newfeat",
"=",
"func",
"(",
"json",
".",
"loads",
"(",
"line",
")",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"elif",
"obj",
"[",
"'type'",
"]",
"==",
"'FeatureCollection'",
":",
"for",
"feat",
"in",
"obj",
"[",
"'features'",
"]",
":",
"newfeat",
"=",
"func",
"(",
"feat",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"elif",
"'coordinates'",
"in",
"obj",
":",
"newfeat",
"=",
"func",
"(",
"to_feature",
"(",
"obj",
")",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"for",
"line",
"in",
"geojsonfile",
":",
"newfeat",
"=",
"func",
"(",
"to_feature",
"(",
"json",
".",
"loads",
"(",
"line",
")",
")",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"# Indented or pretty-printed GeoJSON features or feature",
"# collections will fail out of the try clause above since",
"# they'll have no complete JSON object on their first line.",
"# To handle these, we slurp in the entire file and parse its",
"# text.",
"except",
"ValueError",
":",
"text",
"=",
"\"\"",
".",
"join",
"(",
"chain",
"(",
"[",
"first_line",
"]",
",",
"geojsonfile",
")",
")",
"obj",
"=",
"json",
".",
"loads",
"(",
"text",
")",
"if",
"obj",
"[",
"'type'",
"]",
"==",
"'Feature'",
":",
"newfeat",
"=",
"func",
"(",
"obj",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"elif",
"obj",
"[",
"'type'",
"]",
"==",
"'FeatureCollection'",
":",
"for",
"feat",
"in",
"obj",
"[",
"'features'",
"]",
":",
"newfeat",
"=",
"func",
"(",
"feat",
")",
"if",
"newfeat",
":",
"yield",
"newfeat",
"elif",
"'coordinates'",
"in",
"obj",
":",
"newfeat",
"=",
"func",
"(",
"to_feature",
"(",
"obj",
")",
")",
"if",
"newfeat",
":",
"yield",
"newfeat"
] |
Extract GeoJSON features from a text file object.
Given a file-like object containing a single GeoJSON feature
collection text or a sequence of GeoJSON features, iter_features()
iterates over lines of the file and yields GeoJSON features.
Parameters
----------
geojsonfile: a file-like object
The geojsonfile implements the iterator protocol and yields
lines of JSON text.
func: function, optional
A function that will be applied to each extracted feature. It
takes a feature object and may return a replacement feature or
None -- in which case iter_features does not yield.
|
[
"Extract",
"GeoJSON",
"features",
"from",
"a",
"text",
"file",
"object",
"."
] |
train
|
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L42-L135
|
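A minimal check of the parser above against an in-memory file object:

import io
import json

src = io.StringIO(json.dumps({'type': 'Feature', 'properties': {}, 'geometry': None}))
print(list(iter_features(src)))   # the single feature comes back out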
mapbox/cligj
|
cligj/features.py
|
iter_query
|
def iter_query(query):
"""Accept a filename, stream, or string.
Returns an iterator over lines of the query."""
try:
itr = click.open_file(query).readlines()
except IOError:
itr = [query]
return itr
|
python
|
def iter_query(query):
try:
itr = click.open_file(query).readlines()
except IOError:
itr = [query]
return itr
|
[
"def",
"iter_query",
"(",
"query",
")",
":",
"try",
":",
"itr",
"=",
"click",
".",
"open_file",
"(",
"query",
")",
".",
"readlines",
"(",
")",
"except",
"IOError",
":",
"itr",
"=",
"[",
"query",
"]",
"return",
"itr"
] |
Accept a filename, stream, or string.
Returns an iterator over lines of the query.
|
[
"Accept",
"a",
"filename",
"stream",
"or",
"string",
".",
"Returns",
"an",
"iterator",
"over",
"lines",
"of",
"the",
"query",
"."
] |
train
|
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L154-L161
|
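Behaviour sketch: a string that cannot be opened as a file falls back to a single-item list:

iter_query('[-122.4, 37.8]')   # -> ['[-122.4, 37.8]']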
mapbox/cligj
|
cligj/features.py
|
coords_from_query
|
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
query = query.replace(',', ' ')
vals = query.split()
coords = [float(v) for v in vals]
return tuple(coords[:2])
|
python
|
def coords_from_query(query):
try:
coords = json.loads(query)
except ValueError:
query = query.replace(',', ' ')
vals = query.split()
coords = [float(v) for v in vals]
return tuple(coords[:2])
|
[
"def",
"coords_from_query",
"(",
"query",
")",
":",
"try",
":",
"coords",
"=",
"json",
".",
"loads",
"(",
"query",
")",
"except",
"ValueError",
":",
"query",
"=",
"query",
".",
"replace",
"(",
"','",
",",
"' '",
")",
"vals",
"=",
"query",
".",
"split",
"(",
")",
"coords",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"vals",
"]",
"return",
"tuple",
"(",
"coords",
"[",
":",
"2",
"]",
")"
] |
Transform a query line into a (lng, lat) pair of coordinates.
|
[
"Transform",
"a",
"query",
"line",
"into",
"a",
"(",
"lng",
"lat",
")",
"pair",
"of",
"coordinates",
"."
] |
train
|
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L164-L172
|
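The three accepted spellings from the callback's docstring all normalise the same way:

coords_from_query('[-122.4, 37.8]')   # -> (-122.4, 37.8)
coords_from_query('-122.4, 37.8')     # -> (-122.4, 37.8)
coords_from_query('-122.4 37.8')      # -> (-122.4, 37.8)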
mapbox/cligj
|
cligj/features.py
|
normalize_feature_objects
|
def normalize_feature_objects(feature_objs):
"""Takes an iterable of GeoJSON-like Feature mappings or
an iterable of objects with a geo interface and
normalizes it to the former."""
for obj in feature_objs:
if hasattr(obj, "__geo_interface__") and \
'type' in obj.__geo_interface__.keys() and \
obj.__geo_interface__['type'] == 'Feature':
yield obj.__geo_interface__
elif isinstance(obj, dict) and 'type' in obj and \
obj['type'] == 'Feature':
yield obj
else:
raise ValueError("Did not recognize object {0}"
"as GeoJSON Feature".format(obj))
|
python
|
def normalize_feature_objects(feature_objs):
for obj in feature_objs:
if hasattr(obj, "__geo_interface__") and \
'type' in obj.__geo_interface__.keys() and \
obj.__geo_interface__['type'] == 'Feature':
yield obj.__geo_interface__
elif isinstance(obj, dict) and 'type' in obj and \
obj['type'] == 'Feature':
yield obj
else:
raise ValueError("Did not recognize object {0}"
"as GeoJSON Feature".format(obj))
|
[
"def",
"normalize_feature_objects",
"(",
"feature_objs",
")",
":",
"for",
"obj",
"in",
"feature_objs",
":",
"if",
"hasattr",
"(",
"obj",
",",
"\"__geo_interface__\"",
")",
"and",
"'type'",
"in",
"obj",
".",
"__geo_interface__",
".",
"keys",
"(",
")",
"and",
"obj",
".",
"__geo_interface__",
"[",
"'type'",
"]",
"==",
"'Feature'",
":",
"yield",
"obj",
".",
"__geo_interface__",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
"and",
"'type'",
"in",
"obj",
"and",
"obj",
"[",
"'type'",
"]",
"==",
"'Feature'",
":",
"yield",
"obj",
"else",
":",
"raise",
"ValueError",
"(",
"\"Did not recognize object {0}\"",
"\"as GeoJSON Feature\"",
".",
"format",
"(",
"obj",
")",
")"
] |
Takes an iterable of GeoJSON-like Feature mappings or
an iterable of objects with a geo interface and
normalizes it to the former.
|
[
"Takes",
"an",
"iterable",
"of",
"GeoJSON",
"-",
"like",
"Feature",
"mappings",
"or",
"an",
"iterable",
"of",
"objects",
"with",
"a",
"geo",
"interface",
"and",
"normalizes",
"it",
"to",
"the",
"former",
"."
] |
train
|
https://github.com/mapbox/cligj/blob/1815692d99abfb4bc4b2d0411f67fa568f112c05/cligj/features.py#L175-L189
|
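A quick sketch of the two accepted inputs; GeoFeature is a stand-in for any object exposing __geo_interface__:

class GeoFeature:
    __geo_interface__ = {'type': 'Feature', 'properties': {}, 'geometry': None}

plain = {'type': 'Feature', 'properties': {}, 'geometry': None}
print(list(normalize_feature_objects([GeoFeature(), plain])))   # two features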
ludeeus/pytraccar
|
pytraccar/api.py
|
API.api
|
async def api(self, endpoint, params=None, test=False):
"""Comunicate with the API."""
data = {}
url = "{}/{}".format(self._api, endpoint)
try:
async with async_timeout.timeout(8, loop=self._loop):
response = await self._session.get(
url, auth=self._auth, headers=HEADERS, params=params
)
if response.status == 200:
self._authenticated = True
self._connected = True
if not test:
data = await response.json()
elif response.status == 401:
self._authenticated = False
self._connected = True
except asyncio.TimeoutError as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Timeouterror connecting to Traccar, %s", error)
except aiohttp.ClientError as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
except socket.gaierror as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
except TypeError as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
except Exception as error: # pylint: disable=broad-except
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
return data
|
python
|
async def api(self, endpoint, params=None, test=False):
data = {}
url = "{}/{}".format(self._api, endpoint)
try:
async with async_timeout.timeout(8, loop=self._loop):
response = await self._session.get(
url, auth=self._auth, headers=HEADERS, params=params
)
if response.status == 200:
self._authenticated = True
self._connected = True
if not test:
data = await response.json()
elif response.status == 401:
self._authenticated = False
self._connected = True
except asyncio.TimeoutError as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Timeouterror connecting to Traccar, %s", error)
except aiohttp.ClientError as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
except socket.gaierror as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
except TypeError as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
except Exception as error:
self._authenticated, self._connected = False, False
if not test:
_LOGGER.warning("Error connecting to Traccar, %s", error)
return data
|
[
"async",
"def",
"api",
"(",
"self",
",",
"endpoint",
",",
"params",
"=",
"None",
",",
"test",
"=",
"False",
")",
":",
"data",
"=",
"{",
"}",
"url",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"_api",
",",
"endpoint",
")",
"try",
":",
"async",
"with",
"async_timeout",
".",
"timeout",
"(",
"8",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
":",
"response",
"=",
"await",
"self",
".",
"_session",
".",
"get",
"(",
"url",
",",
"auth",
"=",
"self",
".",
"_auth",
",",
"headers",
"=",
"HEADERS",
",",
"params",
"=",
"params",
")",
"if",
"response",
".",
"status",
"==",
"200",
":",
"self",
".",
"_authenticated",
"=",
"True",
"self",
".",
"_connected",
"=",
"True",
"if",
"not",
"test",
":",
"data",
"=",
"await",
"response",
".",
"json",
"(",
")",
"elif",
"response",
".",
"status",
"==",
"401",
":",
"self",
".",
"_authenticated",
"=",
"False",
"self",
".",
"_connected",
"=",
"True",
"except",
"asyncio",
".",
"TimeoutError",
"as",
"error",
":",
"self",
".",
"_authenticated",
",",
"self",
".",
"_connected",
"=",
"False",
",",
"False",
"if",
"not",
"test",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Timeouterror connecting to Traccar, %s\"",
",",
"error",
")",
"except",
"aiohttp",
".",
"ClientError",
"as",
"error",
":",
"self",
".",
"_authenticated",
",",
"self",
".",
"_connected",
"=",
"False",
",",
"False",
"if",
"not",
"test",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Error connecting to Traccar, %s\"",
",",
"error",
")",
"except",
"socket",
".",
"gaierror",
"as",
"error",
":",
"self",
".",
"_authenticated",
",",
"self",
".",
"_connected",
"=",
"False",
",",
"False",
"if",
"not",
"test",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Error connecting to Traccar, %s\"",
",",
"error",
")",
"except",
"TypeError",
"as",
"error",
":",
"self",
".",
"_authenticated",
",",
"self",
".",
"_connected",
"=",
"False",
",",
"False",
"if",
"not",
"test",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Error connecting to Traccar, %s\"",
",",
"error",
")",
"except",
"Exception",
"as",
"error",
":",
"# pylint: disable=broad-except",
"self",
".",
"_authenticated",
",",
"self",
".",
"_connected",
"=",
"False",
",",
"False",
"if",
"not",
"test",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Error connecting to Traccar, %s\"",
",",
"error",
")",
"return",
"data"
] |
Communicate with the API.
|
[
"Comunicate",
"with",
"the",
"API",
"."
] |
train
|
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/api.py#L39-L79
|
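A sketch of calling the coroutine above, assuming traccar is an already-constructed API instance (the constructor is not part of this record):

async def show_devices(traccar):
    data = await traccar.api('devices')
    for device in data or []:
        print(device.get('uniqueId'))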
ludeeus/pytraccar
|
pytraccar/api.py
|
API.get_device_info
|
async def get_device_info(self, custom_attributes=None):
"""Get the local installed version."""
await self.get_geofences()
await self.get_devices()
await self.get_positions()
devinfo = {}
try: # pylint: disable=too-many-nested-blocks
for dev in self._devices or []:
for pos in self._positions or []:
if pos["deviceId"] == dev.get("id"):
uid = dev.get("uniqueId")
devinfo[uid] = {}
nested = pos.get("attributes", {})
for attribute in ATTRIBUTES["position"]:
key = ATTRIBUTES["position"][attribute]
devinfo[uid][attribute] = pos[key]
for attribute in ATTRIBUTES["device"]:
key = ATTRIBUTES["device"][attribute]
devinfo[uid][attribute] = dev[key]
devinfo[uid]["battery"] = nested.get("batteryLevel")
devinfo[uid]["motion"] = nested.get("motion")
if custom_attributes is not None:
for attr in custom_attributes:
if attr in nested:
attrvalue = nested.get(attr)
devinfo[uid][attr] = attrvalue
try:
geofence = self.geofences[dev["geofenceIds"][0]]
except IndexError:
geofence = None
devinfo[uid]["geofence"] = geofence
if devinfo:
self._device_info = devinfo
else:
self._device_info = self._device_info
_LOGGER.debug(self._device_info)
except KeyError as error:
_LOGGER.error("Error combining data from Traccar, %s", error)
|
python
|
async def get_device_info(self, custom_attributes=None):
await self.get_geofences()
await self.get_devices()
await self.get_positions()
devinfo = {}
try:
for dev in self._devices or []:
for pos in self._positions or []:
if pos["deviceId"] == dev.get("id"):
uid = dev.get("uniqueId")
devinfo[uid] = {}
nested = pos.get("attributes", {})
for attribute in ATTRIBUTES["position"]:
key = ATTRIBUTES["position"][attribute]
devinfo[uid][attribute] = pos[key]
for attribute in ATTRIBUTES["device"]:
key = ATTRIBUTES["device"][attribute]
devinfo[uid][attribute] = dev[key]
devinfo[uid]["battery"] = nested.get("batteryLevel")
devinfo[uid]["motion"] = nested.get("motion")
if custom_attributes is not None:
for attr in custom_attributes:
if attr in nested:
attrvalue = nested.get(attr)
devinfo[uid][attr] = attrvalue
try:
geofence = self.geofences[dev["geofenceIds"][0]]
except IndexError:
geofence = None
devinfo[uid]["geofence"] = geofence
if devinfo:
self._device_info = devinfo
else:
self._device_info = self._device_info
_LOGGER.debug(self._device_info)
except KeyError as error:
_LOGGER.error("Error combining data from Traccar, %s", error)
|
[
"async",
"def",
"get_device_info",
"(",
"self",
",",
"custom_attributes",
"=",
"None",
")",
":",
"await",
"self",
".",
"get_geofences",
"(",
")",
"await",
"self",
".",
"get_devices",
"(",
")",
"await",
"self",
".",
"get_positions",
"(",
")",
"devinfo",
"=",
"{",
"}",
"try",
":",
"# pylint: disable=too-many-nested-blocks",
"for",
"dev",
"in",
"self",
".",
"_devices",
"or",
"[",
"]",
":",
"for",
"pos",
"in",
"self",
".",
"_positions",
"or",
"[",
"]",
":",
"if",
"pos",
"[",
"\"deviceId\"",
"]",
"==",
"dev",
".",
"get",
"(",
"\"id\"",
")",
":",
"uid",
"=",
"dev",
".",
"get",
"(",
"\"uniqueId\"",
")",
"devinfo",
"[",
"uid",
"]",
"=",
"{",
"}",
"nested",
"=",
"pos",
".",
"get",
"(",
"\"attributes\"",
",",
"{",
"}",
")",
"for",
"attribute",
"in",
"ATTRIBUTES",
"[",
"\"position\"",
"]",
":",
"key",
"=",
"ATTRIBUTES",
"[",
"\"position\"",
"]",
"[",
"attribute",
"]",
"devinfo",
"[",
"uid",
"]",
"[",
"attribute",
"]",
"=",
"pos",
"[",
"key",
"]",
"for",
"attribute",
"in",
"ATTRIBUTES",
"[",
"\"device\"",
"]",
":",
"key",
"=",
"ATTRIBUTES",
"[",
"\"device\"",
"]",
"[",
"attribute",
"]",
"devinfo",
"[",
"uid",
"]",
"[",
"attribute",
"]",
"=",
"dev",
"[",
"key",
"]",
"devinfo",
"[",
"uid",
"]",
"[",
"\"battery\"",
"]",
"=",
"nested",
".",
"get",
"(",
"\"batteryLevel\"",
")",
"devinfo",
"[",
"uid",
"]",
"[",
"\"motion\"",
"]",
"=",
"nested",
".",
"get",
"(",
"\"motion\"",
")",
"if",
"custom_attributes",
"is",
"not",
"None",
":",
"for",
"attr",
"in",
"custom_attributes",
":",
"if",
"attr",
"in",
"nested",
":",
"attrvalue",
"=",
"nested",
".",
"get",
"(",
"attr",
")",
"devinfo",
"[",
"uid",
"]",
"[",
"attr",
"]",
"=",
"attrvalue",
"try",
":",
"geofence",
"=",
"self",
".",
"geofences",
"[",
"dev",
"[",
"\"geofenceIds\"",
"]",
"[",
"0",
"]",
"]",
"except",
"IndexError",
":",
"geofence",
"=",
"None",
"devinfo",
"[",
"uid",
"]",
"[",
"\"geofence\"",
"]",
"=",
"geofence",
"if",
"devinfo",
":",
"self",
".",
"_device_info",
"=",
"devinfo",
"else",
":",
"self",
".",
"_device_info",
"=",
"self",
".",
"_device_info",
"_LOGGER",
".",
"debug",
"(",
"self",
".",
"_device_info",
")",
"except",
"KeyError",
"as",
"error",
":",
"_LOGGER",
".",
"error",
"(",
"\"Error combining data from Traccar, %s\"",
",",
"error",
")"
] |
Get the local installed version.
|
[
"Get",
"the",
"local",
"installed",
"version",
"."
] |
train
|
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/api.py#L85-L127
|
ludeeus/pytraccar
|
pytraccar/api.py
|
API.get_geofences
|
async def get_geofences(self):
"""Get the local installed version."""
data = await self.api("geofences")
if self.connected and self.authenticated:
for geofence in data or []:
self._geofences[geofence["id"]] = geofence["name"]
else:
self._geofences = self._geofences
_LOGGER.debug(self._geofences)
|
python
|
async def get_geofences(self):
data = await self.api("geofences")
if self.connected and self.authenticated:
for geofence in data or []:
self._geofences[geofence["id"]] = geofence["name"]
else:
self._geofences = self._geofences
_LOGGER.debug(self._geofences)
|
[
"async",
"def",
"get_geofences",
"(",
"self",
")",
":",
"data",
"=",
"await",
"self",
".",
"api",
"(",
"\"geofences\"",
")",
"if",
"self",
".",
"connected",
"and",
"self",
".",
"authenticated",
":",
"for",
"geofence",
"in",
"data",
"or",
"[",
"]",
":",
"self",
".",
"_geofences",
"[",
"geofence",
"[",
"\"id\"",
"]",
"]",
"=",
"geofence",
"[",
"\"name\"",
"]",
"else",
":",
"self",
".",
"_geofences",
"=",
"self",
".",
"_geofences",
"_LOGGER",
".",
"debug",
"(",
"self",
".",
"_geofences",
")"
] |
Get the local installed version.
|
[
"Get",
"the",
"local",
"installed",
"version",
"."
] |
train
|
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/api.py#L129-L137
|
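Editor's note: the id-to-name mapping built by get_geofences is what get_device_info indexes with dev["geofenceIds"][0], and the IndexError guard covers devices that belong to no geofence. The same lookup pattern in isolation, with made-up sample payloads shaped like the Traccar responses used above:

geofences = {1: "Home", 2: "Work"}  # id -> name, as built by get_geofences
device = {"uniqueId": "abc123", "geofenceIds": []}  # device outside every geofence

try:
    geofence = geofences[device["geofenceIds"][0]]
except IndexError:  # empty geofenceIds -> no active geofence
    geofence = None

print(geofence)  # None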
ludeeus/pytraccar
|
pytraccar/api.py
|
API.get_devices
|
async def get_devices(self):
"""Get the local installed version."""
data = await self.api("devices")
if self.connected and self.authenticated:
self._devices = data
else:
self._devices = self._devices
_LOGGER.debug(self._devices)
|
python
|
async def get_devices(self):
data = await self.api("devices")
if self.connected and self.authenticated:
self._devices = data
else:
self._devices = self._devices
_LOGGER.debug(self._devices)
|
[
"async",
"def",
"get_devices",
"(",
"self",
")",
":",
"data",
"=",
"await",
"self",
".",
"api",
"(",
"\"devices\"",
")",
"if",
"self",
".",
"connected",
"and",
"self",
".",
"authenticated",
":",
"self",
".",
"_devices",
"=",
"data",
"else",
":",
"self",
".",
"_devices",
"=",
"self",
".",
"_devices",
"_LOGGER",
".",
"debug",
"(",
"self",
".",
"_devices",
")"
] |
Get the local installed version.
|
[
"Get",
"the",
"local",
"installed",
"version",
"."
] |
train
|
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/api.py#L139-L146
|
ludeeus/pytraccar
|
pytraccar/api.py
|
API.get_positions
|
async def get_positions(self):
"""Get the local installed version."""
data = await self.api("positions")
if self.connected and self.authenticated:
self._positions = data
else:
self._positions = self._positions
_LOGGER.debug(self._positions)
|
python
|
async def get_positions(self):
data = await self.api("positions")
if self.connected and self.authenticated:
self._positions = data
else:
self._positions = self._positions
_LOGGER.debug(self._positions)
|
[
"async",
"def",
"get_positions",
"(",
"self",
")",
":",
"data",
"=",
"await",
"self",
".",
"api",
"(",
"\"positions\"",
")",
"if",
"self",
".",
"connected",
"and",
"self",
".",
"authenticated",
":",
"self",
".",
"_positions",
"=",
"data",
"else",
":",
"self",
".",
"_positions",
"=",
"self",
".",
"_positions",
"_LOGGER",
".",
"debug",
"(",
"self",
".",
"_positions",
")"
] |
Get the local installed version.
|
[
"Get",
"the",
"local",
"installed",
"version",
"."
] |
train
|
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/api.py#L148-L155
|
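Editor's note: get_geofences, get_devices, and get_positions all share one shape — fetch an endpoint, replace the cached value only when the session is both connected and authenticated, and otherwise keep the previous snapshot (the self._positions = self._positions branch is a no-op that makes that intent explicit). A toy stand-in for the pattern; the fetcher below is hypothetical, where the real class performs an aiohttp GET:

import asyncio


class CachedEndpoint:
    """Toy stand-in for the keep-last-good-snapshot guard used above."""

    def __init__(self):
        self.connected = True
        self.authenticated = True
        self._positions = None

    async def api(self, endpoint):
        # Hypothetical fetch result shaped like a Traccar positions payload.
        return [{"deviceId": 1, "latitude": 0.0, "longitude": 0.0}]

    async def get_positions(self):
        data = await self.api("positions")
        if self.connected and self.authenticated:
            self._positions = data  # replace the cache only on a good session
        # else: keep the last good snapshot untouched


cache = CachedEndpoint()
asyncio.run(cache.get_positions())
print(cache._positions)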
ludeeus/pytraccar
|
pytraccar/api.py
|
API.get_events
|
async def get_events(
self, device_ids, group_ids=None, from_time=None, to_time=None, event_types=None
):
"""Get the local installed version."""
if to_time is None:
to_time = datetime.utcnow()
if from_time is None:
from_time = to_time - timedelta(seconds=EVENT_INTERVAL)
if event_types is None:
event_types = ["allEvents"]
get_params = []
get_params.extend([("deviceId", value) for value in device_ids])
if group_ids is not None:
get_params.extend([("groupId", value) for value in group_ids])
get_params.extend([("from", from_time.isoformat() + "Z")])
get_params.extend([("to", to_time.isoformat() + "Z")])
get_params.extend([("type", value) for value in event_types])
data = await self.api("reports/events", get_params)
if self.connected and self.authenticated:
self._events = data
else:
self._events = self._events
return self._events
|
python
|
async def get_events(
self, device_ids, group_ids=None, from_time=None, to_time=None, event_types=None
):
if to_time is None:
to_time = datetime.utcnow()
if from_time is None:
from_time = to_time - timedelta(seconds=EVENT_INTERVAL)
if event_types is None:
event_types = ["allEvents"]
get_params = []
get_params.extend([("deviceId", value) for value in device_ids])
if group_ids is not None:
get_params.extend([("groupId", value) for value in group_ids])
get_params.extend([("from", from_time.isoformat() + "Z")])
get_params.extend([("to", to_time.isoformat() + "Z")])
get_params.extend([("type", value) for value in event_types])
data = await self.api("reports/events", get_params)
if self.connected and self.authenticated:
self._events = data
else:
self._events = self._events
return self._events
|
[
"async",
"def",
"get_events",
"(",
"self",
",",
"device_ids",
",",
"group_ids",
"=",
"None",
",",
"from_time",
"=",
"None",
",",
"to_time",
"=",
"None",
",",
"event_types",
"=",
"None",
")",
":",
"if",
"to_time",
"is",
"None",
":",
"to_time",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"from_time",
"is",
"None",
":",
"from_time",
"=",
"to_time",
"-",
"timedelta",
"(",
"seconds",
"=",
"EVENT_INTERVAL",
")",
"if",
"event_types",
"is",
"None",
":",
"event_types",
"=",
"[",
"\"allEvents\"",
"]",
"get_params",
"=",
"[",
"]",
"get_params",
".",
"extend",
"(",
"[",
"(",
"\"deviceId\"",
",",
"value",
")",
"for",
"value",
"in",
"device_ids",
"]",
")",
"if",
"group_ids",
"is",
"not",
"None",
":",
"get_params",
".",
"extend",
"(",
"[",
"(",
"\"groupId\"",
",",
"value",
")",
"for",
"value",
"in",
"group_ids",
"]",
")",
"get_params",
".",
"extend",
"(",
"[",
"(",
"\"from\"",
",",
"from_time",
".",
"isoformat",
"(",
")",
"+",
"\"Z\"",
")",
"]",
")",
"get_params",
".",
"extend",
"(",
"[",
"(",
"\"to\"",
",",
"to_time",
".",
"isoformat",
"(",
")",
"+",
"\"Z\"",
")",
"]",
")",
"get_params",
".",
"extend",
"(",
"[",
"(",
"\"type\"",
",",
"value",
")",
"for",
"value",
"in",
"event_types",
"]",
")",
"data",
"=",
"await",
"self",
".",
"api",
"(",
"\"reports/events\"",
",",
"get_params",
")",
"if",
"self",
".",
"connected",
"and",
"self",
".",
"authenticated",
":",
"self",
".",
"_events",
"=",
"data",
"else",
":",
"self",
".",
"_events",
"=",
"self",
".",
"_events",
"return",
"self",
".",
"_events"
] |
Get the local installed version.
|
[
"Get",
"the",
"local",
"installed",
"version",
"."
] |
train
|
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/api.py#L157-L184
|
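Editor's note: because the Traccar events report accepts repeated query keys, get_events builds its parameters as a list of (key, value) tuples rather than a dict. A hedged call sketch reusing the conventions above; the device id 1 and the 24-hour window are illustrative, and the host and credentials are placeholders:

import asyncio
from datetime import datetime, timedelta

import aiohttp

from pytraccar.api import API  # import path inferred from the record above

LOOP = asyncio.get_event_loop()


async def recent_events():
    async with aiohttp.ClientSession() as session:
        api = API(LOOP, session, "admin", "password", "127.0.0.1")  # placeholders
        await api.test_connection()
        if api.authenticated:
            # Events for device id 1 over the last 24 hours; types default to allEvents.
            events = await api.get_events(
                device_ids=[1],
                from_time=datetime.utcnow() - timedelta(hours=24),
            )
            for event in events or []:
                print(event)


LOOP.run_until_complete(recent_events())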
ludeeus/pytraccar
|
pytraccar/cli.py
|
runcli
|
async def runcli():
"""Debug of pytraccar."""
async with aiohttp.ClientSession() as session:
host = input("IP: ")
username = input("Username: ")
password = input("Password: ")
print("\n\n\n")
data = API(LOOP, session, username, password, host)
await data.test_connection()
print("Authenticated:", data.authenticated)
if data.authenticated:
await data.get_device_info()
print("Authentication:", data.authenticated)
print("Geofences:", data.geofences)
print("Devices:", data.devices)
print("Positions:", data.positions)
print("Device info:", data.device_info)
|
python
|
async def runcli():
async with aiohttp.ClientSession() as session:
host = input("IP: ")
username = input("Username: ")
password = input("Password: ")
print("\n\n\n")
data = API(LOOP, session, username, password, host)
await data.test_connection()
print("Authenticated:", data.authenticated)
if data.authenticated:
await data.get_device_info()
print("Authentication:", data.authenticated)
print("Geofences:", data.geofences)
print("Devices:", data.devices)
print("Positions:", data.positions)
print("Device info:", data.device_info)
|
[
"async",
"def",
"runcli",
"(",
")",
":",
"async",
"with",
"aiohttp",
".",
"ClientSession",
"(",
")",
"as",
"session",
":",
"host",
"=",
"input",
"(",
"\"IP: \"",
")",
"username",
"=",
"input",
"(",
"\"Username: \"",
")",
"password",
"=",
"input",
"(",
"\"Password: \"",
")",
"print",
"(",
"\"\\n\\n\\n\"",
")",
"data",
"=",
"API",
"(",
"LOOP",
",",
"session",
",",
"username",
",",
"password",
",",
"host",
")",
"await",
"data",
".",
"test_connection",
"(",
")",
"print",
"(",
"\"Authenticated:\"",
",",
"data",
".",
"authenticated",
")",
"if",
"data",
".",
"authenticated",
":",
"await",
"data",
".",
"get_device_info",
"(",
")",
"print",
"(",
"\"Authentication:\"",
",",
"data",
".",
"authenticated",
")",
"print",
"(",
"\"Geofences:\"",
",",
"data",
".",
"geofences",
")",
"print",
"(",
"\"Devices:\"",
",",
"data",
".",
"devices",
")",
"print",
"(",
"\"Positions:\"",
",",
"data",
".",
"positions",
")",
"print",
"(",
"\"Device info:\"",
",",
"data",
".",
"device_info",
")"
] |
Debug of pytraccar.
|
[
"Debug",
"of",
"pytraccar",
"."
] |
train
|
https://github.com/ludeeus/pytraccar/blob/c7c635c334cc193c2da351a9fc8213d5095f77d6/pytraccar/cli.py#L9-L25
|
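Editor's note: runcli references a module-level LOOP that is defined outside the lines quoted in the record. A minimal way to drive it, assuming LOOP is the default asyncio event loop (this definition is an assumption, conceptually appended to cli.py where runcli is in scope):

import asyncio

# Assumption: the real cli.py defines LOOP at module level before runcli() runs.
LOOP = asyncio.get_event_loop()

if __name__ == "__main__":
    LOOP.run_until_complete(runcli())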
alexdej/puzpy
|
puz.py
|
scramble_string
|
def scramble_string(s, key):
"""
s is the puzzle's solution in column-major order, omitting black squares:
i.e. if the puzzle is:
C A T
# # A
# # R
solution is CATAR
Key is a 4-digit number in the range 1000 <= key <= 9999
"""
key = key_digits(key)
for k in key: # foreach digit in the key
s = shift(s, key) # for each char by each digit in the key in sequence
s = s[k:] + s[:k] # cut the sequence around the key digit
s = shuffle(s) # do a 1:1 shuffle of the 'deck'
return s
|
python
|
def scramble_string(s, key):
key = key_digits(key)
for k in key:
s = shift(s, key)
s = s[k:] + s[:k]
s = shuffle(s)
return s
|
[
"def",
"scramble_string",
"(",
"s",
",",
"key",
")",
":",
"key",
"=",
"key_digits",
"(",
"key",
")",
"for",
"k",
"in",
"key",
":",
"# foreach digit in the key",
"s",
"=",
"shift",
"(",
"s",
",",
"key",
")",
"# for each char by each digit in the key in sequence",
"s",
"=",
"s",
"[",
"k",
":",
"]",
"+",
"s",
"[",
":",
"k",
"]",
"# cut the sequence around the key digit",
"s",
"=",
"shuffle",
"(",
"s",
")",
"# do a 1:1 shuffle of the 'deck'",
"return",
"s"
] |
s is the puzzle's solution in column-major order, omitting black squares:
i.e. if the puzzle is:
C A T
# # A
# # R
solution is CATAR
Key is a 4-digit number in the range 1000 <= key <= 9999
|
[
"s",
"is",
"the",
"puzzle",
"s",
"solution",
"in",
"column",
"-",
"major",
"order",
"omitting",
"black",
"squares",
":",
"i",
".",
"e",
".",
"if",
"the",
"puzzle",
"is",
":",
"C",
"A",
"T",
"#",
"#",
"A",
"#",
"#",
"R",
"solution",
"is",
"CATAR"
] |
train
|
https://github.com/alexdej/puzpy/blob/8906ab899845d1200ac3411b4c2a2067cffa15d7/puz.py#L618-L637
|
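Editor's note: scramble_string only makes sense together with its helpers, which are not included in the record. A self-contained sketch of one plausible reading follows, where key_digits, shift, and shuffle are assumed implementations (split the key into digits, shift each char cyclically by the digit sequence, riffle the two halves of the 'deck') and do not necessarily match puzpy's real helpers:

def key_digits(key):
    """Assumed helper: split a 4-digit key such as 1234 into [1, 2, 3, 4]."""
    return [int(d) for d in str(key)]


def shift(s, digits):
    """Assumed helper: shift each A-Z char forward by the key digits, cycling."""
    out = []
    for i, ch in enumerate(s):
        out.append(chr((ord(ch) - ord("A") + digits[i % len(digits)]) % 26 + ord("A")))
    return "".join(out)


def shuffle(s):
    """Assumed helper: 1:1 riffle of the two halves of the string."""
    mid = len(s) // 2
    interleaved = "".join(a + b for a, b in zip(s[mid:], s[:mid]))
    return interleaved + (s[-1] if len(s) % 2 else "")


def scramble_string(s, key):
    digits = key_digits(key)
    for k in digits:
        s = shift(s, digits)  # shift every char by the digit sequence
        s = s[k:] + s[:k]     # cut the sequence around the key digit
        s = shuffle(s)        # riffle the halves
    return s


# Worked example with the docstring's solution string CATAR and key 1234.
print(scramble_string("CATAR", 1234))  # deterministic scrambled output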