body_hash (stringlengths 64..64) | body (stringlengths 23..109k) | docstring (stringlengths 1..57k) | path (stringlengths 4..198) | name (stringlengths 1..115) | repository_name (stringlengths 7..111) | repository_stars (float64 0..191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14..108k) | unified (stringlengths 45..133k) |
---|---|---|---|---|---|---|---|---|---|
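The rows below indicate that the `unified` column is simply the `body_without_docstring` text followed by the docstring wrapped in `<|docstring|>` and `<|endoftext|>` sentinel tokens. A minimal sketch of that apparent construction (the helper name `make_unified` is hypothetical; the actual pipeline that produced this dataset is not shown here):

```python
# Hypothetical helper illustrating how the `unified` column appears to be
# derived from the other two code columns, based on the pattern visible
# in every row below: code first, then the docstring wrapped in sentinels.
def make_unified(body_without_docstring: str, docstring: str) -> str:
    return f"{body_without_docstring}<|docstring|>{docstring}<|endoftext|>"
```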
419c0b95d78cc0b5f031abeecd1efb5744e43f4e74c225796ef431a2d64299a3
|
def last_item(array):
'Returns the last item of an array in a list or an empty list.'
if (array.size == 0):
return []
indexer = ((slice((- 1), None),) * array.ndim)
return np.ravel(array[indexer]).tolist()
|
Returns the last item of an array in a list or an empty list.
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
last_item
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def last_item(array):
if (array.size == 0):
return []
indexer = ((slice((- 1), None),) * array.ndim)
return np.ravel(array[indexer]).tolist()
|
def last_item(array):
if (array.size == 0):
return []
indexer = ((slice((- 1), None),) * array.ndim)
return np.ravel(array[indexer]).tolist()<|docstring|>Returns the last item of an array in a list or an empty list.<|endoftext|>
|
b8ca6b0ddae4a582bece03a35eef3c23525f5d9df41e8d0e612f1e3b9fc568ed
|
def format_timestamp(t):
'Cast given object to a Timestamp and return a nicely formatted string'
try:
datetime_str = str(pd.Timestamp(t))
except OutOfBoundsDatetime:
datetime_str = str(t)
try:
(date_str, time_str) = datetime_str.split()
except ValueError:
return datetime_str
else:
if (time_str == '00:00:00'):
return date_str
else:
return ('%sT%s' % (date_str, time_str))
|
Cast given object to a Timestamp and return a nicely formatted string
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
format_timestamp
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def format_timestamp(t):
try:
datetime_str = str(pd.Timestamp(t))
except OutOfBoundsDatetime:
datetime_str = str(t)
try:
(date_str, time_str) = datetime_str.split()
except ValueError:
return datetime_str
else:
if (time_str == '00:00:00'):
return date_str
else:
return ('%sT%s' % (date_str, time_str))
|
def format_timestamp(t):
try:
datetime_str = str(pd.Timestamp(t))
except OutOfBoundsDatetime:
datetime_str = str(t)
try:
(date_str, time_str) = datetime_str.split()
except ValueError:
return datetime_str
else:
if (time_str == '00:00:00'):
return date_str
else:
return ('%sT%s' % (date_str, time_str))<|docstring|>Cast given object to a Timestamp and return a nicely formatted string<|endoftext|>
|
dc7f2ec404ad8bb2ea97ea7f2286b999068a578da77d629d9c322f0df1050701
|
def format_timedelta(t, timedelta_format=None):
    'Cast given object to a Timedelta and return a nicely formatted string'
timedelta_str = str(pd.Timedelta(t))
try:
(days_str, time_str) = timedelta_str.split(' days ')
except ValueError:
return timedelta_str
else:
if (timedelta_format == 'date'):
return (days_str + ' days')
elif (timedelta_format == 'time'):
return time_str
else:
return timedelta_str
|
Cast given object to a Timedelta and return a nicely formatted string
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
format_timedelta
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def format_timedelta(t, timedelta_format=None):
timedelta_str = str(pd.Timedelta(t))
try:
(days_str, time_str) = timedelta_str.split(' days ')
except ValueError:
return timedelta_str
else:
if (timedelta_format == 'date'):
return (days_str + ' days')
elif (timedelta_format == 'time'):
return time_str
else:
return timedelta_str
|
def format_timedelta(t, timedelta_format=None):
timedelta_str = str(pd.Timedelta(t))
try:
(days_str, time_str) = timedelta_str.split(' days ')
except ValueError:
return timedelta_str
else:
if (timedelta_format == 'date'):
return (days_str + ' days')
elif (timedelta_format == 'time'):
return time_str
else:
            return timedelta_str<|docstring|>Cast given object to a Timedelta and return a nicely formatted string<|endoftext|>
|
f77414ac68efa00c41d2b4b9e0444e49b906574e02e3f925284ded5130b36c9e
|
def format_item(x, timedelta_format=None, quote_strings=True):
'Returns a succinct summary of an object as a string'
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return (repr(x) if quote_strings else x)
elif isinstance(x, (float, np.float)):
return '{0:.4}'.format(x)
else:
return str(x)
|
Returns a succinct summary of an object as a string
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
format_item
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def format_item(x, timedelta_format=None, quote_strings=True):
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return (repr(x) if quote_strings else x)
elif isinstance(x, (float, np.float)):
return '{0:.4}'.format(x)
else:
return str(x)
|
def format_item(x, timedelta_format=None, quote_strings=True):
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return (repr(x) if quote_strings else x)
elif isinstance(x, (float, np.float)):
return '{0:.4}'.format(x)
else:
return str(x)<|docstring|>Returns a succinct summary of an object as a string<|endoftext|>
|
edadffaa195b23f8996b08e2100f56d7023a76e25495b6b63ab54f54694f5c90
|
def format_items(x):
    'Returns succinct summaries of all items in a sequence as strings'
x = np.asarray(x)
timedelta_format = 'datetime'
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype='timedelta64[ns]')
day_part = x[(~ pd.isnull(x))].astype('timedelta64[D]').astype('timedelta64[ns]')
time_needed = (x[(~ pd.isnull(x))] != day_part)
day_needed = (day_part != np.timedelta64(0, 'ns'))
if np.logical_not(day_needed).all():
timedelta_format = 'time'
elif np.logical_not(time_needed).all():
timedelta_format = 'date'
formatted = [format_item(xi, timedelta_format) for xi in x]
return formatted
|
Returns succinct summaries of all items in a sequence as strings
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
format_items
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def format_items(x):
x = np.asarray(x)
timedelta_format = 'datetime'
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype='timedelta64[ns]')
day_part = x[(~ pd.isnull(x))].astype('timedelta64[D]').astype('timedelta64[ns]')
time_needed = (x[(~ pd.isnull(x))] != day_part)
day_needed = (day_part != np.timedelta64(0, 'ns'))
if np.logical_not(day_needed).all():
timedelta_format = 'time'
elif np.logical_not(time_needed).all():
timedelta_format = 'date'
formatted = [format_item(xi, timedelta_format) for xi in x]
return formatted
|
def format_items(x):
x = np.asarray(x)
timedelta_format = 'datetime'
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype='timedelta64[ns]')
day_part = x[(~ pd.isnull(x))].astype('timedelta64[D]').astype('timedelta64[ns]')
time_needed = (x[(~ pd.isnull(x))] != day_part)
day_needed = (day_part != np.timedelta64(0, 'ns'))
if np.logical_not(day_needed).all():
timedelta_format = 'time'
elif np.logical_not(time_needed).all():
timedelta_format = 'date'
formatted = [format_item(xi, timedelta_format) for xi in x]
    return formatted<|docstring|>Returns succinct summaries of all items in a sequence as strings<|endoftext|>
|
5a01dbb97a14fa72b8a901e88c228b5019eb222ce8dc3d8f8ca8f1b015fa1624
|
def format_array_flat(array, max_width):
'Return a formatted string for as many items in the flattened version of\n array that will fit within max_width characters.\n '
max_possibly_relevant = min(max(array.size, 1), max(int(np.ceil((max_width / 2.0))), 2))
relevant_front_items = format_items(first_n_items(array, ((max_possibly_relevant + 1) // 2)))
relevant_back_items = format_items(last_n_items(array, (max_possibly_relevant // 2)))
relevant_items = sum(zip_longest(relevant_front_items, reversed(relevant_back_items)), ())[:max_possibly_relevant]
cum_len = (np.cumsum([(len(s) + 1) for s in relevant_items]) - 1)
if ((array.size > 2) and ((max_possibly_relevant < array.size) or (cum_len > max_width).any())):
padding = ' ... '
count = min(array.size, max(np.argmax((((cum_len + len(padding)) - 1) > max_width)), 2))
else:
count = array.size
padding = ('' if (count <= 1) else ' ')
num_front = ((count + 1) // 2)
num_back = (count - num_front)
pprint_str = ((' '.join(relevant_front_items[:num_front]) + padding) + ' '.join(relevant_back_items[(- num_back):]))
return pprint_str
|
Return a formatted string for as many items in the flattened version of
array that will fit within max_width characters.
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
format_array_flat
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def format_array_flat(array, max_width):
'Return a formatted string for as many items in the flattened version of\n array that will fit within max_width characters.\n '
max_possibly_relevant = min(max(array.size, 1), max(int(np.ceil((max_width / 2.0))), 2))
relevant_front_items = format_items(first_n_items(array, ((max_possibly_relevant + 1) // 2)))
relevant_back_items = format_items(last_n_items(array, (max_possibly_relevant // 2)))
relevant_items = sum(zip_longest(relevant_front_items, reversed(relevant_back_items)), ())[:max_possibly_relevant]
cum_len = (np.cumsum([(len(s) + 1) for s in relevant_items]) - 1)
if ((array.size > 2) and ((max_possibly_relevant < array.size) or (cum_len > max_width).any())):
padding = ' ... '
count = min(array.size, max(np.argmax((((cum_len + len(padding)) - 1) > max_width)), 2))
else:
count = array.size
    padding = ('' if (count <= 1) else ' ')
num_front = ((count + 1) // 2)
num_back = (count - num_front)
pprint_str = ((' '.join(relevant_front_items[:num_front]) + padding) + ' '.join(relevant_back_items[(- num_back):]))
return pprint_str
|
def format_array_flat(array, max_width):
'Return a formatted string for as many items in the flattened version of\n array that will fit within max_width characters.\n '
max_possibly_relevant = min(max(array.size, 1), max(int(np.ceil((max_width / 2.0))), 2))
relevant_front_items = format_items(first_n_items(array, ((max_possibly_relevant + 1) // 2)))
relevant_back_items = format_items(last_n_items(array, (max_possibly_relevant // 2)))
relevant_items = sum(zip_longest(relevant_front_items, reversed(relevant_back_items)), ())[:max_possibly_relevant]
cum_len = (np.cumsum([(len(s) + 1) for s in relevant_items]) - 1)
if ((array.size > 2) and ((max_possibly_relevant < array.size) or (cum_len > max_width).any())):
padding = ' ... '
count = min(array.size, max(np.argmax((((cum_len + len(padding)) - 1) > max_width)), 2))
else:
count = array.size
    padding = ('' if (count <= 1) else ' ')
num_front = ((count + 1) // 2)
num_back = (count - num_front)
pprint_str = ((' '.join(relevant_front_items[:num_front]) + padding) + ' '.join(relevant_back_items[(- num_back):]))
return pprint_str<|docstring|>Return a formatted string for as many items in the flattened version of
array that will fit within max_width characters.<|endoftext|>
|
a4e2eaf5a6ccd1b80efa1aa305cb47e594ef9d66941d58c14f268dbb096240ae
|
def summarize_attr(key, value, col_width=None):
'Summary for __repr__ - use ``X.attrs[key]`` for full value.'
k_str = (' %s:' % key)
if (col_width is not None):
k_str = pretty_print(k_str, col_width)
v_str = str(value).replace('\t', '\\t').replace('\n', '\\n')
return maybe_truncate(('%s %s' % (k_str, v_str)), OPTIONS['display_width'])
|
Summary for __repr__ - use ``X.attrs[key]`` for full value.
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
summarize_attr
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def summarize_attr(key, value, col_width=None):
k_str = (' %s:' % key)
if (col_width is not None):
k_str = pretty_print(k_str, col_width)
v_str = str(value).replace('\t', '\\t').replace('\n', '\\n')
return maybe_truncate(('%s %s' % (k_str, v_str)), OPTIONS['display_width'])
|
def summarize_attr(key, value, col_width=None):
k_str = (' %s:' % key)
if (col_width is not None):
k_str = pretty_print(k_str, col_width)
v_str = str(value).replace('\t', '\\t').replace('\n', '\\n')
return maybe_truncate(('%s %s' % (k_str, v_str)), OPTIONS['display_width'])<|docstring|>Summary for __repr__ - use ``X.attrs[key]`` for full value.<|endoftext|>
|
47c4fc86f2efb4e3e40283514b96b611fc749f0fedf145c0fd5151626b71efa3
|
def _get_col_items(mapping):
'Get all column items to format, including both keys of `mapping`\n and MultiIndex levels if any.\n '
from .variable import IndexVariable
col_items = []
for (k, v) in mapping.items():
col_items.append(k)
var = getattr(v, 'variable', v)
if isinstance(var, IndexVariable):
level_names = var.to_index_variable().level_names
if (level_names is not None):
col_items += list(level_names)
return col_items
|
Get all column items to format, including both keys of `mapping`
and MultiIndex levels if any.
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
_get_col_items
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def _get_col_items(mapping):
'Get all column items to format, including both keys of `mapping`\n and MultiIndex levels if any.\n '
from .variable import IndexVariable
col_items = []
for (k, v) in mapping.items():
col_items.append(k)
var = getattr(v, 'variable', v)
if isinstance(var, IndexVariable):
level_names = var.to_index_variable().level_names
if (level_names is not None):
col_items += list(level_names)
return col_items
|
def _get_col_items(mapping):
'Get all column items to format, including both keys of `mapping`\n and MultiIndex levels if any.\n '
from .variable import IndexVariable
col_items = []
for (k, v) in mapping.items():
col_items.append(k)
var = getattr(v, 'variable', v)
if isinstance(var, IndexVariable):
level_names = var.to_index_variable().level_names
if (level_names is not None):
col_items += list(level_names)
return col_items<|docstring|>Get all column items to format, including both keys of `mapping`
and MultiIndex levels if any.<|endoftext|>
|
e3bb79a1ae9c7a610c274293d24e6fd101f70e25fd0183877c0d8273055b8b63
|
def short_dask_repr(array, show_dtype=True):
"Similar to dask.array.DataArray.__repr__, but without\n redundant information that's already printed by the repr\n function of the xarray wrapper.\n "
chunksize = tuple((c[0] for c in array.chunks))
if show_dtype:
return ('dask.array<shape=%s, dtype=%s, chunksize=%s>' % (array.shape, array.dtype, chunksize))
else:
return ('dask.array<shape=%s, chunksize=%s>' % (array.shape, chunksize))
|
Similar to dask.array.DataArray.__repr__, but without
redundant information that's already printed by the repr
function of the xarray wrapper.
|
openstreetmap-scraper/venv/lib/python3.7/site-packages/xarray/core/formatting.py
|
short_dask_repr
|
espoo-urban-planning/urbanplanningGAN
| 51 |
python
|
def short_dask_repr(array, show_dtype=True):
"Similar to dask.array.DataArray.__repr__, but without\n redundant information that's already printed by the repr\n function of the xarray wrapper.\n "
chunksize = tuple((c[0] for c in array.chunks))
if show_dtype:
return ('dask.array<shape=%s, dtype=%s, chunksize=%s>' % (array.shape, array.dtype, chunksize))
else:
return ('dask.array<shape=%s, chunksize=%s>' % (array.shape, chunksize))
|
def short_dask_repr(array, show_dtype=True):
"Similar to dask.array.DataArray.__repr__, but without\n redundant information that's already printed by the repr\n function of the xarray wrapper.\n "
chunksize = tuple((c[0] for c in array.chunks))
if show_dtype:
return ('dask.array<shape=%s, dtype=%s, chunksize=%s>' % (array.shape, array.dtype, chunksize))
else:
return ('dask.array<shape=%s, chunksize=%s>' % (array.shape, chunksize))<|docstring|>Similar to dask.array.DataArray.__repr__, but without
redundant information that's already printed by the repr
function of the xarray wrapper.<|endoftext|>
|
949a0971c4149004e0c5367c4bcfc0b88bd0289c76df769bb80ada19b6f739af
|
def get_future_positions(start=datetime(2007, 1, 5), end=datetime.today()):
    '\n\n :param start: 2007-01-05, the earliest data available from the Shanghai Futures Exchange\n :param end:\n :return:\n '
trade_index = get_future_calender(start=start, end=end)
target = (RAW_DATA_DIR / 'receipt/shfe')
if (not target.exists()):
target.mkdir()
file_index = None
else:
file_index = pd.to_datetime([x.name[:(- 4)] for x in target.glob('*.csv')])
if (file_index is None):
file_index = trade_index
else:
file_index = trade_index.difference(file_index)
for date in file_index:
date_str = date.strftime('%Y-%m-%d')
file_path = (target / '{}.csv'.format(date_str))
if (file_path.exists() or (date < datetime(2014, 5, 23))):
continue
table = get_receipt_from_shfe(date.strftime('%Y%m%d'))
if (len(table) != 0):
print(date)
spread_df = pd.DataFrame(table, columns=HEADER)
spread_df.to_csv(str(file_path), index=False, encoding='gb2312')
time.sleep((np.random.rand() * 90))
return None
|
:param start: 2007-01-05, the earliest data available from the Shanghai Futures Exchange
:param end:
:return:
|
src/data/future/position.py
|
get_future_positions
|
newlyedward/datascinece
| 2 |
python
|
def get_future_positions(start=datetime(2007, 1, 5), end=datetime.today()):
    '\n\n :param start: 2007-01-05, the earliest data available from the Shanghai Futures Exchange\n :param end:\n :return:\n '
trade_index = get_future_calender(start=start, end=end)
target = (RAW_DATA_DIR / 'receipt/shfe')
if (not target.exists()):
target.mkdir()
file_index = None
else:
file_index = pd.to_datetime([x.name[:(- 4)] for x in target.glob('*.csv')])
if (file_index is None):
file_index = trade_index
else:
file_index = trade_index.difference(file_index)
for date in file_index:
date_str = date.strftime('%Y-%m-%d')
file_path = (target / '{}.csv'.format(date_str))
if (file_path.exists() or (date < datetime(2014, 5, 23))):
continue
table = get_receipt_from_shfe(date.strftime('%Y%m%d'))
if (len(table) != 0):
print(date)
spread_df = pd.DataFrame(table, columns=HEADER)
spread_df.to_csv(str(file_path), index=False, encoding='gb2312')
time.sleep((np.random.rand() * 90))
return None
|
def get_future_positions(start=datetime(2007, 1, 5), end=datetime.today()):
    '\n\n :param start: 2007-01-05, the earliest data available from the Shanghai Futures Exchange\n :param end:\n :return:\n '
trade_index = get_future_calender(start=start, end=end)
target = (RAW_DATA_DIR / 'receipt/shfe')
if (not target.exists()):
target.mkdir()
file_index = None
else:
file_index = pd.to_datetime([x.name[:(- 4)] for x in target.glob('*.csv')])
if (file_index is None):
file_index = trade_index
else:
file_index = trade_index.difference(file_index)
for date in file_index:
date_str = date.strftime('%Y-%m-%d')
file_path = (target / '{}.csv'.format(date_str))
if (file_path.exists() or (date < datetime(2014, 5, 23))):
continue
table = get_receipt_from_shfe(date.strftime('%Y%m%d'))
if (len(table) != 0):
print(date)
spread_df = pd.DataFrame(table, columns=HEADER)
spread_df.to_csv(str(file_path), index=False, encoding='gb2312')
time.sleep((np.random.rand() * 90))
    return None<|docstring|>:param start: 2007-01-05, the earliest data available from the Shanghai Futures Exchange
:param end:
:return:<|endoftext|>
|
cfa065ff5dbdeb43b9321758e8240f26855cd6d8f7c394c9c1dcb7f735e0a86b
|
def vecnorm_NDarray(v, axis=(- 1)):
    '\n Vector normalisation performed along an arbitrary dimension, which by default is the last one.\n Comes with a workaround that casts the result to zero instead of keeping np.nan or np.inf.\n '
if (len(v.shape) > 1):
sh = list(v.shape)
sh[axis] = 1
return np.nan_to_num((v / np.linalg.norm(v, axis=axis).reshape(sh)))
else:
return np.nan_to_num((v / np.linalg.norm(v)))
|
Vector normalisation performed along an arbitrary dimension, which by default is the last one.
Comes with a workaround that casts the result to zero instead of keeping np.nan or np.inf.
|
calculate-S2.py
|
vecnorm_NDarray
|
zharmad/SpinRelax
| 0 |
python
|
def vecnorm_NDarray(v, axis=(- 1)):
    '\n Vector normalisation performed along an arbitrary dimension, which by default is the last one.\n Comes with a workaround that casts the result to zero instead of keeping np.nan or np.inf.\n '
if (len(v.shape) > 1):
sh = list(v.shape)
sh[axis] = 1
return np.nan_to_num((v / np.linalg.norm(v, axis=axis).reshape(sh)))
else:
return np.nan_to_num((v / np.linalg.norm(v)))
|
def vecnorm_NDarray(v, axis=(- 1)):
    '\n Vector normalisation performed along an arbitrary dimension, which by default is the last one.\n Comes with a workaround that casts the result to zero instead of keeping np.nan or np.inf.\n '
if (len(v.shape) > 1):
sh = list(v.shape)
sh[axis] = 1
return np.nan_to_num((v / np.linalg.norm(v, axis=axis).reshape(sh)))
else:
return np.nan_to_num((v / np.linalg.norm(v)))<|docstring|>Vector normalisation performed along an arbitrary dimension, which by default is the last one.
Comes with a workaround that casts the result to zero instead of keeping np.nan or np.inf.<|endoftext|>
|
1078ab8618ba74f2ae98b060ce2b1ac7e6682287ad57c07da49597492110ab0b
|
def S2_by_outerProduct(v):
'\n Two\n '
outer = np.mean([np.outer(v[i], v[i]) for i in range(len(v))], axis=0)
return ((1.5 * np.sum((outer ** 2.0))) - 0.5)
|
Two
|
calculate-S2.py
|
S2_by_outerProduct
|
zharmad/SpinRelax
| 0 |
python
|
def S2_by_outerProduct(v):
'\n \n '
outer = np.mean([np.outer(v[i], v[i]) for i in range(len(v))], axis=0)
return ((1.5 * np.sum((outer ** 2.0))) - 0.5)
|
def S2_by_outerProduct(v):
'\n \n '
outer = np.mean([np.outer(v[i], v[i]) for i in range(len(v))], axis=0)
return ((1.5 * np.sum((outer ** 2.0))) - 0.5)<|docstring|>Two<|endoftext|>
|
d57728ef648409b24231af9dadaff66c7da4974cc59d788483d59982b539a1c3
|
def calculate_S2_by_outerProduct(vecs, delta_t=(- 1), tau_memory=(- 1)):
'\n Calculates the general order parameter S2 by using the quantity 3*Sum_i,j <e_i * e_j >^2 - 1 , which is akin to P2( CosTheta )\n Expects vecs to be of dimensions (time, 3) or ( time, nResidues, 3 )\n\n This directly collapses all dimensions in two steps:\n - 1. calculate the outer product <v_i v_j >\n - 2. calculate Sum <v_i v_j>^2\n\n When both delta_t and tau_memory are given, then returns average and SEM of the S2 samples of dimensions ( nResidues, 2 )\n '
sh = vecs.shape
nDim = sh[(- 1)]
if (len(sh) == 2):
nFrames = vecs.shape[0]
if ((delta_t < 0) or (tau_memory < 0)):
tmp = (np.einsum('ij,ik->jk', vecs, vecs) / nFrames)
return ((1.5 * np.einsum('ij,ij->', tmp, tmp)) - 0.5)
else:
nFramesPerBlock = int((tau_memory / delta_t))
nBlocks = int((nFrames / nFramesPerBlock))
vecs = vecs[:(nBlocks * nFramesPerBlock)].reshape(nBlocks, nFramesPerBlock, nDim)
tmp = (np.einsum('ijk,ijl->ikl', vecs, vecs) / nFramesPerBlock)
tmp = ((1.5 * np.einsum('ijk,ijk->i', tmp, tmp)) - 0.5)
S2 = np.mean(tmp)
dS2 = (np.std(tmp) / (np.sqrt(nBlocks) - 1.0))
return np.array([S2, dS2])
elif (len(sh) == 3):
nFrames = vecs.shape[0]
nResidues = vecs.shape[1]
if ((delta_t < 0) or (tau_memory < 0)):
tmp = (np.einsum('ijk,ijl->jkl', vecs, vecs) / nFrames)
return ((1.5 * np.einsum('...ij,...ij->...', tmp, tmp)) - 0.5)
else:
nFramesPerBlock = int((tau_memory / delta_t))
nBlocks = int((nFrames / nFramesPerBlock))
vecs = vecs[:(nBlocks * nFramesPerBlock)].reshape(nBlocks, nFramesPerBlock, nResidues, nDim)
tmp = (np.einsum('ijkl,ijkm->iklm', vecs, vecs) / nFramesPerBlock)
tmp = ((1.5 * np.einsum('...ij,...ij->...', tmp, tmp)) - 0.5)
S2 = np.mean(tmp, axis=0)
dS2 = (np.std(tmp, axis=0) / (np.sqrt(nBlocks) - 1.0))
return np.stack((S2, dS2), axis=(- 1))
else:
print('= = = ERROR in calculate_S2_by_outerProduct: unsupported number of dimensions! vecs.shape: ', sh, file=sys.stderr)
sys.exit(1)
|
Calculates the general order parameter S2 by using the quantity 3*Sum_i,j <e_i * e_j >^2 - 1 , which is akin to P2( CosTheta )
Expects vecs to be of dimensions (time, 3) or ( time, nResidues, 3 )
This directly collapses all dimensions in two steps:
- 1. calculate the outer product <v_i v_j >
- 2. calculate Sum <v_i v_j>^2
When both delta_t and tau_memory are given, then returns average and SEM of the S2 samples of dimensions ( nResidues, 2 )
|
calculate-S2.py
|
calculate_S2_by_outerProduct
|
zharmad/SpinRelax
| 0 |
python
|
def calculate_S2_by_outerProduct(vecs, delta_t=(- 1), tau_memory=(- 1)):
'\n Calculates the general order parameter S2 by using the quantity 3*Sum_i,j <e_i * e_j >^2 - 1 , which is akin to P2( CosTheta )\n Expects vecs to be of dimensions (time, 3) or ( time, nResidues, 3 )\n\n This directly collapses all dimensions in two steps:\n - 1. calculate the outer product <v_i v_j >\n - 2. calculate Sum <v_i v_j>^2\n\n When both delta_t and tau_memory are given, then returns average and SEM of the S2 samples of dimensions ( nResidues, 2 )\n '
sh = vecs.shape
nDim = sh[(- 1)]
if (len(sh) == 2):
nFrames = vecs.shape[0]
if ((delta_t < 0) or (tau_memory < 0)):
tmp = (np.einsum('ij,ik->jk', vecs, vecs) / nFrames)
return ((1.5 * np.einsum('ij,ij->', tmp, tmp)) - 0.5)
else:
nFramesPerBlock = int((tau_memory / delta_t))
nBlocks = int((nFrames / nFramesPerBlock))
vecs = vecs[:(nBlocks * nFramesPerBlock)].reshape(nBlocks, nFramesPerBlock, nDim)
tmp = (np.einsum('ijk,ijl->ikl', vecs, vecs) / nFramesPerBlock)
tmp = ((1.5 * np.einsum('ijk,ijk->i', tmp, tmp)) - 0.5)
S2 = np.mean(tmp)
dS2 = (np.std(tmp) / (np.sqrt(nBlocks) - 1.0))
return np.array([S2, dS2])
elif (len(sh) == 3):
nFrames = vecs.shape[0]
nResidues = vecs.shape[1]
if ((delta_t < 0) or (tau_memory < 0)):
tmp = (np.einsum('ijk,ijl->jkl', vecs, vecs) / nFrames)
return ((1.5 * np.einsum('...ij,...ij->...', tmp, tmp)) - 0.5)
else:
nFramesPerBlock = int((tau_memory / delta_t))
nBlocks = int((nFrames / nFramesPerBlock))
vecs = vecs[:(nBlocks * nFramesPerBlock)].reshape(nBlocks, nFramesPerBlock, nResidues, nDim)
tmp = (np.einsum('ijkl,ijkm->iklm', vecs, vecs) / nFramesPerBlock)
tmp = ((1.5 * np.einsum('...ij,...ij->...', tmp, tmp)) - 0.5)
S2 = np.mean(tmp, axis=0)
dS2 = (np.std(tmp, axis=0) / (np.sqrt(nBlocks) - 1.0))
return np.stack((S2, dS2), axis=(- 1))
else:
print('= = = ERROR in calculate_S2_by_outerProduct: unsupported number of dimensions! vecs.shape: ', sh, file=sys.stderr)
sys.exit(1)
|
def calculate_S2_by_outerProduct(vecs, delta_t=(- 1), tau_memory=(- 1)):
'\n Calculates the general order parameter S2 by using the quantity 3*Sum_i,j <e_i * e_j >^2 - 1 , which is akin to P2( CosTheta )\n Expects vecs to be of dimensions (time, 3) or ( time, nResidues, 3 )\n\n This directly collapses all dimensions in two steps:\n - 1. calculate the outer product <v_i v_j >\n - 2. calculate Sum <v_i v_j>^2\n\n When both delta_t and tau_memory are given, then returns average and SEM of the S2 samples of dimensions ( nResidues, 2 )\n '
sh = vecs.shape
nDim = sh[(- 1)]
if (len(sh) == 2):
nFrames = vecs.shape[0]
if ((delta_t < 0) or (tau_memory < 0)):
tmp = (np.einsum('ij,ik->jk', vecs, vecs) / nFrames)
return ((1.5 * np.einsum('ij,ij->', tmp, tmp)) - 0.5)
else:
nFramesPerBlock = int((tau_memory / delta_t))
nBlocks = int((nFrames / nFramesPerBlock))
vecs = vecs[:(nBlocks * nFramesPerBlock)].reshape(nBlocks, nFramesPerBlock, nDim)
tmp = (np.einsum('ijk,ijl->ikl', vecs, vecs) / nFramesPerBlock)
tmp = ((1.5 * np.einsum('ijk,ijk->i', tmp, tmp)) - 0.5)
S2 = np.mean(tmp)
dS2 = (np.std(tmp) / (np.sqrt(nBlocks) - 1.0))
return np.array([S2, dS2])
elif (len(sh) == 3):
nFrames = vecs.shape[0]
nResidues = vecs.shape[1]
if ((delta_t < 0) or (tau_memory < 0)):
tmp = (np.einsum('ijk,ijl->jkl', vecs, vecs) / nFrames)
return ((1.5 * np.einsum('...ij,...ij->...', tmp, tmp)) - 0.5)
else:
nFramesPerBlock = int((tau_memory / delta_t))
nBlocks = int((nFrames / nFramesPerBlock))
vecs = vecs[:(nBlocks * nFramesPerBlock)].reshape(nBlocks, nFramesPerBlock, nResidues, nDim)
tmp = (np.einsum('ijkl,ijkm->iklm', vecs, vecs) / nFramesPerBlock)
tmp = ((1.5 * np.einsum('...ij,...ij->...', tmp, tmp)) - 0.5)
S2 = np.mean(tmp, axis=0)
dS2 = (np.std(tmp, axis=0) / (np.sqrt(nBlocks) - 1.0))
return np.stack((S2, dS2), axis=(- 1))
else:
print('= = = ERROR in calculate_S2_by_outerProduct: unsupported number of dimensions! vecs.shape: ', sh, file=sys.stderr)
sys.exit(1)<|docstring|>Calculates the general order parameter S2 by using the quantity 3*Sum_i,j <e_i * e_j >^2 - 1 , which is akin to P2( CosTheta )
Expects vecs to be of dimensions (time, 3) or ( time, nResidues, 3 )
This directly collapses all dimensions in two steps:
- 1. calculate the outer product <v_i v_j >
- 2. calculate Sum <v_i v_j>^2
When both delta_t and tau_memory are given, then returns average and SEM of the S2 samples of dimensions ( nResidues, 2 )<|endoftext|>
|
5186239ed66e2658b51d8fec7c1828ad8a0a6c3155521d5326d9421f2f03f6ec
|
def reformat_vecs_by_tau(vecs, dt, tau):
"\n This proc assumes that vecs list is N 3D-arrays in the form <Nfile>,(frames, bonds, XYZ).\n We take advantage of Palmer's iteration where the trajectory is divided into N chunks each of tau in length,\n to reformulate everything into fast 4D np.arrays of form (nchunk, frames, bonds, XYZ) so as to\n take full advantage of broadcasting.\n This will throw away additional frame data in each trajectory that does not fit into a single block of memory time tau.\n "
nFiles = len(vecs)
nFramesPerChunk = int((tau / dt))
print((' ...debug: Using %i frames per chunk based on tau/dt (%g/%g).' % (nFramesPerChunk, tau, dt)))
used_frames = np.zeros(nFiles, dtype=int)
remainders = np.zeros(nFiles, dtype=int)
for i in range(nFiles):
nFrames = vecs[i].shape[0]
used_frames[i] = (int((nFrames / nFramesPerChunk)) * nFramesPerChunk)
remainders[i] = (nFrames % nFramesPerChunk)
print((' ...Source %i divided into %i chunks. Usage rate: %g %%' % (i, (used_frames[i] / nFramesPerChunk), ((100.0 * used_frames[i]) / nFrames))))
nFramesTot = int(used_frames.sum())
out = np.zeros((nFramesTot, vecs[0].shape[1], vecs[0].shape[2]), dtype=vecs[0].dtype)
start = 0
for i in range(nFiles):
end = int((start + used_frames[i]))
endv = int(used_frames[i])
        out[start:end, ...] = vecs[i][0:endv, ...]
start = end
sh = out.shape
print((' ...Done. vecs reformatted into %i chunks.' % (nFramesTot / nFramesPerChunk)))
    return out.reshape((int(nFramesTot / nFramesPerChunk), nFramesPerChunk, sh[(- 2)], sh[(- 1)]))
|
This proc assumes that vecs list is N 3D-arrays in the form <Nfile>,(frames, bonds, XYZ).
We take advantage of Palmer's iteration where the trajectory is divided into N chunks each of tau in length,
to reformulate everything into fast 4D np.arrays of form (nchunk, frames, bonds, XYZ) so as to
take full advantage of broadcasting.
This will throw away additional frame data in each trajectory that does not fit into a single block of memory time tau.
|
calculate-S2.py
|
reformat_vecs_by_tau
|
zharmad/SpinRelax
| 0 |
python
|
def reformat_vecs_by_tau(vecs, dt, tau):
"\n This proc assumes that vecs list is N 3D-arrays in the form <Nfile>,(frames, bonds, XYZ).\n We take advantage of Palmer's iteration where the trajectory is divided into N chunks each of tau in length,\n to reformulate everything into fast 4D np.arrays of form (nchunk, frames, bonds, XYZ) so as to\n take full advantage of broadcasting.\n This will throw away additional frame data in each trajectory that does not fit into a single block of memory time tau.\n "
nFiles = len(vecs)
nFramesPerChunk = int((tau / dt))
print((' ...debug: Using %i frames per chunk based on tau/dt (%g/%g).' % (nFramesPerChunk, tau, dt)))
used_frames = np.zeros(nFiles, dtype=int)
remainders = np.zeros(nFiles, dtype=int)
for i in range(nFiles):
nFrames = vecs[i].shape[0]
used_frames[i] = (int((nFrames / nFramesPerChunk)) * nFramesPerChunk)
remainders[i] = (nFrames % nFramesPerChunk)
print((' ...Source %i divided into %i chunks. Usage rate: %g %%' % (i, (used_frames[i] / nFramesPerChunk), ((100.0 * used_frames[i]) / nFrames))))
nFramesTot = int(used_frames.sum())
out = np.zeros((nFramesTot, vecs[0].shape[1], vecs[0].shape[2]), dtype=vecs[0].dtype)
start = 0
for i in range(nFiles):
end = int((start + used_frames[i]))
endv = int(used_frames[i])
        out[start:end, ...] = vecs[i][0:endv, ...]
start = end
sh = out.shape
print((' ...Done. vecs reformatted into %i chunks.' % (nFramesTot / nFramesPerChunk)))
    return out.reshape((int(nFramesTot / nFramesPerChunk), nFramesPerChunk, sh[(- 2)], sh[(- 1)]))
|
def reformat_vecs_by_tau(vecs, dt, tau):
"\n This proc assumes that vecs list is N 3D-arrays in the form <Nfile>,(frames, bonds, XYZ).\n We take advantage of Palmer's iteration where the trajectory is divided into N chunks each of tau in length,\n to reformulate everything into fast 4D np.arrays of form (nchunk, frames, bonds, XYZ) so as to\n take full advantage of broadcasting.\n This will throw away additional frame data in each trajectory that does not fit into a single block of memory time tau.\n "
nFiles = len(vecs)
nFramesPerChunk = int((tau / dt))
print((' ...debug: Using %i frames per chunk based on tau/dt (%g/%g).' % (nFramesPerChunk, tau, dt)))
used_frames = np.zeros(nFiles, dtype=int)
remainders = np.zeros(nFiles, dtype=int)
for i in range(nFiles):
nFrames = vecs[i].shape[0]
used_frames[i] = (int((nFrames / nFramesPerChunk)) * nFramesPerChunk)
remainders[i] = (nFrames % nFramesPerChunk)
print((' ...Source %i divided into %i chunks. Usage rate: %g %%' % (i, (used_frames[i] / nFramesPerChunk), ((100.0 * used_frames[i]) / nFrames))))
nFramesTot = int(used_frames.sum())
out = np.zeros((nFramesTot, vecs[0].shape[1], vecs[0].shape[2]), dtype=vecs[0].dtype)
start = 0
for i in range(nFiles):
end = int((start + used_frames[i]))
endv = int(used_frames[i])
        out[start:end, ...] = vecs[i][0:endv, ...]
start = end
sh = out.shape
print((' ...Done. vecs reformatted into %i chunks.' % (nFramesTot / nFramesPerChunk)))
    return out.reshape((int(nFramesTot / nFramesPerChunk), nFramesPerChunk, sh[(- 2)], sh[(- 1)]))<|docstring|>This proc assumes that vecs list is N 3D-arrays in the form <Nfile>,(frames, bonds, XYZ).
We take advantage of Palmer's iteration where the trajectory is divided into N chunks each of tau in length,
to reformulate everything into fast 4D np.arrays of form (nchunk, frames, bonds, XYZ) so as to
take full advantage of broadcasting.
This will throw away additional frame data in each trajectory that does not fit into a single block of memory time tau.<|endoftext|>
|
a43b58c393881f430d2c4be6045230160aa2a0a3087185a2fa08c54a9cfcdd2e
|
def get_indices_mdtraj(seltxt, top, filename):
    '\n NB: A workaround for MDTraj is needed because the standard reader\n does not return topologies.\n '
if (seltxt == 'custom occupancy'):
pdb = md.formats.pdb.pdbstructure.PdbStructure(open(filename))
mask = [atom.get_occupancy() for atom in pdb.iter_atoms()]
inds = top.select('all')
return [inds[i] for i in range(len(mask)) if (mask[i] > 0.0)]
else:
return top.select(seltxt)
|
NB: A workaround for MDTraj is needed because the standard reader
does not return topologies.
|
calculate-S2.py
|
get_indices_mdtraj
|
zharmad/SpinRelax
| 0 |
python
|
def get_indices_mdtraj(seltxt, top, filename):
    '\n NB: A workaround for MDTraj is needed because the standard reader\n does not return topologies.\n '
if (seltxt == 'custom occupancy'):
pdb = md.formats.pdb.pdbstructure.PdbStructure(open(filename))
mask = [atom.get_occupancy() for atom in pdb.iter_atoms()]
inds = top.select('all')
return [inds[i] for i in range(len(mask)) if (mask[i] > 0.0)]
else:
return top.select(seltxt)
|
def get_indices_mdtraj(seltxt, top, filename):
    '\n NB: A workaround for MDTraj is needed because the standard reader\n does not return topologies.\n '
if (seltxt == 'custom occupancy'):
pdb = md.formats.pdb.pdbstructure.PdbStructure(open(filename))
mask = [atom.get_occupancy() for atom in pdb.iter_atoms()]
inds = top.select('all')
return [inds[i] for i in range(len(mask)) if (mask[i] > 0.0)]
else:
        return top.select(seltxt)<|docstring|>NB: A workaround for MDTraj is needed because the standard reader
does not return topologies.<|endoftext|>
|
490b00d7ff6e5d9da91c601e99d7a62cf0ee502b2e4d7439b5cdaef3a4941735
|
def test_001_all(self):
'\n details for all workspaces\n '
ws = self.api.workspaces
ws_list = ws.list()
with ThreadPoolExecutor() as pool:
details = list(pool.map((lambda w: ws.details(workspace_id=w.workspace_id)), ws_list))
print(f'got details for {len(details)} workspaces')
|
details for all workspaces
|
tests/test_workspaces.py
|
test_001_all
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_001_all(self):
'\n \n '
ws = self.api.workspaces
ws_list = ws.list()
with ThreadPoolExecutor() as pool:
details = list(pool.map((lambda w: ws.details(workspace_id=w.workspace_id)), ws_list))
print(f'got details for {len(details)} workspaces')
|
def test_001_all(self):
'\n \n '
ws = self.api.workspaces
ws_list = ws.list()
with ThreadPoolExecutor() as pool:
details = list(pool.map((lambda w: ws.details(workspace_id=w.workspace_id)), ws_list))
print(f'got details for {len(details)} workspaces')<|docstring|>details for all workspaces<|endoftext|>
|
41a4b9ef62c03e71ab58ccbd5d778ab0b033721c99c3f6c82c435b603a35161e
|
def test_001_get_all(self):
'\n get outgoing permissions auto transfer numbers for all workspaces\n '
wsa = self.api.workspaces
tna = self.api.workspace_settings.permissions_out.transfer_numbers
targets = [ws for ws in wsa.list() if (ws.calling == CallingType.webex)]
if (not targets):
self.skipTest('Need some WxC enabled workspaces to run this test')
with ThreadPoolExecutor() as pool:
_ = list(pool.map((lambda ws: tna.read(person_id=ws.workspace_id)), targets))
print(f'outgoing permissions auto transfer numbers for {len(targets)} workspaces')
|
get outgoing permissions auto transfer numbers for all workspaces
|
tests/test_workspaces.py
|
test_001_get_all
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_001_get_all(self):
'\n \n '
wsa = self.api.workspaces
tna = self.api.workspace_settings.permissions_out.transfer_numbers
targets = [ws for ws in wsa.list() if (ws.calling == CallingType.webex)]
if (not targets):
self.skipTest('Need some WxC enabled workspaces to run this test')
with ThreadPoolExecutor() as pool:
_ = list(pool.map((lambda ws: tna.read(person_id=ws.workspace_id)), targets))
print(f'outgoing permissions auto transfer numbers for {len(targets)} workspaces')
|
def test_001_get_all(self):
'\n \n '
wsa = self.api.workspaces
tna = self.api.workspace_settings.permissions_out.transfer_numbers
targets = [ws for ws in wsa.list() if (ws.calling == CallingType.webex)]
if (not targets):
self.skipTest('Need some WxC enabled workspaces to run this test')
with ThreadPoolExecutor() as pool:
_ = list(pool.map((lambda ws: tna.read(person_id=ws.workspace_id)), targets))
print(f'outgoing permissions auto transfer numbers for {len(targets)} workspaces')<|docstring|>get outgoing permissions auto transfer numbers for all workspaces<|endoftext|>
|
cbfb2796377f13e6f873844ca4df03f8a145c7d84c9be48bc9b8e82965c9e623
|
@contextmanager
def target_ws_context(self, use_custom_enabled: bool=True) -> Workspace:
'\n pick a random workspace and make sure that the outgoing permission settings are restored\n\n :return:\n '
po = self.api.workspace_settings.permissions_out
targets = [ws for ws in self.api.workspaces.list() if (ws.calling == CallingType.webex)]
if (not targets):
self.skipTest('Need some WxC enabled workspaces to run this test')
random.shuffle(targets)
po_settings = None
target_ws = next((ws for ws in targets if (use_custom_enabled or (not (po_settings := po.read(person_id=ws.workspace_id)).use_custom_enabled))), None)
if (target_ws is None):
self.skipTest('No WxC enabled workspace with use_custom_enabled==False')
if (po_settings is None):
po_settings = po.read(person_id=target_ws.workspace_id)
try:
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=OutgoingPermissions(use_custom_enabled=use_custom_enabled))
(yield target_ws)
finally:
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=po_settings)
po_restored = po.read(person_id=target_ws.workspace_id)
self.assertEqual(po_settings, po_restored)
|
pick a random workspace and make sure that the outgoing permission settings are restored
:return:
|
tests/test_workspaces.py
|
target_ws_context
|
jeokrohn/wxc_sdk
| 0 |
python
|
@contextmanager
def target_ws_context(self, use_custom_enabled: bool=True) -> Workspace:
'\n pick a random workspace and make sure that the outgoing permission settings are restored\n\n :return:\n '
po = self.api.workspace_settings.permissions_out
targets = [ws for ws in self.api.workspaces.list() if (ws.calling == CallingType.webex)]
if (not targets):
self.skipTest('Need some WxC enabled workspaces to run this test')
random.shuffle(targets)
po_settings = None
target_ws = next((ws for ws in targets if (use_custom_enabled or (not (po_settings := po.read(person_id=ws.workspace_id)).use_custom_enabled))), None)
if (target_ws is None):
self.skipTest('No WxC enabled workspace with use_custom_enabled==False')
if (po_settings is None):
po_settings = po.read(person_id=target_ws.workspace_id)
try:
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=OutgoingPermissions(use_custom_enabled=use_custom_enabled))
(yield target_ws)
finally:
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=po_settings)
po_restored = po.read(person_id=target_ws.workspace_id)
self.assertEqual(po_settings, po_restored)
|
@contextmanager
def target_ws_context(self, use_custom_enabled: bool=True) -> Workspace:
'\n pick a random workspace and make sure that the outgoing permission settings are restored\n\n :return:\n '
po = self.api.workspace_settings.permissions_out
targets = [ws for ws in self.api.workspaces.list() if (ws.calling == CallingType.webex)]
if (not targets):
self.skipTest('Need some WxC enabled workspaces to run this test')
random.shuffle(targets)
po_settings = None
target_ws = next((ws for ws in targets if (use_custom_enabled or (not (po_settings := po.read(person_id=ws.workspace_id)).use_custom_enabled))), None)
if (target_ws is None):
self.skipTest('No WxC enabled workspace with use_custom_enabled==False')
if (po_settings is None):
po_settings = po.read(person_id=target_ws.workspace_id)
try:
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=OutgoingPermissions(use_custom_enabled=use_custom_enabled))
(yield target_ws)
finally:
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=po_settings)
po_restored = po.read(person_id=target_ws.workspace_id)
self.assertEqual(po_settings, po_restored)<|docstring|>pick a random workspace and make sure that the outgoing permission settings are restored
:return:<|endoftext|>
|
a0cd84330aee36c41ad4ac3761cf21ebe394c22755e6f700a132fec7aef9df8b
|
def test_002_update_wo_custom_enabled(self):
'\n updating auto transfer numbers requires use_custom_enabled to be set\n :return:\n '
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context(use_custom_enabled=False) as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
update = numbers.copy(deep=True)
transfer = f'+4961007739{random.randint(0, 999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
|
updating auto transfer numbers requires use_custom_enabled to be set
:return:
|
tests/test_workspaces.py
|
test_002_update_wo_custom_enabled
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_002_update_wo_custom_enabled(self):
'\n updating auto transfer numbers requires use_custom_enabled to be set\n :return:\n '
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context(use_custom_enabled=False) as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
update = numbers.copy(deep=True)
transfer = f'+4961007739{random.randint(0, 999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
|
def test_002_update_wo_custom_enabled(self):
'\n updating auto transfer numbers requires use_custom_enabled to be set\n :return:\n '
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context(use_custom_enabled=False) as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
update = numbers.copy(deep=True)
transfer = f'+4961007739{random.randint(0, 999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)<|docstring|>updating auto transfer numbers requires use_custom_enabled to be set
:return:<|endoftext|>
|
0da738c3a5a3c61e3782785787b6f48f7f85cb9e14f393cb1f4b4defc4936d90
|
def test_003_update_one_number(self):
'\n try to update auto transfer numbers for a workspace\n '
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
update = numbers.copy(deep=True)
transfer = f'+496100773{random.randint(0, 9999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
updated.auto_transfer_number1 = numbers.auto_transfer_number1
self.assertEqual(numbers, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
|
try to update auto transfer numbers for a workspace
|
tests/test_workspaces.py
|
test_003_update_one_number
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_003_update_one_number(self):
'\n \n '
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
update = numbers.copy(deep=True)
transfer = f'+496100773{random.randint(0, 9999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
                self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
updated.auto_transfer_number1 = numbers.auto_transfer_number1
self.assertEqual(numbers, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
|
def test_003_update_one_number(self):
'\n \n '
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
update = numbers.copy(deep=True)
transfer = f'+496100773{random.randint(0, 9999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
                self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
updated.auto_transfer_number1 = numbers.auto_transfer_number1
self.assertEqual(numbers, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)<|docstring|>try to update auto transfer numbers for a workspace<|endoftext|>
|
46d52075f4a1de4cbf8e421505fa6a4ec3253043e973328a9d8a18396d5267f7
|
def test_002_update_one_number_no_effect_on_other_numbers(self):
"\n try to update auto transfer numbers for a workspace. Verify that updating a single number doesn't affect the\n other numbers\n "
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
all_numbers_set = AutoTransferNumbers(auto_transfer_number1='+4961007738001', auto_transfer_number2='+4961007738002', auto_transfer_number3='+4961007738003')
tna.configure(person_id=target_ws.workspace_id, settings=all_numbers_set)
all_numbers_set = tna.read(person_id=target_ws.workspace_id)
transfer = f'+496100773{random.randint(0, 9999):03}'
update = AutoTransferNumbers(auto_transfer_number1=transfer)
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
updated.auto_transfer_number1 = all_numbers_set.auto_transfer_number1
self.assertEqual(all_numbers_set, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
|
try to update auto transfer numbers for a workspace. Verify that updating a single number doesn't affect the
other numbers
|
tests/test_workspaces.py
|
test_002_update_one_number_no_effect_on_other_numbers
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_002_update_one_number_no_effect_on_other_numbers(self):
"\n try to update auto transfer numbers for a workspace. Verify that updating a single number doesn't affect the\n other numbers\n "
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
all_numbers_set = AutoTransferNumbers(auto_transfer_number1='+4961007738001', auto_transfer_number2='+4961007738002', auto_transfer_number3='+4961007738003')
tna.configure(person_id=target_ws.workspace_id, settings=all_numbers_set)
all_numbers_set = tna.read(person_id=target_ws.workspace_id)
transfer = f'+496100773{random.randint(0, 9999):03}'
update = AutoTransferNumbers(auto_transfer_number1=transfer)
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
                self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
updated.auto_transfer_number1 = all_numbers_set.auto_transfer_number1
self.assertEqual(all_numbers_set, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
|
def test_002_update_one_number_no_effect_on_other_numbers(self):
"\n try to update auto transfer numbers for a workspace. Verify that updating a single number doesn't affect the\n other numbers\n "
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
all_numbers_set = AutoTransferNumbers(auto_transfer_number1='+4961007738001', auto_transfer_number2='+4961007738002', auto_transfer_number3='+4961007738003')
tna.configure(person_id=target_ws.workspace_id, settings=all_numbers_set)
all_numbers_set = tna.read(person_id=target_ws.workspace_id)
transfer = f'+496100773{random.randint(0, 9999):03}'
update = AutoTransferNumbers(auto_transfer_number1=transfer)
tna.configure(person_id=target_ws.workspace_id, settings=update)
updated = tna.read(person_id=target_ws.workspace_id)
                self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
updated.auto_transfer_number1 = all_numbers_set.auto_transfer_number1
self.assertEqual(all_numbers_set, updated)
finally:
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)<|docstring|>try to update auto transfer numbers for a workspace. Verify that updating a single number doesn't affect the
other numbers<|endoftext|>
|
1db3959081391609056cf9d7fe4f4ccf4c8569b90a0653906751a0892591b6d6
|
def test_001_trivial(self):
'\n create workspace with minimal settings\n '
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace.create(display_name=name)
workspace = ws.create(settings=settings)
    print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
|
create workspace with minimal settings
|
tests/test_workspaces.py
|
test_001_trivial
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_001_trivial(self):
'\n \n '
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace.create(display_name=name)
workspace = ws.create(settings=settings)
    print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
|
def test_001_trivial(self):
'\n \n '
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace.create(display_name=name)
workspace = ws.create(settings=settings)
    print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)<|docstring|>create workspace with minimal settings<|endoftext|>
|
a07b693da37725b55a7eab62788948dbdf406e078da6046dae7a8f1806ffae82
|
def test_002_edge_for_devices(self):
'\n create workspace with edge_for_devices\n '
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace(display_name=name, calling=CallingType.edge_for_devices)
workspace = ws.create(settings=settings)
    print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
|
create workspace with edge_for_devices
|
tests/test_workspaces.py
|
test_002_edge_for_devices
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_002_edge_for_devices(self):
'\n \n '
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace(display_name=name, calling=CallingType.edge_for_devices)
workspace = ws.create(settings=settings)
    print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
|
def test_002_edge_for_devices(self):
'\n \n '
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace(display_name=name, calling=CallingType.edge_for_devices)
workspace = ws.create(settings=settings)
    print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)<|docstring|>create workspace with edge_for_devices<|endoftext|>
|
2d35c10818891e32a9bbba40fc7e9b92376f3e6c1a7140bbc0efa12e9fa34960
|
def test_003_change_name_full(self):
'\n change name of a workspace, full settings\n '
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
settings: Workspace = target_ws.copy(deep=True)
new_name = next(self.new_names())
settings.display_name = new_name
after = ws.update(workspace_id=target_ws.workspace_id, settings=settings)
self.assertEqual(new_name, after.display_name)
|
change name of a workspace, full settings
|
tests/test_workspaces.py
|
test_003_change_name_full
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_003_change_name_full(self):
'\n \n '
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
settings: Workspace = target_ws.copy(deep=True)
new_name = next(self.new_names())
settings.display_name = new_name
after = ws.update(workspace_id=target_ws.workspace_id, settings=settings)
self.assertEqual(new_name, after.display_name)
|
def test_003_change_name_full(self):
'\n \n '
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
settings: Workspace = target_ws.copy(deep=True)
new_name = next(self.new_names())
settings.display_name = new_name
after = ws.update(workspace_id=target_ws.workspace_id, settings=settings)
self.assertEqual(new_name, after.display_name)<|docstring|>change name of a workspace, full settings<|endoftext|>
|
298301aed3a7278e1fc1c6b61b9949bdfd08caa3d4a10de9e533f28413348355
|
def test_004_change_name_name_only(self):
'\n change name of a workspace, only name update\n '
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
new_name = next(self.new_names())
settings = Workspace(display_name=new_name)
after = ws.update(workspace_id=target_ws.workspace_id, settings=settings)
self.assertEqual(new_name, after.display_name)
|
change name of a workspace, only name update
|
tests/test_workspaces.py
|
test_004_change_name_name_only
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_004_change_name_name_only(self):
'\n \n '
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
new_name = next(self.new_names())
settings = Workspace(display_name=new_name)
after = ws.update(workspace_id=target_ws.workspace_id, settings=settings)
self.assertEqual(new_name, after.display_name)
|
def test_004_change_name_name_only(self):
'\n \n '
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
new_name = next(self.new_names())
settings = Workspace(display_name=new_name)
after = ws.update(workspace_id=target_ws.workspace_id, settings=settings)
self.assertEqual(new_name, after.display_name)<|docstring|>change name of a workspace, only name update<|endoftext|>
|
d53dbeda749999521a50e8213f296218407701a08732755611598553abaf1766
|
def test_001_delete_one(self):
'\n delete a random workspace\n '
ws = self.api.workspaces
ws_list = list(ws.list(display_name=TEST_WORKSPACES_PREFIX))
if (not ws_list):
self.skipTest('No test workspace to delete')
target = random.choice(ws_list)
ws.delete_workspace(workspace_id=target.workspace_id)
with self.assertRaises(RestError) as exc:
ws.details(workspace_id=target.workspace_id)
rest_error: RestError = exc.exception
self.assertEqual(404, rest_error.response.status_code)
|
delete a random workspace
|
tests/test_workspaces.py
|
test_001_delete_one
|
jeokrohn/wxc_sdk
| 0 |
python
|
def test_001_delete_one(self):
'\n \n '
ws = self.api.workspaces
ws_list = list(ws.list(display_name=TEST_WORKSPACES_PREFIX))
if (not ws_list):
self.skipTest('No test workspace to delete')
target = random.choice(ws_list)
ws.delete_workspace(workspace_id=target.workspace_id)
with self.assertRaises(RestError) as exc:
ws.details(workspace_id=target.workspace_id)
rest_error: RestError = exc.exception
self.assertEqual(404, rest_error.response.status_code)
|
def test_001_delete_one(self):
'\n \n '
ws = self.api.workspaces
ws_list = list(ws.list(display_name=TEST_WORKSPACES_PREFIX))
if (not ws_list):
self.skipTest('No test workspace to delete')
target = random.choice(ws_list)
ws.delete_workspace(workspace_id=target.workspace_id)
with self.assertRaises(RestError) as exc:
ws.details(workspace_id=target.workspace_id)
rest_error: RestError = exc.exception
self.assertEqual(404, rest_error.response.status_code)<|docstring|>delete a random workspace<|endoftext|>
|
91c50ce14732741668e5605aa9df20d450c96b6b377d06d3ce87bb52884f589c
|
def get_native_batch(self, stage: str, loader: Union[(str, int)]=0, data_index: int=0):
'Returns a batch from experiment loader\n\n Args:\n stage (str): stage name\n loader (Union[str, int]): loader name or its index,\n default is the first loader\n data_index (int): index in dataset from the loader\n '
loaders = self.get_loaders(stage)
if isinstance(loader, str):
_loader = loaders[loader]
elif isinstance(loader, int):
_loader = list(loaders.values())[loader]
else:
raise TypeError('Loader parameter must be a string or an integer')
dataset = _loader.dataset
collate_fn = _loader.collate_fn
sample = collate_fn([dataset[data_index]])
return sample
|
Returns a batch from experiment loader
Args:
stage (str): stage name
loader (Union[str, int]): loader name or its index,
default is the first loader
data_index (int): index in dataset from the loader
|
catalyst_rl/dl/core/experiment.py
|
get_native_batch
|
rhololkeolke/catalyst-rl
| 46 |
python
|
def get_native_batch(self, stage: str, loader: Union[(str, int)]=0, data_index: int=0):
'Returns a batch from experiment loader\n\n Args:\n stage (str): stage name\n loader (Union[str, int]): loader name or its index,\n default is the first loader\n data_index (int): index in dataset from the loader\n '
loaders = self.get_loaders(stage)
if isinstance(loader, str):
_loader = loaders[loader]
elif isinstance(loader, int):
_loader = list(loaders.values())[loader]
else:
raise TypeError('Loader parameter must be a string or an integer')
dataset = _loader.dataset
collate_fn = _loader.collate_fn
sample = collate_fn([dataset[data_index]])
return sample
|
def get_native_batch(self, stage: str, loader: Union[(str, int)]=0, data_index: int=0):
'Returns a batch from experiment loader\n\n Args:\n stage (str): stage name\n loader (Union[str, int]): loader name or its index,\n default is the first loader\n data_index (int): index in dataset from the loader\n '
loaders = self.get_loaders(stage)
if isinstance(loader, str):
_loader = loaders[loader]
elif isinstance(loader, int):
_loader = list(loaders.values())[loader]
else:
raise TypeError('Loader parameter must be a string or an integer')
dataset = _loader.dataset
collate_fn = _loader.collate_fn
sample = collate_fn([dataset[data_index]])
return sample<|docstring|>Returns a batch from experiment loader
Args:
stage (str): stage name
loader (Union[str, int]): loader name or its index,
default is the first loader
data_index (int): index in dataset from the loader<|endoftext|>
|
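
The collate pattern that get_native_batch relies on is easy to demonstrate in isolation. A minimal sketch with a toy torch dataset (torch and the tensor values below are illustrative assumptions, not part of the record above):

import torch
from torch.utils.data import DataLoader, TensorDataset

# Stand-in for an experiment loader: take one dataset item, wrap it in a
# list, and push it through the loader's collate_fn so it comes back as a
# batch of size 1 -- the same three steps the method performs.
dataset = TensorDataset(torch.arange(10.0).unsqueeze(1))
loader = DataLoader(dataset, batch_size=4)
sample = loader.collate_fn([dataset[2]])
print(sample)  # [tensor([[2.]])] -- item 2 with a leading batch dimension
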
0903f8c1acfcd0b8194829c037d9219acdcf0551a2e524deb2304e271db3e227
|
def main():
'\n Extracts various features from a cv2 image and returns them in a list.\n :param None\n :return: A tuple containing two lists: containing the most similar and the least similar blobs wrt the query blob.\n '
parser = argparse.ArgumentParser()
parser.add_argument('--blob_set', required=True, help='The path to the blob_set directory')
parser.add_argument('--query_blob', required=True, help='The file path to the query blob')
args = parser.parse_args()
blob_set_dir = args.blob_set
query_blob_path = args.query_blob
query_blob = cv2.imread(query_blob_path)
query_features = blob_feature_extractor(img=query_blob)
blob_img_paths = [blob_path for blob_path in os.listdir(blob_set_dir) if blob_path.endswith('.png')]
blob_set_feature_vectors = {}
for blob_path in blob_img_paths:
blob = cv2.imread(('blob_set/' + blob_path))
feature_vector = blob_feature_extractor(img=blob)
blob_set_feature_vectors[os.path.basename(blob_path)] = feature_vector
print('The extracted features of the query blob are: Area, Perimeter, Circularity, Aspect Ratio respectively \n', query_features)
most_similar = []
least_similar = []
for (blob, feature_vector) in blob_set_feature_vectors.items():
if (((query_features[0][0] - 100) <= feature_vector[0][0]) and ((query_features[0][0] + 100) >= feature_vector[0][0])):
if (((query_features[0][2] - 0.15) <= feature_vector[0][2]) and ((query_features[0][2] + 0.15) >= feature_vector[0][2])):
if (((query_features[0][3] - 0.3) <= feature_vector[0][3]) and ((query_features[0][3] + 0.3) >= feature_vector[0][3])):
most_similar.append(blob)
else:
least_similar.append(blob)
return (most_similar, least_similar)
|
Extracts various features from a cv2 image and returns them in a list.
:param None
:return: A tuple containing two lists: containing the most similar and the least similar blobs wrt the query blob.
|
q1_init.py
|
main
|
NJNischal/Blob-Detection-and-Classification
| 0 |
python
|
def main():
'\n Extracts various features from a cv2 image and returns them in a list.\n :param None\n :return: A tuple containing two lists: containing the most similar and the least similar blobs wrt the query blob.\n '
parser = argparse.ArgumentParser()
parser.add_argument('--blob_set', required=True, help='The path to the blob_set directory')
parser.add_argument('--query_blob', required=True, help='The file path to the query blob')
args = parser.parse_args()
blob_set_dir = args.blob_set
query_blob_path = args.query_blob
query_blob = cv2.imread(query_blob_path)
query_features = blob_feature_extractor(img=query_blob)
blob_img_paths = [blob_path for blob_path in os.listdir(blob_set_dir) if blob_path.endswith('.png')]
blob_set_feature_vectors = {}
for blob_path in blob_img_paths:
blob = cv2.imread(('blob_set/' + blob_path))
feature_vector = blob_feature_extractor(img=blob)
blob_set_feature_vectors[os.path.basename(blob_path)] = feature_vector
print('The extracted features of the query blob are: Area, Perimeter, Circularity, Aspect Ratio respectively \n', query_features)
most_similar = []
least_similar = []
for (blob, feature_vector) in blob_set_feature_vectors.items():
if (((query_features[0][0] - 100) <= feature_vector[0][0]) and ((query_features[0][0] + 100) >= feature_vector[0][0])):
if (((query_features[0][2] - 0.15) <= feature_vector[0][2]) and ((query_features[0][2] + 0.15) >= feature_vector[0][2])):
if (((query_features[0][3] - 0.3) <= feature_vector[0][3]) and ((query_features[0][3] + 0.3) >= feature_vector[0][3])):
most_similar.append(blob)
else:
least_similar.append(blob)
return (most_similar, least_similar)
|
def main():
'\n Extracts various features from a cv2 image and returns them in a list.\n :param None\n :return: A tuple containing two lists: containing the most similar and the least similar blobs wrt the query blob.\n '
parser = argparse.ArgumentParser()
parser.add_argument('--blob_set', required=True, help='The path to the blob_set directory')
parser.add_argument('--query_blob', required=True, help='The file path to the query blob')
args = parser.parse_args()
blob_set_dir = args.blob_set
query_blob_path = args.query_blob
query_blob = cv2.imread(query_blob_path)
query_features = blob_feature_extractor(img=query_blob)
blob_img_paths = [blob_path for blob_path in os.listdir(blob_set_dir) if blob_path.endswith('.png')]
blob_set_feature_vectors = {}
for blob_path in blob_img_paths:
blob = cv2.imread(('blob_set/' + blob_path))
feature_vector = blob_feature_extractor(img=blob)
blob_set_feature_vectors[os.path.basename(blob_path)] = feature_vector
print('The extracted features of the query blob are: Area, Perimeter, Circularity, Aspect Ratio respectively \n', query_features)
most_similar = []
least_similar = []
for (blob, feature_vector) in blob_set_feature_vectors.items():
if (((query_features[0][0] - 100) <= feature_vector[0][0]) and ((query_features[0][0] + 100) >= feature_vector[0][0])):
if (((query_features[0][2] - 0.15) <= feature_vector[0][2]) and ((query_features[0][2] + 0.15) >= feature_vector[0][2])):
if (((query_features[0][3] - 0.3) <= feature_vector[0][3]) and ((query_features[0][3] + 0.3) >= feature_vector[0][3])):
most_similar.append(blob)
else:
least_similar.append(blob)
return (most_similar, least_similar)<|docstring|>Extracts various features from a cv2 image and returns them in a list.
:param None
:return: A tuple containing two lists: containing the most similar and the least similar blobs wrt the query blob.<|endoftext|>
|
f557b0da163b232ea0ba87f1f8cff8580d134c53a019fa749a45128a3e6b0d32
|
def blob_feature_extractor(img):
'\n Extracts various features from a cv2 image and returns them in a list.\n :param img: A cv2 image of a blob.\n :return: A list where each element represents a unique feature in the image.\n '
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(ret, thresh) = cv2.threshold(imgray, 127, 255, 0)
(image, contours, hierarchy) = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
(ret, thresh) = cv2.threshold(imgray, 127, 255, 0)
(_, contours, hierarchy) = cv2.findContours(thresh, 1, 2)
cnt = contours[0]
area = cv2.contourArea(cnt)
perimeter = cv2.arcLength(cnt, True)
circularity = (((4 * math.pi) * area) / (perimeter ** 2))
Convexity = cv2.isContourConvex(cnt)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
aspectR = (math.sqrt((((box[0][0] - box[1][0]) ** 2) + ((box[0][1] - box[1][1]) ** 2))) / math.sqrt((((box[2][0] - box[1][0]) ** 2) + ((box[2][1] - box[1][1]) ** 2))))
values = [area, perimeter, circularity, aspectR]
feature_vector = []
feature_vector.append(values)
return feature_vector
|
Extracts various features from a cv2 image and returns them in a list.
:param img: A cv2 image of a blob.
:return: A list where each element represents a unique feature in the image.
|
q1_init.py
|
blob_feature_extractor
|
NJNischal/Blob-Detection-and-Classification
| 0 |
python
|
def blob_feature_extractor(img):
'\n Extracts various features from a cv2 image and returns them in a list.\n :param img: A cv2 image of a blob.\n :return: A list where each element represents a unique feature in the image.\n '
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(ret, thresh) = cv2.threshold(imgray, 127, 255, 0)
(image, contours, hierarchy) = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
(ret, thresh) = cv2.threshold(imgray, 127, 255, 0)
(_, contours, hierarchy) = cv2.findContours(thresh, 1, 2)
cnt = contours[0]
area = cv2.contourArea(cnt)
perimeter = cv2.arcLength(cnt, True)
circularity = (((4 * math.pi) * area) / (perimeter ** 2))
Convexity = cv2.isContourConvex(cnt)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
aspectR = (math.sqrt((((box[0][0] - box[1][0]) ** 2) + ((box[0][1] - box[1][1]) ** 2))) / math.sqrt((((box[2][0] - box[1][0]) ** 2) + ((box[2][1] - box[1][1]) ** 2))))
values = [area, perimeter, circularity, aspectR]
feature_vector = []
feature_vector.append(values)
return feature_vector
|
def blob_feature_extractor(img):
'\n Extracts various features from a cv2 image and returns them in a list.\n :param img: A cv2 image of a blob.\n :return: A list where each element represents a unique feature in the image.\n '
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(ret, thresh) = cv2.threshold(imgray, 127, 255, 0)
(image, contours, hierarchy) = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
(ret, thresh) = cv2.threshold(imgray, 127, 255, 0)
(_, contours, hierarchy) = cv2.findContours(thresh, 1, 2)
cnt = contours[0]
area = cv2.contourArea(cnt)
perimeter = cv2.arcLength(cnt, True)
circularity = (((4 * math.pi) * area) / (perimeter ** 2))
Convexity = cv2.isContourConvex(cnt)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
aspectR = (math.sqrt((((box[0][0] - box[1][0]) ** 2) + ((box[0][1] - box[1][1]) ** 2))) / math.sqrt((((box[2][0] - box[1][0]) ** 2) + ((box[2][1] - box[1][1]) ** 2))))
values = [area, perimeter, circularity, aspectR]
feature_vector = []
feature_vector.append(values)
return feature_vector<|docstring|>Extracts various features from a cv2 image and returns them in a list.
:param img: A cv2 image of a blob.
:return: A list where each element represents a unique feature in the image.<|endoftext|>
|
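
As a quick sanity check on the circularity feature computed above (4·π·area / perimeter²): it evaluates to 1 for an ideal circle and drops below 1 as shapes elongate. A small sketch with arbitrary dimensions:

import math

# ideal circle of radius 10: circularity is exactly 1
r = 10.0
area, perimeter = math.pi * r ** 2, 2 * math.pi * r
print(4 * math.pi * area / perimeter ** 2)  # 1.0

# 2x1 rectangle (area 2, perimeter 6): noticeably less circular
area, perimeter = 2.0, 6.0
print(4 * math.pi * area / perimeter ** 2)  # ~0.698
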
0065dbb3540b9a2e2a8dacc0d14b51b7c8fa1c933ff3caca8ed6848fca5e2e97
|
@staticmethod
def get_environment(async_=False, ENVS={}):
'retrieve singleton jinja2 environment.'
from jinja2 import ChoiceLoader, DictLoader, Environment, FileSystemLoader
if (async_ in ENVS):
return ENVS[async_]
ENVS[async_] = ENVIRONMENT = Environment(enable_async=async_, loader=ChoiceLoader([DictLoader({}), FileSystemLoader('.')]), cache_size=0, undefined=Weave.Undefined, finalize=Weave.Finalize())
return ENVIRONMENT
|
retrieve singleton jinja2 environment.
|
src/pidgy/weave.py
|
get_environment
|
bollwyvl/pidgy
| 25 |
python
|
@staticmethod
def get_environment(async_=False, ENVS={}):
from jinja2 import ChoiceLoader, DictLoader, Environment, FileSystemLoader
if (async_ in ENVS):
return ENVS[async_]
ENVS[async_] = ENVIRONMENT = Environment(enable_async=async_, loader=ChoiceLoader([DictLoader({}), FileSystemLoader('.')]), cache_size=0, undefined=Weave.Undefined, finalize=Weave.Finalize())
return ENVIRONMENT
|
@staticmethod
def get_environment(async_=False, ENVS={}):
from jinja2 import ChoiceLoader, DictLoader, Environment, FileSystemLoader
if (async_ in ENVS):
return ENVS[async_]
ENVS[async_] = ENVIRONMENT = Environment(enable_async=async_, loader=ChoiceLoader([DictLoader({}), FileSystemLoader('.')]), cache_size=0, undefined=Weave.Undefined, finalize=Weave.Finalize())
return ENVIRONMENT<|docstring|>retrieve singleton jinja2 environment.<|endoftext|>
|
0ca20433729e18144f8441b353642c9770914332bb52dab7bbb93167d16995a1
|
def normalize(self, type, object, metadata) -> str:
    'normalize an object with (mime)type and return a string.'
from .utils import get_decoded, get_minified
if ((type == 'text/html') or ('svg' in type)):
object = get_minified(object)
if type.startswith('image'):
(width, height) = (metadata.get(type, {}).get('width'), metadata.get(type, {}).get('height'))
object = get_decoded(object)
object = f"""<img src="data:image/{type.partition('/')[2]};base64,{object}"/>"""
return object
|
normalize an object with (mime)type and return a string.
|
src/pidgy/weave.py
|
normalize
|
bollwyvl/pidgy
| 25 |
python
|
def normalize(self, type, object, metadata) -> str:
from .utils import get_decoded, get_minified
if ((type == 'text/html') or ('svg' in type)):
object = get_minified(object)
if type.startswith('image'):
(width, height) = (metadata.get(type, {}).get('width'), metadata.get(type, {}).get('height'))
object = get_decoded(object)
object = f"<img src="data:image/{type.partition('/')[2]};base64,{object}"/>"
return object
|
def normalize(self, type, object, metadata) -> str:
from .utils import get_decoded, get_minified
if ((type == 'text/html') or ('svg' in type)):
object = get_minified(object)
if type.startswith('image'):
(width, height) = (metadata.get(type, {}).get('width'), metadata.get(type, {}).get('height'))
object = get_decoded(object)
object = f"<img src="data:image/{type.partition('/')[2]};base64,{object}"/>"
    return object<|docstring|>normalize an object with (mime)type and return a string.<|endoftext|>
|
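
The image branch of normalize above reduces to wrapping base64 text in a data URI. A minimal standalone sketch (the byte payload is a placeholder header, not a real image):

import base64

png_bytes = b'\x89PNG\r\n\x1a\n'  # PNG magic bytes only, for illustration
encoded = base64.b64encode(png_bytes).decode('ascii')
tag = f'<img src="data:image/png;base64,{encoded}"/>'
print(tag)  # <img src="data:image/png;base64,iVBORw0KGgo="/>
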
50aea46e0ffbcb66670c488b2dbecadf2e78ca416e2edd787c86a06a13232db0
|
def __call__(self, object):
'convert an object into a markdown/html representation'
from .utils import get_active_types
datum = get_ipython().display_formatter.format(object)
(data, metadata) = (datum if isinstance(datum, tuple) else (datum, {}))
try:
key = next(filter(data.__contains__, get_active_types(get_ipython())))
except StopIteration:
return str(object)
if (key == 'text/plain'):
return str(object)
return self.normalize(key, data[key], metadata)
|
convert an object into a markdown/html representation
|
src/pidgy/weave.py
|
__call__
|
bollwyvl/pidgy
| 25 |
python
|
def __call__(self, object):
from .utils import get_active_types
datum = get_ipython().display_formatter.format(object)
(data, metadata) = (datum if isinstance(datum, tuple) else (datum, {}))
try:
key = next(filter(data.__contains__, get_active_types(get_ipython())))
except StopIteration:
return str(object)
if (key == 'text/plain'):
return str(object)
return self.normalize(key, data[key], metadata)
|
def __call__(self, object):
from .utils import get_active_types
datum = get_ipython().display_formatter.format(object)
(data, metadata) = (datum if isinstance(datum, tuple) else (datum, {}))
try:
key = next(filter(data.__contains__, get_active_types(get_ipython())))
except StopIteration:
return str(object)
if (key == 'text/plain'):
return str(object)
return self.normalize(key, data[key], metadata)<|docstring|>convert an object into a markdown/html representation<|endoftext|>
|
11337ad32f55a1fefc4a1a5e833df2cc230dc45e184b361209dab530e6ad7f51
|
def __init__(self, fname):
'\n Enlil time-series JSON dataset.\n\n This class will read in the given JSON file that contains\n the Enlil output for a specific satellite.\n '
self.name = os.path.basename(fname).split('.')[1]
with open(fname) as f:
self.json = json.loads(f.read())
self.times = (np.datetime64('1970-01-01') + np.array(self.json['coords']['time']['data']).astype('timedelta64[s]'))
for var in ['X', 'Y', 'Z']:
setattr(self, var, np.array(self.json['data_vars'][var]['data']))
|
Enlil time-series JSON dataset.
This class will read in the given JSON file that contains
the Enlil output for a specific satellite.
|
pvw/server/evolution.py
|
__init__
|
SWxTREC/enlil-3d-server
| 0 |
python
|
def __init__(self, fname):
'\n Enlil time-series JSON dataset.\n\n This class will read in the given JSON file that contains\n the Enlil output for a specific satellite.\n '
self.name = os.path.basename(fname).split('.')[1]
with open(fname) as f:
self.json = json.loads(f.read())
self.times = (np.datetime64('1970-01-01') + np.array(self.json['coords']['time']['data']).astype('timedelta64[s]'))
for var in ['X', 'Y', 'Z']:
setattr(self, var, np.array(self.json['data_vars'][var]['data']))
|
def __init__(self, fname):
'\n Enlil time-series JSON dataset.\n\n This class will read in the given JSON file that contains\n the Enlil output for a specific satellite.\n '
self.name = os.path.basename(fname).split('.')[1]
with open(fname) as f:
self.json = json.loads(f.read())
self.times = (np.datetime64('1970-01-01') + np.array(self.json['coords']['time']['data']).astype('timedelta64[s]'))
for var in ['X', 'Y', 'Z']:
setattr(self, var, np.array(self.json['data_vars'][var]['data']))<|docstring|>Enlil time-series JSON dataset.
This class will read in the given JSON file that contains
the Enlil output for a specific satellite.<|endoftext|>
|
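
The time decoding in the constructor above is a one-line NumPy idiom worth seeing on its own; the sample epoch values below are arbitrary:

import numpy as np

# epoch seconds -> datetime64, exactly the arithmetic used in __init__
epoch_seconds = np.array([0, 86400, 90000])
times = np.datetime64('1970-01-01') + epoch_seconds.astype('timedelta64[s]')
print(times)
# ['1970-01-01T00:00:00' '1970-01-02T00:00:00' '1970-01-02T01:00:00']
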
791c195e16a19e82a5f8376bfd416173bcbd2ad58b3866c3bfef990a8a9d7fb9
|
def get_position(self, time):
'\n Get the position at the given time.\n\n time : datetime-like\n Time of interest\n\n\n Returns\n -------\n The closest (X, Y, Z) position of the satellite to the requested time.\n '
loc = np.argmin(np.abs((np.datetime64(time) - self.times)))
return (self.X[loc], self.Y[loc], self.Z[loc])
|
Get the position at the given time.
time : datetime-like
Time of interest
Returns
-------
The closest (X, Y, Z) position of the satellite to the requested time.
|
pvw/server/evolution.py
|
get_position
|
SWxTREC/enlil-3d-server
| 0 |
python
|
def get_position(self, time):
'\n Get the position at the given time.\n\n time : datetime-like\n Time of interest\n\n\n Returns\n -------\n The closest (X, Y, Z) position of the satellite to the requested time.\n '
loc = np.argmin(np.abs((np.datetime64(time) - self.times)))
return (self.X[loc], self.Y[loc], self.Z[loc])
|
def get_position(self, time):
'\n Get the position at the given time.\n\n time : datetime-like\n Time of interest\n\n\n Returns\n -------\n The closest (X, Y, Z) position of the satellite to the requested time.\n '
loc = np.argmin(np.abs((np.datetime64(time) - self.times)))
return (self.X[loc], self.Y[loc], self.Z[loc])<|docstring|>Get the position at the given time.
time : datetime-like
Time of interest
Returns
-------
The closest (X, Y, Z) position of the satellite to the requested time.<|endoftext|>
|
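
The nearest-time search inside get_position above is the standard argmin-over-absolute-difference idiom; a tiny sketch with made-up dates:

import numpy as np

times = np.arange('2020-01-01', '2020-01-06', dtype='datetime64[D]')
target = np.datetime64('2020-01-03T10:00')
loc = np.argmin(np.abs(target - times))
print(times[loc])  # 2020-01-03 -- the sampled time closest to the target
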
195ba857d58538780254d52eba6518620a6fe64825bb9b314595f37b8a9df807
|
def get_data(self, variable):
'\n Get the data within the variable of this satellite\n\n variable : str\n Variable of interest\n\n Returns\n -------\n List of data for this satellite\n '
return self.json['data_vars'][variable]['data']
|
Get the data within the variable of this satellite
variable : str
Variable of interest
Returns
-------
List of data for this satellite
|
pvw/server/evolution.py
|
get_data
|
SWxTREC/enlil-3d-server
| 0 |
python
|
def get_data(self, variable):
'\n Get the data within the variable of this satellite\n\n variable : str\n Variable of interest\n\n Returns\n -------\n List of data for this satellite\n '
return self.json['data_vars'][variable]['data']
|
def get_data(self, variable):
'\n Get the data within the variable of this satellite\n\n variable : str\n Variable of interest\n\n Returns\n -------\n List of data for this satellite\n '
return self.json['data_vars'][variable]['data']<|docstring|>Get the data within the variable of this satellite
variable : str
Variable of interest
Returns
-------
List of data for this satellite<|endoftext|>
|
8676d124a1da355cefe7f0dd45dad4a958ed661818c1f9b6d4bf901e4aa50b32
|
def get_times(self):
'\n Get the time series within the variable of this satellite\n\n Returns\n -------\n List of times for this satellite\n '
return self.json['coords']['time']['data']
|
Get the time series within the variable of this satellite
Returns
-------
List of times for this satellite
|
pvw/server/evolution.py
|
get_times
|
SWxTREC/enlil-3d-server
| 0 |
python
|
def get_times(self):
'\n Get the time series within the variable of this satellite\n\n Returns\n -------\n List of times for this satellite\n '
return self.json['coords']['time']['data']
|
def get_times(self):
'\n Get the time series within the variable of this satellite\n\n Returns\n -------\n List of times for this satellite\n '
return self.json['coords']['time']['data']<|docstring|>Get the time series within the variable of this satellite
Returns
-------
List of times for this satellite<|endoftext|>
|
89775c24d47b5feda2bae813fc085f5ed85c9f2dab7f5e53c0ac6cf39985fba1
|
def as_latis(self):
'Create a Latis-style return for front-end use.'
ntimes = len(self.times)
timestep_data = []
for i in range(ntimes):
curr_row = [(int(self.get_times()[i]) * 1000)]
for var in ['Density', 'Vr', 'Pressure', 'T', 'Bx', 'By', 'Bz']:
curr_row.append(self.get_data(var)[i][0])
timestep_data.append(curr_row)
    json_out = {f'{self.name}': {'metadata': {'time': {'units': 'milliseconds since 1970-01-01', 'length': f'{ntimes}'}, 'density': {'missing_value': '99999.99', 'description': 'Density', 'units': 'r<sup>2</sup>N/cm<sup>3</sup>'}, 'velocity': {'missing_value': '99999.99', 'description': 'Velocity', 'units': 'km/s'}, 'pressure': {'missing_value': '99999.99', 'description': 'Ram pressure', 'units': 'r<sup>2</sup>N/cm<sup>3</sup> * km<sup>2</sup>/s<sup>2</sup>'}, 'temperature': {'missing_value': '99999.99', 'description': 'Temperature', 'units': 'K'}, 'bx': {'missing_value': '99999.99', 'description': 'BX', 'units': 'nT'}, 'by': {'missing_value': '99999.99', 'description': 'BY', 'units': 'nT'}, 'bz': {'missing_value': '99999.99', 'description': 'BZ', 'units': 'nT'}}, 'parameters': ['time', 'density', 'velocity', 'pressure', 'temperature', 'bx', 'by', 'bz'], 'data': timestep_data}}
return json.dumps(json_out)
|
Create a Latis-style return for front-end use.
|
pvw/server/evolution.py
|
as_latis
|
SWxTREC/enlil-3d-server
| 0 |
python
|
def as_latis(self):
ntimes = len(self.times)
timestep_data = []
for i in range(ntimes):
curr_row = [(int(self.get_times()[i]) * 1000)]
for var in ['Density', 'Vr', 'Pressure', 'T', 'Bx', 'By', 'Bz']:
curr_row.append(self.get_data(var)[i][0])
timestep_data.append(curr_row)
    json_out = {f'{self.name}': {'metadata': {'time': {'units': 'milliseconds since 1970-01-01', 'length': f'{ntimes}'}, 'density': {'missing_value': '99999.99', 'description': 'Density', 'units': 'r<sup>2</sup>N/cm<sup>3</sup>'}, 'velocity': {'missing_value': '99999.99', 'description': 'Velocity', 'units': 'km/s'}, 'pressure': {'missing_value': '99999.99', 'description': 'Ram pressure', 'units': 'r<sup>2</sup>N/cm<sup>3</sup> * km<sup>2</sup>/s<sup>2</sup>'}, 'temperature': {'missing_value': '99999.99', 'description': 'Temperature', 'units': 'K'}, 'bx': {'missing_value': '99999.99', 'description': 'BX', 'units': 'nT'}, 'by': {'missing_value': '99999.99', 'description': 'BY', 'units': 'nT'}, 'bz': {'missing_value': '99999.99', 'description': 'BZ', 'units': 'nT'}}, 'parameters': ['time', 'density', 'velocity', 'pressure', 'temperature', 'bx', 'by', 'bz'], 'data': timestep_data}}
return json.dumps(json_out)
|
def as_latis(self):
ntimes = len(self.times)
timestep_data = []
for i in range(ntimes):
curr_row = [(int(self.get_times()[i]) * 1000)]
for var in ['Density', 'Vr', 'Pressure', 'T', 'Bx', 'By', 'Bz']:
curr_row.append(self.get_data(var)[i][0])
timestep_data.append(curr_row)
    json_out = {f'{self.name}': {'metadata': {'time': {'units': 'milliseconds since 1970-01-01', 'length': f'{ntimes}'}, 'density': {'missing_value': '99999.99', 'description': 'Density', 'units': 'r<sup>2</sup>N/cm<sup>3</sup>'}, 'velocity': {'missing_value': '99999.99', 'description': 'Velocity', 'units': 'km/s'}, 'pressure': {'missing_value': '99999.99', 'description': 'Ram pressure', 'units': 'r<sup>2</sup>N/cm<sup>3</sup> * km<sup>2</sup>/s<sup>2</sup>'}, 'temperature': {'missing_value': '99999.99', 'description': 'Temperature', 'units': 'K'}, 'bx': {'missing_value': '99999.99', 'description': 'BX', 'units': 'nT'}, 'by': {'missing_value': '99999.99', 'description': 'BY', 'units': 'nT'}, 'bz': {'missing_value': '99999.99', 'description': 'BZ', 'units': 'nT'}}, 'parameters': ['time', 'density', 'velocity', 'pressure', 'temperature', 'bx', 'by', 'bz'], 'data': timestep_data}}
return json.dumps(json_out)<|docstring|>Create a Latis-style return for front-end use.<|endoftext|>
|
64af5cdc35e967a0afd3a4cbc1ba6ab44fa7e5b2aa637f5e9464912f0a6a6bc4
|
def tl_vbm_and_oae(stim, L):
    '\n DEFINE ALL THE PARAMETERS HERE\n '
sheraPdat = 'StartingPoles.dat'
poles = []
for line in open(sheraPdat, 'r'):
poles.append(float(line.rstrip()))
sheraPo = np.array(poles)
irregularities = 1
opts = {}
opts['sheraPo'] = sheraPo
opts['storeflag'] = 've'
opts['probe_points'] = 'abr'
opts['Fs'] = 100000.0
opts['channels'] = np.min(stim.shape)
opts['subjectNo'] = 1
opts['sectionsNo'] = int(1000.0)
opts['output_folder'] = (os.getcwd() + '/')
opts['numH'] = 13.0
opts['numM'] = 3.0
opts['numL'] = 3.0
opts['IrrPct'] = 0.05
opts['nl'] = 'vel'
opts['L'] = L
irr_on = (irregularities * np.ones((1, opts['channels'])).astype('int'))
cochlear_list = [[cochlea_model(), stim[i], irr_on[0][i], i, opts] for i in range(opts['channels'])]
print('running human auditory model 2018: Verhulst, Altoe, Vasilkov')
p = mp.Pool(mp.cpu_count(), maxtasksperchild=1)
output = p.map(solve_one_cochlea, cochlear_list)
p.close()
p.join()
print('cochlear simulation: done')
return output
|
DEFINE ALL THE PARAMETERS HERE
|
tlmodel/get_tl_vbm_and_oae.py
|
tl_vbm_and_oae
|
deepakbaby/CoNNear_cochlea
| 10 |
python
|
def tl_vbm_and_oae(stim, L):
'\n \n '
sheraPdat = 'StartingPoles.dat'
poles = []
for line in open(sheraPdat, 'r'):
poles.append(float(line.rstrip()))
sheraPo = np.array(poles)
irregularities = 1
opts = {}
opts['sheraPo'] = sheraPo
opts['storeflag'] = 've'
opts['probe_points'] = 'abr'
opts['Fs'] = 100000.0
opts['channels'] = np.min(stim.shape)
opts['subjectNo'] = 1
opts['sectionsNo'] = int(1000.0)
opts['output_folder'] = (os.getcwd() + '/')
opts['numH'] = 13.0
opts['numM'] = 3.0
opts['numL'] = 3.0
opts['IrrPct'] = 0.05
opts['nl'] = 'vel'
opts['L'] = L
irr_on = (irregularities * np.ones((1, opts['channels'])).astype('int'))
cochlear_list = [[cochlea_model(), stim[i], irr_on[0][i], i, opts] for i in range(opts['channels'])]
print('running human auditory model 2018: Verhulst, Altoe, Vasilkov')
p = mp.Pool(mp.cpu_count(), maxtasksperchild=1)
output = p.map(solve_one_cochlea, cochlear_list)
p.close()
p.join()
print('cochlear simulation: done')
return output
|
def tl_vbm_and_oae(stim, L):
'\n \n '
sheraPdat = 'StartingPoles.dat'
poles = []
for line in open(sheraPdat, 'r'):
poles.append(float(line.rstrip()))
sheraPo = np.array(poles)
irregularities = 1
opts = {}
opts['sheraPo'] = sheraPo
opts['storeflag'] = 've'
opts['probe_points'] = 'abr'
opts['Fs'] = 100000.0
opts['channels'] = np.min(stim.shape)
opts['subjectNo'] = 1
opts['sectionsNo'] = int(1000.0)
opts['output_folder'] = (os.getcwd() + '/')
opts['numH'] = 13.0
opts['numM'] = 3.0
opts['numL'] = 3.0
opts['IrrPct'] = 0.05
opts['nl'] = 'vel'
opts['L'] = L
irr_on = (irregularities * np.ones((1, opts['channels'])).astype('int'))
cochlear_list = [[cochlea_model(), stim[i], irr_on[0][i], i, opts] for i in range(opts['channels'])]
print('running human auditory model 2018: Verhulst, Altoe, Vasilkov')
p = mp.Pool(mp.cpu_count(), maxtasksperchild=1)
output = p.map(solve_one_cochlea, cochlear_list)
p.close()
p.join()
print('cochlear simulation: done')
    return output<|docstring|>DEFINE ALL THE PARAMETERS HERE<|endoftext|>
|
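
The parallel block at the end of tl_vbm_and_oae above is the stock multiprocessing pattern; a stripped-down sketch with a stand-in worker (the task payload is invented for illustration):

import multiprocessing as mp

def solve_one(task):  # stand-in for solve_one_cochlea
    name, channel = task
    return channel * 2

if __name__ == '__main__':
    tasks = [('cochlea', i) for i in range(4)]  # one entry per channel
    # maxtasksperchild=1 recycles each worker after a single task,
    # which caps per-process memory growth during long simulations
    p = mp.Pool(mp.cpu_count(), maxtasksperchild=1)
    output = p.map(solve_one, tasks)
    p.close()
    p.join()
    print(output)  # [0, 2, 4, 6]
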
85d6a9b73336267f841b6752eb9a540474e28e6ecabd62ffc40aef31b4fdc3ff
|
@is_staff('Helper')
@commands.group(aliases=['autoprobation'], invoke_without_command=True, case_insensitive=True)
async def autoprobate(self, ctx):
'\n Manages auto-probation.\n on | true | 1 | enable: turns on auto-probation.\n off | false | 0 | disable: turns off auto-probation.\n To display the status of auto-probation, invoke with no subcommand.\n '
(await self.autoprobate_handler(ctx))
|
Manages auto-probation.
on | true | 1 | enable: turns on auto-probation.
off | false | 0 | disable: turns off auto-probation.
To display the status of auto-probation, invoke with no subcommand.
|
cogs/newcomers.py
|
autoprobate
|
xnoe/Kurisu
| 51 |
python
|
@is_staff('Helper')
@commands.group(aliases=['autoprobation'], invoke_without_command=True, case_insensitive=True)
async def autoprobate(self, ctx):
'\n Manages auto-probation.\n on | true | 1 | enable: turns on auto-probation.\n off | false | 0 | disable: turns off auto-probation.\n To display the status of auto-probation, invoke with no subcommand.\n '
(await self.autoprobate_handler(ctx))
|
@is_staff('Helper')
@commands.group(aliases=['autoprobation'], invoke_without_command=True, case_insensitive=True)
async def autoprobate(self, ctx):
'\n Manages auto-probation.\n on | true | 1 | enable: turns on auto-probation.\n off | false | 0 | disable: turns off auto-probation.\n To display the status of auto-probation, invoke with no subcommand.\n '
(await self.autoprobate_handler(ctx))<|docstring|>Manages auto-probation.
on | true | 1 | enable: turns on auto-probation.
off | false | 0 | disable: turns off auto-probation.
To display the status of auto-probation, invoke with no subcommand.<|endoftext|>
|
20bd0e9d615a48c3028517f34961b8ae231834563bfcf4315f3909201fa0ca84
|
@check_if_user_can_ready()
@commands.guild_only()
@commands.command(aliases=['ready'], cooldown=commands.CooldownMapping.from_cooldown(rate=1, per=300.0, type=commands.BucketType.member))
async def ncready(self, ctx, *, reason=''):
'Alerts online staff to a ready request in newcomers.'
newcomers = self.bot.channels['newcomers']
reason = reason[:300]
reason = re.sub('[^\\x20-\\x5b\\x5d-\\x7f]', '', reason)
reason = discord.utils.escape_mentions(reason)
(await ctx.message.delete())
if reason:
(await newcomers.send(f'''{ctx.author} (ID: {ctx.author.id}) is ready for unprobation.
Message: `{reason}` @here''', allowed_mentions=discord.AllowedMentions(everyone=True)))
try:
(await ctx.author.send('✅ Online staff have been notified of your request.'))
except discord.errors.Forbidden:
pass
else:
(await newcomers.send(f'{ctx.author.mention}, please run this command again with a brief message explaining your situation (e.g., `.ready hey guys, i was having trouble hacking my console`). **Copying and pasting the example will not remove your probation.**', delete_after=10))
ctx.command.reset_cooldown(ctx)
|
Alerts online staff to a ready request in newcomers.
|
cogs/newcomers.py
|
ncready
|
xnoe/Kurisu
| 51 |
python
|
@check_if_user_can_ready()
@commands.guild_only()
@commands.command(aliases=['ready'], cooldown=commands.CooldownMapping.from_cooldown(rate=1, per=300.0, type=commands.BucketType.member))
async def ncready(self, ctx, *, reason=''):
newcomers = self.bot.channels['newcomers']
reason = reason[:300]
    reason = re.sub('[^\\x20-\\x5b\\x5d-\\x7f]', '', reason)
reason = discord.utils.escape_mentions(reason)
(await ctx.message.delete())
if reason:
        (await newcomers.send(f'''{ctx.author} (ID: {ctx.author.id}) is ready for unprobation.
Message: `{reason}` @here''', allowed_mentions=discord.AllowedMentions(everyone=True)))
try:
(await ctx.author.send('✅ Online staff have been notified of your request.'))
except discord.errors.Forbidden:
pass
else:
(await newcomers.send(f'{ctx.author.mention}, please run this command again with a brief message explaining your situation (e.g., `.ready hey guys, i was having trouble hacking my console`). **Copying and pasting the example will not remove your probation.**', delete_after=10))
ctx.command.reset_cooldown(ctx)
|
@check_if_user_can_ready()
@commands.guild_only()
@commands.command(aliases=['ready'], cooldown=commands.CooldownMapping.from_cooldown(rate=1, per=300.0, type=commands.BucketType.member))
async def ncready(self, ctx, *, reason=''):
newcomers = self.bot.channels['newcomers']
reason = reason[:300]
    reason = re.sub('[^\\x20-\\x5b\\x5d-\\x7f]', '', reason)
reason = discord.utils.escape_mentions(reason)
(await ctx.message.delete())
if reason:
        (await newcomers.send(f'''{ctx.author} (ID: {ctx.author.id}) is ready for unprobation.
Message: `{reason}` @here''', allowed_mentions=discord.AllowedMentions(everyone=True)))
try:
(await ctx.author.send('✅ Online staff have been notified of your request.'))
except discord.errors.Forbidden:
pass
else:
(await newcomers.send(f'{ctx.author.mention}, please run this command again with a brief message explaining your situation (e.g., `.ready hey guys, i was having trouble hacking my console`). **Copying and pasting the example will not remove your probation.**', delete_after=10))
ctx.command.reset_cooldown(ctx)<|docstring|>Alerts online staff to a ready request in newcomers.<|endoftext|>
|
4a943787e4c72affb8d9bd5ad7164e84087619ff42299bf4b773fd906a0d3476
|
@numba.njit()
def _get_interpolation_values(xi, yi, sigma_range, mu_range, d_sigma, d_mu):
'\n Return values needed for interpolation: bilinear (2D) interpolation\n within ranges, linear (1D) if "one edge" is crossed, corner value if\n "two edges" are crossed. Defined as jitted function due to compatibility\n with numba backend.\n\n :param xi: interpolation value on x-axis, i.e. I_sigma\n :type xi: float\n :param yi: interpolation value on y-axis, i.e. I_mu\n :type yi: float\n :param sigma_range: range of x-axis, i.e. sigma values\n :type sigma_range: np.ndarray\n :param mu_range: range of y-axis, i.e. mu values\n :type mu_range: np.ndarray\n :param d_sigma: grid coarsness in the x-axis, i.e. sigma values\n :type d_sigma: float\n :param d_mu: grid coarsness in the y-axis, i.e. mu values\n :type d_mu: float\n :return: index of the lower interpolation value on x-axis, index of the\n lower interpolation value on y-axis, distance of xi to the lower\n value, distance of yi to the lower value\n :rtype: (int, int, float, float)\n '
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)]) and (yi >= mu_range[0]) and (yi < mu_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
if (yi < mu_range[0]):
yid1 = 0
dyid = 0.0
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
elif (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
else:
xid1 = (- 1)
dxid = 0.0
return (int(xid1), int(yid1), dxid, dyid)
if (yi >= mu_range[(- 1)]):
yid1 = (- 1)
dyid = 0.0
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
elif (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
else:
xid1 = (- 1)
dxid = 0.0
return (int(xid1), int(yid1), dxid, dyid)
if (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
if (xi >= sigma_range[(- 1)]):
xid1 = (- 1)
dxid = 0.0
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
|
Return values needed for interpolation: bilinear (2D) interpolation
within ranges, linear (1D) if "one edge" is crossed, corner value if
"two edges" are crossed. Defined as jitted function due to compatibility
with numba backend.
:param xi: interpolation value on x-axis, i.e. I_sigma
:type xi: float
:param yi: interpolation value on y-axis, i.e. I_mu
:type yi: float
:param sigma_range: range of x-axis, i.e. sigma values
:type sigma_range: np.ndarray
:param mu_range: range of y-axis, i.e. mu values
:type mu_range: np.ndarray
:param d_sigma: grid coarseness in the x-axis, i.e. sigma values
:type d_sigma: float
:param d_mu: grid coarseness in the y-axis, i.e. mu values
:type d_mu: float
:return: index of the lower interpolation value on x-axis, index of the
lower interpolation value on y-axis, distance of xi to the lower
value, distance of yi to the lower value
:rtype: (int, int, float, float)
|
neurolib/models/multimodel/builder/aln.py
|
_get_interpolation_values
|
lionelkusch/neurolib
| 258 |
python
|
@numba.njit()
def _get_interpolation_values(xi, yi, sigma_range, mu_range, d_sigma, d_mu):
'\n Return values needed for interpolation: bilinear (2D) interpolation\n within ranges, linear (1D) if "one edge" is crossed, corner value if\n "two edges" are crossed. Defined as jitted function due to compatibility\n with numba backend.\n\n :param xi: interpolation value on x-axis, i.e. I_sigma\n :type xi: float\n :param yi: interpolation value on y-axis, i.e. I_mu\n :type yi: float\n :param sigma_range: range of x-axis, i.e. sigma values\n :type sigma_range: np.ndarray\n :param mu_range: range of y-axis, i.e. mu values\n :type mu_range: np.ndarray\n :param d_sigma: grid coarsness in the x-axis, i.e. sigma values\n :type d_sigma: float\n :param d_mu: grid coarsness in the y-axis, i.e. mu values\n :type d_mu: float\n :return: index of the lower interpolation value on x-axis, index of the\n lower interpolation value on y-axis, distance of xi to the lower\n value, distance of yi to the lower value\n :rtype: (int, int, float, float)\n '
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)]) and (yi >= mu_range[0]) and (yi < mu_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
if (yi < mu_range[0]):
yid1 = 0
dyid = 0.0
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
elif (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
else:
xid1 = (- 1)
dxid = 0.0
return (int(xid1), int(yid1), dxid, dyid)
if (yi >= mu_range[(- 1)]):
yid1 = (- 1)
dyid = 0.0
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
elif (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
else:
xid1 = (- 1)
dxid = 0.0
return (int(xid1), int(yid1), dxid, dyid)
if (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
if (xi >= sigma_range[(- 1)]):
xid1 = (- 1)
dxid = 0.0
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
|
@numba.njit()
def _get_interpolation_values(xi, yi, sigma_range, mu_range, d_sigma, d_mu):
'\n Return values needed for interpolation: bilinear (2D) interpolation\n within ranges, linear (1D) if "one edge" is crossed, corner value if\n "two edges" are crossed. Defined as jitted function due to compatibility\n with numba backend.\n\n :param xi: interpolation value on x-axis, i.e. I_sigma\n :type xi: float\n :param yi: interpolation value on y-axis, i.e. I_mu\n :type yi: float\n :param sigma_range: range of x-axis, i.e. sigma values\n :type sigma_range: np.ndarray\n :param mu_range: range of y-axis, i.e. mu values\n :type mu_range: np.ndarray\n :param d_sigma: grid coarsness in the x-axis, i.e. sigma values\n :type d_sigma: float\n :param d_mu: grid coarsness in the y-axis, i.e. mu values\n :type d_mu: float\n :return: index of the lower interpolation value on x-axis, index of the\n lower interpolation value on y-axis, distance of xi to the lower\n value, distance of yi to the lower value\n :rtype: (int, int, float, float)\n '
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)]) and (yi >= mu_range[0]) and (yi < mu_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
if (yi < mu_range[0]):
yid1 = 0
dyid = 0.0
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
elif (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
else:
xid1 = (- 1)
dxid = 0.0
return (int(xid1), int(yid1), dxid, dyid)
if (yi >= mu_range[(- 1)]):
yid1 = (- 1)
dyid = 0.0
if ((xi >= sigma_range[0]) and (xi < sigma_range[(- 1)])):
xid = ((xi - sigma_range[0]) / d_sigma)
xid1 = np.floor(xid)
dxid = (xid - xid1)
elif (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
else:
xid1 = (- 1)
dxid = 0.0
return (int(xid1), int(yid1), dxid, dyid)
if (xi < sigma_range[0]):
xid1 = 0
dxid = 0.0
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)
if (xi >= sigma_range[(- 1)]):
xid1 = (- 1)
dxid = 0.0
yid = ((yi - mu_range[0]) / d_mu)
yid1 = np.floor(yid)
dyid = (yid - yid1)
return (int(xid1), int(yid1), dxid, dyid)<|docstring|>Return values needed for interpolation: bilinear (2D) interpolation
within ranges, linear (1D) if "one edge" is crossed, corner value if
"two edges" are crossed. Defined as jitted function due to compatibility
with numba backend.
:param xi: interpolation value on x-axis, i.e. I_sigma
:type xi: float
:param yi: interpolation value on y-axis, i.e. I_mu
:type yi: float
:param sigma_range: range of x-axis, i.e. sigma values
:type sigma_range: np.ndarray
:param mu_range: range of y-axis, i.e. mu values
:type mu_range: np.ndarray
:param d_sigma: grid coarseness in the x-axis, i.e. sigma values
:type d_sigma: float
:param d_mu: grid coarseness in the y-axis, i.e. mu values
:type d_mu: float
:return: index of the lower interpolation value on x-axis, index of the
lower interpolation value on y-axis, distance of xi to the lower
value, distance of yi to the lower value
:rtype: (int, int, float, float)<|endoftext|>
|
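
For the in-range branch of _get_interpolation_values above, the index/offset arithmetic is easiest to check on concrete numbers (the ranges below are invented):

import numpy as np

sigma_range = np.arange(0.0, 5.0, 0.5)   # d_sigma = 0.5
mu_range = np.arange(-1.0, 4.0, 0.25)    # d_mu = 0.25
xi, yi = 1.3, 0.6                        # query point inside both ranges

xid = (xi - sigma_range[0]) / 0.5        # 2.6: lower cell index 2, offset 0.6
yid = (yi - mu_range[0]) / 0.25          # 6.4: lower cell index 6, offset 0.4
print(int(np.floor(xid)), xid - np.floor(xid))  # 2 0.6000...
print(int(np.floor(yid)), yid - np.floor(yid))  # 6 0.4000...
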
9f69a46e0ae6a5df06babd358e1c455f9d78c6c4313509f0943b91b23f391ea4
|
@numba.njit()
def _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function_table):
'\n Translate mean and std. deviation of the current to selected quantity using\n linear-nonlinear lookup table for ALN. Defined as jitted function due to\n compatibility with numba backend.\n '
(x_idx, y_idx, dx_idx, dy_idx) = _get_interpolation_values(current_sigma, current_mu, sigma_range, mu_range, d_sigma, d_mu)
return (((((transfer_function_table[(y_idx, x_idx)] * (1 - dx_idx)) * (1 - dy_idx)) + ((transfer_function_table[(y_idx, (x_idx + 1))] * dx_idx) * (1 - dy_idx))) + ((transfer_function_table[((y_idx + 1), x_idx)] * (1 - dx_idx)) * dy_idx)) + ((transfer_function_table[((y_idx + 1), (x_idx + 1))] * dx_idx) * dy_idx))
|
Translate mean and std. deviation of the current to selected quantity using
linear-nonlinear lookup table for ALN. Defined as jitted function due to
compatibility with numba backend.
|
neurolib/models/multimodel/builder/aln.py
|
_table_lookup
|
lionelkusch/neurolib
| 258 |
python
|
@numba.njit()
def _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function_table):
'\n Translate mean and std. deviation of the current to selected quantity using\n linear-nonlinear lookup table for ALN. Defined as jitted function due to\n compatibility with numba backend.\n '
(x_idx, y_idx, dx_idx, dy_idx) = _get_interpolation_values(current_sigma, current_mu, sigma_range, mu_range, d_sigma, d_mu)
return (((((transfer_function_table[(y_idx, x_idx)] * (1 - dx_idx)) * (1 - dy_idx)) + ((transfer_function_table[(y_idx, (x_idx + 1))] * dx_idx) * (1 - dy_idx))) + ((transfer_function_table[((y_idx + 1), x_idx)] * (1 - dx_idx)) * dy_idx)) + ((transfer_function_table[((y_idx + 1), (x_idx + 1))] * dx_idx) * dy_idx))
|
@numba.njit()
def _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function_table):
'\n Translate mean and std. deviation of the current to selected quantity using\n linear-nonlinear lookup table for ALN. Defined as jitted function due to\n compatibility with numba backend.\n '
(x_idx, y_idx, dx_idx, dy_idx) = _get_interpolation_values(current_sigma, current_mu, sigma_range, mu_range, d_sigma, d_mu)
return (((((transfer_function_table[(y_idx, x_idx)] * (1 - dx_idx)) * (1 - dy_idx)) + ((transfer_function_table[(y_idx, (x_idx + 1))] * dx_idx) * (1 - dy_idx))) + ((transfer_function_table[((y_idx + 1), x_idx)] * (1 - dx_idx)) * dy_idx)) + ((transfer_function_table[((y_idx + 1), (x_idx + 1))] * dx_idx) * dy_idx))<|docstring|>Translate mean and std. deviation of the current to selected quantity using
linear-nonlinear lookup table for ALN. Defined as jitted function due to
compatibility with numba backend.<|endoftext|>
|
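
A useful property check of the bilinear weighting in _table_lookup above: on a table sampled from a function that is linear along each axis, the interpolation reproduces the function exactly. A pure-NumPy sketch (grid and function chosen arbitrarily, no numba needed):

import numpy as np

mu = np.linspace(0.0, 1.0, 11)      # y-axis samples, d_mu = 0.1
sigma = np.linspace(0.0, 2.0, 21)   # x-axis samples, d_sigma = 0.1
table = mu[:, None] + 3.0 * sigma[None, :]   # f(mu, sigma) = mu + 3*sigma

def bilinear(cm, cs):
    x = (cs - sigma[0]) / 0.1; xi = int(np.floor(x)); dx = x - xi
    y = (cm - mu[0]) / 0.1;    yi = int(np.floor(y)); dy = y - yi
    return (table[yi, xi] * (1 - dx) * (1 - dy)
            + table[yi, xi + 1] * dx * (1 - dy)
            + table[yi + 1, xi] * (1 - dx) * dy
            + table[yi + 1, xi + 1] * dx * dy)

print(bilinear(0.37, 1.23))   # 4.06
print(0.37 + 3 * 1.23)        # 4.06 -- interpolation is exact here
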
675a13be8471c04f71c3ffe1d8087d665b0cb1726592115275478635dae1599b
|
def __init__(self, params, lin_nonlin_transfer_function_filename=None, seed=None):
'\n :param lin_nonlin_transfer_function_filename: filename for precomputed\n transfer functions of the ALN model, if None, will look for it in this\n directory\n :type lin_nonlin_transfer_function_filename: str|None\n :param seed: seed for random number generator\n :type seed: int|None\n '
super().__init__(params=params, seed=seed)
lin_nonlin_transfer_function_filename = (lin_nonlin_transfer_function_filename or os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'aln', 'aln-precalc', DEFAULT_QUANTITIES_CASCADE_FILENAME))
self._load_lin_nonlin_transfer_function(lin_nonlin_transfer_function_filename)
|
:param lin_nonlin_transfer_function_filename: filename for precomputed
transfer functions of the ALN model, if None, will look for it in this
directory
:type lin_nonlin_transfer_function_filename: str|None
:param seed: seed for random number generator
:type seed: int|None
|
neurolib/models/multimodel/builder/aln.py
|
__init__
|
lionelkusch/neurolib
| 258 |
python
|
def __init__(self, params, lin_nonlin_transfer_function_filename=None, seed=None):
'\n :param lin_nonlin_transfer_function_filename: filename for precomputed\n transfer functions of the ALN model, if None, will look for it in this\n directory\n :type lin_nonlin_transfer_function_filename: str|None\n :param seed: seed for random number generator\n :type seed: int|None\n '
super().__init__(params=params, seed=seed)
lin_nonlin_transfer_function_filename = (lin_nonlin_transfer_function_filename or os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'aln', 'aln-precalc', DEFAULT_QUANTITIES_CASCADE_FILENAME))
self._load_lin_nonlin_transfer_function(lin_nonlin_transfer_function_filename)
|
def __init__(self, params, lin_nonlin_transfer_function_filename=None, seed=None):
'\n :param lin_nonlin_transfer_function_filename: filename for precomputed\n transfer functions of the ALN model, if None, will look for it in this\n directory\n :type lin_nonlin_transfer_function_filename: str|None\n :param seed: seed for random number generator\n :type seed: int|None\n '
super().__init__(params=params, seed=seed)
lin_nonlin_transfer_function_filename = (lin_nonlin_transfer_function_filename or os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'aln', 'aln-precalc', DEFAULT_QUANTITIES_CASCADE_FILENAME))
self._load_lin_nonlin_transfer_function(lin_nonlin_transfer_function_filename)<|docstring|>:param lin_nonlin_transfer_function_filename: filename for precomputed
transfer functions of the ALN model, if None, will look for it in this
directory
:type lin_nonlin_transfer_function_filename: str|None
:param seed: seed for random number generator
:type seed: int|None<|endoftext|>
|
e79f0f98aed41f0da5329bb1a9b3dfdb90e015441bf8e8232a3a9b44db99e60d
|
def _load_lin_nonlin_transfer_function(self, filename):
'\n Load precomputed transfer functions from h5 file.\n '
logging.info(f'Loading precomputed transfer functions from {filename}')
loaded_h5 = File(filename, 'r')
self.mu_range = np.array(loaded_h5['mu_vals'])
self.d_mu = (self.mu_range[1] - self.mu_range[0])
self.sigma_range = np.array(loaded_h5['sigma_vals'])
self.d_sigma = (self.sigma_range[1] - self.sigma_range[0])
self.firing_rate_transfer_function = np.array(loaded_h5['r_ss'])
self.voltage_transfer_function = np.array(loaded_h5['V_mean_ss'])
self.tau_transfer_function = np.array(loaded_h5['tau_mu_exp'])
logging.info('All transfer functions loaded.')
loaded_h5.close()
self.lin_nonlin_fname = filename
|
Load precomputed transfer functions from h5 file.
|
neurolib/models/multimodel/builder/aln.py
|
_load_lin_nonlin_transfer_function
|
lionelkusch/neurolib
| 258 |
python
|
def _load_lin_nonlin_transfer_function(self, filename):
'\n \n '
logging.info(f'Loading precomputed transfer functions from {filename}')
loaded_h5 = File(filename, 'r')
self.mu_range = np.array(loaded_h5['mu_vals'])
self.d_mu = (self.mu_range[1] - self.mu_range[0])
self.sigma_range = np.array(loaded_h5['sigma_vals'])
self.d_sigma = (self.sigma_range[1] - self.sigma_range[0])
self.firing_rate_transfer_function = np.array(loaded_h5['r_ss'])
self.voltage_transfer_function = np.array(loaded_h5['V_mean_ss'])
self.tau_transfer_function = np.array(loaded_h5['tau_mu_exp'])
logging.info('All transfer functions loaded.')
loaded_h5.close()
self.lin_nonlin_fname = filename
|
def _load_lin_nonlin_transfer_function(self, filename):
'\n \n '
logging.info(f'Loading precomputed transfer functions from {filename}')
loaded_h5 = File(filename, 'r')
self.mu_range = np.array(loaded_h5['mu_vals'])
self.d_mu = (self.mu_range[1] - self.mu_range[0])
self.sigma_range = np.array(loaded_h5['sigma_vals'])
self.d_sigma = (self.sigma_range[1] - self.sigma_range[0])
self.firing_rate_transfer_function = np.array(loaded_h5['r_ss'])
self.voltage_transfer_function = np.array(loaded_h5['V_mean_ss'])
self.tau_transfer_function = np.array(loaded_h5['tau_mu_exp'])
logging.info('All transfer functions loaded.')
loaded_h5.close()
self.lin_nonlin_fname = filename<|docstring|>Load precomputed transfer functions from h5 file.<|endoftext|>
|
8bedffe6917f0946d2a4c42660db306a668e684f9f5dd9a2966f952ae1f6785b
|
def _callbacks(self):
'\n Construct list of python callbacks for ALN model.\n '
callbacks_list = [(self.callback_functions['firing_rate_lookup'], self.firing_rate_lookup, 2), (self.callback_functions['voltage_lookup'], self.voltage_lookup, 2), (self.callback_functions['tau_lookup'], self.tau_lookup, 2)]
self._validate_callbacks(callbacks_list)
return callbacks_list
|
Construct list of python callbacks for ALN model.
|
neurolib/models/multimodel/builder/aln.py
|
_callbacks
|
lionelkusch/neurolib
| 258 |
python
|
def _callbacks(self):
'\n \n '
callbacks_list = [(self.callback_functions['firing_rate_lookup'], self.firing_rate_lookup, 2), (self.callback_functions['voltage_lookup'], self.voltage_lookup, 2), (self.callback_functions['tau_lookup'], self.tau_lookup, 2)]
self._validate_callbacks(callbacks_list)
return callbacks_list
|
def _callbacks(self):
'\n \n '
callbacks_list = [(self.callback_functions['firing_rate_lookup'], self.firing_rate_lookup, 2), (self.callback_functions['voltage_lookup'], self.voltage_lookup, 2), (self.callback_functions['tau_lookup'], self.tau_lookup, 2)]
self._validate_callbacks(callbacks_list)
return callbacks_list<|docstring|>Construct list of python callbacks for ALN model.<|endoftext|>
|
3304baac284683c29f4729bb67937537fae7a41265939fbc177ec4547f240230
|
def _numba_callbacks(self):
'\n Define numba callbacks - has to be different than jitcdde callbacks\n because of the internals.\n '
def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
'\n Function generator for numba callbacks. This works similarly as\n `functools.partial` (i.e. sets some of the arguments of the inner\n function), but afterwards can be jitted with `numba.njit()`, while\n partial functions cannot.\n '
def inner(current_mu, current_sigma):
return _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function)
return inner
return [('firing_rate_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.firing_rate_transfer_function))), ('voltage_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function))), ('tau_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function)))]
|
Define numba callbacks - has to be different than jitcdde callbacks
because of the internals.
|
neurolib/models/multimodel/builder/aln.py
|
_numba_callbacks
|
lionelkusch/neurolib
| 258 |
python
|
def _numba_callbacks(self):
'\n Define numba callbacks - has to be different than jitcdde callbacks\n because of the internals.\n '
def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
'\n Function generator for numba callbacks. This works similarly as\n `functools.partial` (i.e. sets some of the arguments of the inner\n function), but afterwards can be jitted with `numba.njit()`, while\n partial functions cannot.\n '
def inner(current_mu, current_sigma):
return _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function)
return inner
return [('firing_rate_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.firing_rate_transfer_function))), ('voltage_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function))), ('tau_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function)))]
|
def _numba_callbacks(self):
'\n Define numba callbacks - has to be different from jitcdde callbacks\n because of the internals.\n '
def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
'\n Function generator for numba callbacks. This works similarly to\n `functools.partial` (i.e. sets some of the arguments of the inner\n function), but afterwards can be jitted with `numba.njit()`, while\n partial functions cannot.\n '
def inner(current_mu, current_sigma):
return _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function)
return inner
return [('firing_rate_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.firing_rate_transfer_function))), ('voltage_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function))), ('tau_lookup', numba.njit(_table_numba_gen(self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function)))]<|docstring|>Define numba callbacks - has to be different from jitcdde callbacks
because of the internals.<|endoftext|>
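The closure-generator indirection documented above matters because `numba.njit` compiles plain Python functions but cannot wrap a `functools.partial` object. A self-contained sketch of the same pattern with a toy function (names here are illustrative, not part of neurolib):

import numba

def make_scaled_add(scale):
    # The closure captures `scale`; numba compiles the inner function
    # with the captured value treated as a constant.
    def inner(a, b):
        return scale * (a + b)
    return inner

fast = numba.njit(make_scaled_add(2.0))
assert fast(1.0, 2.0) == 6.0
# numba.njit(functools.partial(...)) would fail here, since the
# dispatcher expects a plain Python function object.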
|
82a135d8d5266f94736591343e8ff2d59377735d03fdbf43d3f937889e8d3543
|
def firing_rate_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to firing rate using\n linear-nonlinear lookup table for ALN.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.firing_rate_transfer_function)
|
Translate mean and std. deviation of the current to firing rate using
linear-nonlinear lookup table for ALN.
|
neurolib/models/multimodel/builder/aln.py
|
firing_rate_lookup
|
lionelkusch/neurolib
| 258 |
python
|
def firing_rate_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to firing rate using\n linear-nonlinear lookup table for ALN.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.firing_rate_transfer_function)
|
def firing_rate_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to firing rate using\n linear-nonlinear lookup table for ALN.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.firing_rate_transfer_function)<|docstring|>Translate mean and std. deviation of the current to firing rate using
linear-nonlinear lookup table for ALN.<|endoftext|>
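`_table_lookup` itself is defined elsewhere in the module; a simplified stand-in conveying the idea (clamped nearest-neighbour lookup on the precomputed (sigma, mu) grid; the grid orientation and any interpolation in the real helper are assumptions):

import numpy as np

def _table_lookup_sketch(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, table):
    # Map (sigma, mu) onto grid indices and clamp to the table bounds.
    i = int(np.clip(np.round((current_sigma - sigma_range[0]) / d_sigma), 0, len(sigma_range) - 1))
    j = int(np.clip(np.round((current_mu - mu_range[0]) / d_mu), 0, len(mu_range) - 1))
    return table[i, j]  # assumed orientation: table[sigma_idx, mu_idx]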
|
7d202b260c86db36457023d124a05afa1792b5bf038e16bba6149e3d13cdb435
|
def voltage_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to voltage using\n precomputed transfer functions of the aln model.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function)
|
Translate mean and std. deviation of the current to voltage using
precomputed transfer functions of the aln model.
|
neurolib/models/multimodel/builder/aln.py
|
voltage_lookup
|
lionelkusch/neurolib
| 258 |
python
|
def voltage_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to voltage using\n precomputed transfer functions of the aln model.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function)
|
def voltage_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to voltage using\n precomputed transfer functions of the aln model.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.voltage_transfer_function)<|docstring|>Translate mean and std. deviation of the current to voltage using
precomputed transfer functions of the aln model.<|endoftext|>
|
c10a5a9116417ca428c0bd3a3658c0fbbdd5f76476093993077c4ed6ff107889
|
def tau_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to tau, the membrane time\n constant, using precomputed transfer functions of the aln model.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function)
|
Translate mean and std. deviation of the current to tau, the membrane time
constant, using precomputed transfer functions of the aln model.
|
neurolib/models/multimodel/builder/aln.py
|
tau_lookup
|
lionelkusch/neurolib
| 258 |
python
|
def tau_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to tau, the membrane time\n constant, using precomputed transfer functions of the aln model.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function)
|
def tau_lookup(self, y, current_mu, current_sigma):
'\n Translate mean and std. deviation of the current to tau, the membrane time\n constant, using precomputed transfer functions of the aln model.\n '
return _table_lookup(current_mu, current_sigma, self.sigma_range, self.mu_range, self.d_sigma, self.d_mu, self.tau_transfer_function)<|docstring|>Translate mean and std. deviation of the current to tau, the membrane time
constant, using precomputed transfer functions of the aln model.<|endoftext|>
|
fd82c4ae4f28b63d49ece26aa694b8d05c2310dd17eeb9d1adf374fc1e214fda
|
def _get_current_sigma(self, I_syn_sigma_exc, I_syn_sigma_inh, exc_inp, inh_inp, J_exc_max, J_inh_max, ext_sigma):
'\n Compute membrane current standard deviation sigma.\n '
return se.sqrt((((((((2 * (J_exc_max ** 2)) * I_syn_sigma_exc) * self.params['tau_se']) * (self.params['C'] / self.params['gL'])) / (((1.0 + exc_inp) * (self.params['C'] / self.params['gL'])) + self.params['tau_se'])) + (((((2 * (J_inh_max ** 2)) * I_syn_sigma_inh) * self.params['tau_si']) * (self.params['C'] / self.params['gL'])) / (((1.0 + inh_inp) * (self.params['C'] / self.params['gL'])) + self.params['tau_si']))) + (ext_sigma ** 2)))
|
Compute membrane current standard deviation sigma.
|
neurolib/models/multimodel/builder/aln.py
|
_get_current_sigma
|
lionelkusch/neurolib
| 258 |
python
|
def _get_current_sigma(self, I_syn_sigma_exc, I_syn_sigma_inh, exc_inp, inh_inp, J_exc_max, J_inh_max, ext_sigma):
'\n \n '
return se.sqrt((((((((2 * (J_exc_max ** 2)) * I_syn_sigma_exc) * self.params['tau_se']) * (self.params['C'] / self.params['gL'])) / (((1.0 + exc_inp) * (self.params['C'] / self.params['gL'])) + self.params['tau_se'])) + (((((2 * (J_inh_max ** 2)) * I_syn_sigma_inh) * self.params['tau_si']) * (self.params['C'] / self.params['gL'])) / (((1.0 + inh_inp) * (self.params['C'] / self.params['gL'])) + self.params['tau_si']))) + (ext_sigma ** 2)))
|
def _get_current_sigma(self, I_syn_sigma_exc, I_syn_sigma_inh, exc_inp, inh_inp, J_exc_max, J_inh_max, ext_sigma):
'\n \n '
return se.sqrt((((((((2 * (J_exc_max ** 2)) * I_syn_sigma_exc) * self.params['tau_se']) * (self.params['C'] / self.params['gL'])) / (((1.0 + exc_inp) * (self.params['C'] / self.params['gL'])) + self.params['tau_se'])) + (((((2 * (J_inh_max ** 2)) * I_syn_sigma_inh) * self.params['tau_si']) * (self.params['C'] / self.params['gL'])) / (((1.0 + inh_inp) * (self.params['C'] / self.params['gL'])) + self.params['tau_si']))) + (ext_sigma ** 2)))<|docstring|>Compute membrane current standard deviation sigma.<|endoftext|>
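In conventional notation the returned expression reads (a direct transcription of the code, with C/g_L the passive membrane time constant):

\sigma = \sqrt{\frac{2 J_{e,\max}^{2}\,\sigma_{\mathrm{syn},e}\,\tau_{se}\,C/g_L}{(1+\mathrm{inp}_e)\,C/g_L+\tau_{se}} + \frac{2 J_{i,\max}^{2}\,\sigma_{\mathrm{syn},i}\,\tau_{si}\,C/g_L}{(1+\mathrm{inp}_i)\,C/g_L+\tau_{si}} + \sigma_{\mathrm{ext}}^{2}}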
|
f46e8f4ac8e39e5bb8b34159e64ec58d732983efacf2d39370f96298629af3e7
|
def _get_synaptic_current_mu(self, I_syn_mu, inp, tau):
'\n Compute synaptic current mean mu. Used for both excitatory and inhibitory current.\n '
return ((((1.0 - I_syn_mu) * inp) - I_syn_mu) / tau)
|
Compute synaptic current mean mu. Used for both excitatory and inhibitory current.
|
neurolib/models/multimodel/builder/aln.py
|
_get_synaptic_current_mu
|
lionelkusch/neurolib
| 258 |
python
|
def _get_synaptic_current_mu(self, I_syn_mu, inp, tau):
'\n \n '
return ((((1.0 - I_syn_mu) * inp) - I_syn_mu) / tau)
|
def _get_synaptic_current_mu(self, I_syn_mu, inp, tau):
'\n \n '
return ((((1.0 - I_syn_mu) * inp) - I_syn_mu) / tau)<|docstring|>Compute synaptic current mean mu. Used for both excitatory and inhibitory current.<|endoftext|>
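Transcribed, the return value is the right-hand side of a first-order synaptic filter (presumably integrated elsewhere as the mean dynamics):

\dot{\mu}_{\mathrm{syn}} = \frac{(1-\mu_{\mathrm{syn}})\,\mathrm{inp} - \mu_{\mathrm{syn}}}{\tau}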
|
929579bb1d70938bc0edcaf04d066fd653fd3fd90709e8f5db9b1eaa0e1083ff
|
def _initialize_state_vector(self):
'\n Initialize state vector.\n '
np.random.seed(self.seed)
self.initial_state = (np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 200.0, 0.5, 0.5, 0.001, 0.001, 0.01])).tolist()
|
Initialize state vector.
|
neurolib/models/multimodel/builder/aln.py
|
_initialize_state_vector
|
lionelkusch/neurolib
| 258 |
python
|
def _initialize_state_vector(self):
'\n \n '
np.random.seed(self.seed)
self.initial_state = (np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 200.0, 0.5, 0.5, 0.001, 0.001, 0.01])).tolist()
|
def _initialize_state_vector(self):
'\n \n '
np.random.seed(self.seed)
self.initial_state = (np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 200.0, 0.5, 0.5, 0.001, 0.001, 0.01])).tolist()<|docstring|>Initialize state vector.<|endoftext|>
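Because the draw goes through NumPy's global RNG, a fixed seed makes the initial state deterministic; a quick check:

import numpy as np

scale = np.array([3.0, 200.0, 0.5, 0.5, 0.001, 0.001, 0.01])
np.random.seed(42)
first = np.random.uniform(0, 1, 7) * scale
np.random.seed(42)
second = np.random.uniform(0, 1, 7) * scale
assert np.array_equal(first, second)  # same seed, identical initial state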
|
49d3841155a1463d15acadcceb8f7f3a2afdc2888013a3ef120942a81c488b97
|
def _get_adaptation_current(self, I_adaptation, firing_rate, voltage):
'\n Compute adaptation current as a sum of subthreshold adaptation and spike-triggered adaptation.\n '
return ((((self.params['a'] * (voltage - self.params['EA'])) - I_adaptation) + ((self.params['tauA'] * self.params['b']) * firing_rate)) / self.params['tauA'])
|
Compute adaptation current as a sum of subthreshold adaptation and spike-triggered adaptation.
|
neurolib/models/multimodel/builder/aln.py
|
_get_adaptation_current
|
lionelkusch/neurolib
| 258 |
python
|
def _get_adaptation_current(self, I_adaptation, firing_rate, voltage):
'\n \n '
return ((((self.params['a'] * (voltage - self.params['EA'])) - I_adaptation) + ((self.params['tauA'] * self.params['b']) * firing_rate)) / self.params['tauA'])
|
def _get_adaptation_current(self, I_adaptation, firing_rate, voltage):
'\n \n '
return ((((self.params['a'] * (voltage - self.params['EA'])) - I_adaptation) + ((self.params['tauA'] * self.params['b']) * firing_rate)) / self.params['tauA'])<|docstring|>Compute adaptation current as a sum of subthreshold adaptation and spike-triggered adaptation.<|endoftext|>
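In standard adaptive-neuron notation the returned expression is the right-hand side of the adaptation dynamics, with subthreshold term a(V - E_A) and spike-triggered term \tau_A b r:

\dot{I}_A = \frac{a\,(V - E_A) - I_A + \tau_A\, b\, r}{\tau_A}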
|
66cb7036102df047b77b07685018088671455c0868ceecea6b3f1f6948c83306
|
def _compute_couplings(self, coupling_variables):
'\n Helper that computes coupling from other nodes and network.\n '
exc_coupling = (((self.params['Ke'] * coupling_variables['node_exc_exc']) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) * self.params['Ke_gl']) * coupling_variables['network_exc_exc'])) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) * self.params['Ke_gl']) * self.params['ext_exc_rate']))
inh_coupling = (self.params['Ki'] * coupling_variables['node_exc_inh'])
exc_coupling_squared = (((self.params['Ke'] * coupling_variables['node_exc_exc_sq']) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) ** 2) * self.params['Ke_gl']) * coupling_variables['network_exc_exc_sq'])) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) ** 2) * self.params['Ke_gl']) * self.params['ext_exc_rate']))
inh_coupling_squared = (self.params['Ki'] * coupling_variables['node_exc_inh_sq'])
return (exc_coupling, inh_coupling, exc_coupling_squared, inh_coupling_squared)
|
Helper that computes coupling from other nodes and network.
|
neurolib/models/multimodel/builder/aln.py
|
_compute_couplings
|
lionelkusch/neurolib
| 258 |
python
|
def _compute_couplings(self, coupling_variables):
'\n \n '
exc_coupling = (((self.params['Ke'] * coupling_variables['node_exc_exc']) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) * self.params['Ke_gl']) * coupling_variables['network_exc_exc'])) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) * self.params['Ke_gl']) * self.params['ext_exc_rate']))
inh_coupling = (self.params['Ki'] * coupling_variables['node_exc_inh'])
exc_coupling_squared = (((self.params['Ke'] * coupling_variables['node_exc_exc_sq']) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) ** 2) * self.params['Ke_gl']) * coupling_variables['network_exc_exc_sq'])) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) ** 2) * self.params['Ke_gl']) * self.params['ext_exc_rate']))
inh_coupling_squared = (self.params['Ki'] * coupling_variables['node_exc_inh_sq'])
return (exc_coupling, inh_coupling, exc_coupling_squared, inh_coupling_squared)
|
def _compute_couplings(self, coupling_variables):
'\n \n '
exc_coupling = (((self.params['Ke'] * coupling_variables['node_exc_exc']) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) * self.params['Ke_gl']) * coupling_variables['network_exc_exc'])) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) * self.params['Ke_gl']) * self.params['ext_exc_rate']))
inh_coupling = (self.params['Ki'] * coupling_variables['node_exc_inh'])
exc_coupling_squared = (((self.params['Ke'] * coupling_variables['node_exc_exc_sq']) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) ** 2) * self.params['Ke_gl']) * coupling_variables['network_exc_exc_sq'])) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jee_max']) ** 2) * self.params['Ke_gl']) * self.params['ext_exc_rate']))
inh_coupling_squared = (self.params['Ki'] * coupling_variables['node_exc_inh_sq'])
return (exc_coupling, inh_coupling, exc_coupling_squared, inh_coupling_squared)<|docstring|>Helper that computes coupling from other nodes and network.<|endoftext|>
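Written out (notation mine; r^{node} and r^{net} denote the node- and network-level coupling variables), the excitatory coupling is

c_{ee} = K_e\, r^{\mathrm{node}}_{ee} + \frac{c_{gl}\,\tau_{se}}{J_{ee,\max}}\, K_{e,gl}\,\left(r^{\mathrm{net}}_{ee} + r_{\mathrm{ext}}\right)

while the squared variant replaces the prefactor c_{gl}\tau_{se}/J_{ee,\max} by its square and uses the squared coupling variables; the inhibitory terms are simply K_i times the corresponding node variable.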
|
e369705b435f3ef823a5d9b98a24e05c942636009b50918b3cdc7cad63754ea2
|
def _initialize_state_vector(self):
'\n Initialize state vector.\n '
np.random.seed(self.seed)
self.initial_state = (np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 0.5, 0.5, 0.01, 0.01, 0.01])).tolist()
|
Initialize state vector.
|
neurolib/models/multimodel/builder/aln.py
|
_initialize_state_vector
|
lionelkusch/neurolib
| 258 |
python
|
def _initialize_state_vector(self):
'\n \n '
np.random.seed(self.seed)
self.initial_state = (np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 0.5, 0.5, 0.01, 0.01, 0.01])).tolist()
|
def _initialize_state_vector(self):
'\n \n '
np.random.seed(self.seed)
self.initial_state = (np.random.uniform(0, 1, self.num_state_variables) * np.array([3.0, 0.5, 0.5, 0.01, 0.01, 0.01])).tolist()<|docstring|>Initialize state vector.<|endoftext|>
|
82e24022e8b820fe39051b5c7898622a6fcbb93077781c1d13d8087e635b87a5
|
def _compute_couplings(self, coupling_variables):
'\n Helper that computes coupling from other nodes and network.\n '
exc_coupling = ((self.params['Ke'] * coupling_variables['node_inh_exc']) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jie_max']) * self.params['Ke_gl']) * self.params['ext_inh_rate']))
inh_coupling = (self.params['Ki'] * coupling_variables['node_inh_inh'])
exc_coupling_squared = ((self.params['Ke'] * coupling_variables['node_inh_exc_sq']) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jie_max']) ** 2) * self.params['Ke_gl']) * self.params['ext_inh_rate']))
inh_coupling_squared = (self.params['Ki'] * coupling_variables['node_inh_inh_sq'])
return (exc_coupling, inh_coupling, exc_coupling_squared, inh_coupling_squared)
|
Helper that computes coupling from other nodes and network.
|
neurolib/models/multimodel/builder/aln.py
|
_compute_couplings
|
lionelkusch/neurolib
| 258 |
python
|
def _compute_couplings(self, coupling_variables):
'\n \n '
exc_coupling = ((self.params['Ke'] * coupling_variables['node_inh_exc']) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jie_max']) * self.params['Ke_gl']) * self.params['ext_inh_rate']))
inh_coupling = (self.params['Ki'] * coupling_variables['node_inh_inh'])
exc_coupling_squared = ((self.params['Ke'] * coupling_variables['node_inh_exc_sq']) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jie_max']) ** 2) * self.params['Ke_gl']) * self.params['ext_inh_rate']))
inh_coupling_squared = (self.params['Ki'] * coupling_variables['node_inh_inh_sq'])
return (exc_coupling, inh_coupling, exc_coupling_squared, inh_coupling_squared)
|
def _compute_couplings(self, coupling_variables):
'\n \n '
exc_coupling = ((self.params['Ke'] * coupling_variables['node_inh_exc']) + ((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jie_max']) * self.params['Ke_gl']) * self.params['ext_inh_rate']))
inh_coupling = (self.params['Ki'] * coupling_variables['node_inh_inh'])
exc_coupling_squared = ((self.params['Ke'] * coupling_variables['node_inh_exc_sq']) + (((((self.params['c_gl'] * self.params['tau_se']) / self.params['Jie_max']) ** 2) * self.params['Ke_gl']) * self.params['ext_inh_rate']))
inh_coupling_squared = (self.params['Ki'] * coupling_variables['node_inh_inh_sq'])
return (exc_coupling, inh_coupling, exc_coupling_squared, inh_coupling_squared)<|docstring|>Helper that computes coupling from other nodes and network.<|endoftext|>
|
cc0d8f107227f2afa410f96732a96e6fa14e33f435315cc688c668e86e387e84
|
def _rescale_connectivity(self):
'\n Rescale connection strengths for ALN. Should also work for ALN nodes\n with an arbitrary number of masses of any type.\n '
tau_mat = np.zeros_like(self.connectivity)
J_mat = np.zeros_like(self.connectivity)
for (col, mass_from) in enumerate(self.masses):
tau_mat[:, col] = mass_from.params[f'tau_s{mass_from.mass_type.lower()[0]}']
for (row, mass_to) in enumerate(self.masses):
J_mat[(row, col)] = mass_to.params[f'J{mass_to.mass_type.lower()[0]}{mass_from.mass_type.lower()[0]}_max']
self.connectivity = ((self.connectivity * tau_mat) / np.abs(J_mat))
|
Rescale connection strengths for ALN. Should also work for ALN nodes
with an arbitrary number of masses of any type.
|
neurolib/models/multimodel/builder/aln.py
|
_rescale_connectivity
|
lionelkusch/neurolib
| 258 |
python
|
def _rescale_connectivity(self):
'\n Rescale connection strengths for ALN. Should also work for ALN nodes\n with an arbitrary number of masses of any type.\n '
tau_mat = np.zeros_like(self.connectivity)
J_mat = np.zeros_like(self.connectivity)
for (col, mass_from) in enumerate(self.masses):
tau_mat[:, col] = mass_from.params[f'tau_s{mass_from.mass_type.lower()[0]}']
for (row, mass_to) in enumerate(self.masses):
J_mat[(row, col)] = mass_to.params[f'J{mass_to.mass_type.lower()[0]}{mass_from.mass_type.lower()[0]}_max']
self.connectivity = ((self.connectivity * tau_mat) / np.abs(J_mat))
|
def _rescale_connectivity(self):
'\n Rescale connection strengths for ALN. Should also work for ALN nodes\n with an arbitrary number of masses of any type.\n '
tau_mat = np.zeros_like(self.connectivity)
J_mat = np.zeros_like(self.connectivity)
for (col, mass_from) in enumerate(self.masses):
tau_mat[:, col] = mass_from.params[f'tau_s{mass_from.mass_type.lower()[0]}']
for (row, mass_to) in enumerate(self.masses):
J_mat[(row, col)] = mass_to.params[f'J{mass_to.mass_type.lower()[0]}{mass_from.mass_type.lower()[0]}_max']
self.connectivity = ((self.connectivity * tau_mat) / np.abs(J_mat))<|docstring|>Rescale connection strengths for ALN. Should also work for ALN nodes
with an arbitrary number of masses of any type.<|endoftext|>
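A small numeric illustration of the rescaling C_{ij} \leftarrow C_{ij}\,\tau_{s,\mathrm{from}} / |J_{\mathrm{to,from},\max}| (all numbers below are made up):

import numpy as np

connectivity = np.array([[0.3, 0.5],
                         [0.4, 0.2]])   # raw weights, [to, from]
tau_mat = np.array([[2.0, 5.0],
                    [2.0, 5.0]])        # column = source mass tau_s
J_mat = np.array([[2.4, -3.3],
                  [2.6, -1.6]])         # J_{to,from}_max; sign dropped via abs
connectivity = connectivity * tau_mat / np.abs(J_mat)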
|
291a14b9dc7eca658e098463453264af8dc45e3e1c673d49ea3dac2e2137fc46
|
def __init__(self, exc_params=None, inh_params=None, exc_lin_nonlin_transfer_function_filename=None, inh_lin_nonlin_transfer_function_filename=None, connectivity=ALN_NODE_DEFAULT_CONNECTIVITY, delays=ALN_NODE_DEFAULT_DELAYS, exc_seed=None, inh_seed=None):
'\n :param exc_params: parameters for the excitatory mass\n :type exc_params: dict|None\n :param inh_params: parameters for the inhibitory mass\n :type inh_params: dict|None\n :param exc_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer functions for excitatory ALN mass, if None, will\n look for it in this directory\n :type exc_lin_nonlin_transfer_function_filename: str|None\n :param inh_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer functions for inhibitory ALN mass, if None, will\n look for it in this directory\n :type inh_lin_nonlin_transfer_function_filename: str|None\n :param connectivity: local connectivity matrix\n :type connectivity: np.ndarray\n :param delays: local delay matrix\n :type delays: np.ndarray\n :param exc_seed: seed for random number generator for the excitatory\n mass\n :type exc_seed: int|None\n :param inh_seed: seed for random number generator for the inhibitory\n mass\n :type inh_seed: int|None\n '
excitatory_mass = ExcitatoryALNMass(params=exc_params, lin_nonlin_transfer_function_filename=exc_lin_nonlin_transfer_function_filename, seed=exc_seed)
excitatory_mass.index = 0
inhibitory_mass = InhibitoryALNMass(params=inh_params, lin_nonlin_transfer_function_filename=inh_lin_nonlin_transfer_function_filename, seed=inh_seed)
inhibitory_mass.index = 1
super().__init__(neural_masses=[excitatory_mass, inhibitory_mass], local_connectivity=connectivity, local_delays=delays)
self._rescale_connectivity()
|
:param exc_params: parameters for the excitatory mass
:type exc_params: dict|None
:param inh_params: parameters for the inhibitory mass
:type inh_params: dict|None
:param exc_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer functions for excitatory ALN mass, if None, will
look for it in this directory
:type exc_lin_nonlin_transfer_function_filename: str|None
:param inh_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer functions for inhibitory ALN mass, if None, will
look for it in this directory
:type inh_lin_nonlin_transfer_function_filename: str|None
:param connectivity: local connectivity matrix
:type connectivity: np.ndarray
:param delays: local delay matrix
:type delays: np.ndarray
:param exc_seed: seed for random number generator for the excitatory
mass
:type exc_seed: int|None
:param inh_seed: seed for random number generator for the inhibitory
mass
:type inh_seed: int|None
|
neurolib/models/multimodel/builder/aln.py
|
__init__
|
lionelkusch/neurolib
| 258 |
python
|
def __init__(self, exc_params=None, inh_params=None, exc_lin_nonlin_transfer_function_filename=None, inh_lin_nonlin_transfer_function_filename=None, connectivity=ALN_NODE_DEFAULT_CONNECTIVITY, delays=ALN_NODE_DEFAULT_DELAYS, exc_seed=None, inh_seed=None):
'\n :param exc_params: parameters for the excitatory mass\n :type exc_params: dict|None\n :param inh_params: parameters for the inhibitory mass\n :type inh_params: dict|None\n :param exc_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer functions for excitatory ALN mass, if None, will\n look for it in this directory\n :type exc_lin_nonlin_transfer_function_filename: str|None\n :param inh_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer functions for inhibitory ALN mass, if None, will\n look for it in this directory\n :type inh_lin_nonlin_transfer_function_filename: str|None\n :param connectivity: local connectivity matrix\n :type connectivity: np.ndarray\n :param delays: local delay matrix\n :type delays: np.ndarray\n :param exc_seed: seed for random number generator for the excitatory\n mass\n :type exc_seed: int|None\n :param inh_seed: seed for random number generator for the inhibitory\n mass\n :type inh_seed: int|None\n '
excitatory_mass = ExcitatoryALNMass(params=exc_params, lin_nonlin_transfer_function_filename=exc_lin_nonlin_transfer_function_filename, seed=exc_seed)
excitatory_mass.index = 0
inhibitory_mass = InhibitoryALNMass(params=inh_params, lin_nonlin_transfer_function_filename=inh_lin_nonlin_transfer_function_filename, seed=inh_seed)
inhibitory_mass.index = 1
super().__init__(neural_masses=[excitatory_mass, inhibitory_mass], local_connectivity=connectivity, local_delays=delays)
self._rescale_connectivity()
|
def __init__(self, exc_params=None, inh_params=None, exc_lin_nonlin_transfer_function_filename=None, inh_lin_nonlin_transfer_function_filename=None, connectivity=ALN_NODE_DEFAULT_CONNECTIVITY, delays=ALN_NODE_DEFAULT_DELAYS, exc_seed=None, inh_seed=None):
'\n :param exc_params: parameters for the excitatory mass\n :type exc_params: dict|None\n :param inh_params: parameters for the inhibitory mass\n :type inh_params: dict|None\n :param exc_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer functions for excitatory ALN mass, if None, will\n look for it in this directory\n :type exc_lin_nonlin_transfer_function_filename: str|None\n :param inh_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer functions for inhibitory ALN mass, if None, will\n look for it in this directory\n :type inh_lin_nonlin_transfer_function_filename: str|None\n :param connectivity: local connectivity matrix\n :type connectivity: np.ndarray\n :param delays: local delay matrix\n :type delays: np.ndarray\n :param exc_seed: seed for random number generator for the excitatory\n mass\n :type exc_seed: int|None\n :param inh_seed: seed for random number generator for the inhibitory\n mass\n :type inh_seed: int|None\n '
excitatory_mass = ExcitatoryALNMass(params=exc_params, lin_nonlin_transfer_function_filename=exc_lin_nonlin_transfer_function_filename, seed=exc_seed)
excitatory_mass.index = 0
inhibitory_mass = InhibitoryALNMass(params=inh_params, lin_nonlin_transfer_function_filename=inh_lin_nonlin_transfer_function_filename, seed=inh_seed)
inhibitory_mass.index = 1
super().__init__(neural_masses=[excitatory_mass, inhibitory_mass], local_connectivity=connectivity, local_delays=delays)
self._rescale_connectivity()<|docstring|>:param exc_params: parameters for the excitatory mass
:type exc_params: dict|None
:param inh_params: parameters for the inhibitory mass
:type inh_params: dict|None
:param exc_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer functions for excitatory ALN mass, if None, will
look for it in this directory
:type exc_lin_nonlin_transfer_function_filename: str|None
:param inh_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer functions for inhibitory ALN mass, if None, will
look for it in this directory
:type inh_lin_nonlin_transfer_function_filename: str|None
:param connectivity: local connectivity matrix
:type connectivity: np.ndarray
:param delays: local delay matrix
:type delays: np.ndarray
:param exc_seed: seed for random number generator for the excitatory
mass
:type exc_seed: int|None
:param inh_seed: seed for random number generator for the inhibitory
mass
:type inh_seed: int|None<|endoftext|>
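A minimal instantiation sketch (seeds are illustrative; assumes the default parameters and that the precomputed transfer-function files are found alongside the module, as the docstring describes):

from neurolib.models.multimodel.builder.aln import ALNNode

node = ALNNode(exc_seed=42, inh_seed=42)  # defaults for params, connectivity, delays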
|
f34f433371f5252b9754d70364f83a3c297510118cdcc9d0fed6605fd12c6977
|
def update_params(self, params_dict, rescale=True):
'\n Rescale connectivity after params update if connectivity was updated.\n '
old_connectivity = self.connectivity.copy()
super().update_params(params_dict)
rescale_flag = (not (self.connectivity == old_connectivity).all())
if (rescale_flag and rescale):
self._rescale_connectivity()
|
Rescale connectivity after params update if connectivity was updated.
|
neurolib/models/multimodel/builder/aln.py
|
update_params
|
lionelkusch/neurolib
| 258 |
python
|
def update_params(self, params_dict, rescale=True):
'\n \n '
old_connectivity = self.connectivity.copy()
super().update_params(params_dict)
rescale_flag = (not (self.connectivity == old_connectivity).all())
if (rescale_flag and rescale):
self._rescale_connectivity()
|
def update_params(self, params_dict, rescale=True):
'\n \n '
old_connectivity = self.connectivity.copy()
super().update_params(params_dict)
rescale_flag = (not (self.connectivity == old_connectivity).all())
if (rescale_flag and rescale):
self._rescale_connectivity()<|docstring|>Rescale connectivity after params update if connectivity was updated.<|endoftext|>
|
5ba9ce6e7cde578a5211b198ff13adad2b424e3994a4f01b43db8c8d7914a9e2
|
def _sync(self):
'\n Apart from basic EXC<->INH connectivity, also construct the squared\n variants.\n '
connectivity_sq = ((self.connectivity ** 2) * self.inputs)
sq_connectivity = [(self.sync_symbols[f'node_exc_exc_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.excitatory_masses for col in self.excitatory_masses])), (self.sync_symbols[f'node_inh_exc_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.inhibitory_masses for col in self.excitatory_masses])), (self.sync_symbols[f'node_exc_inh_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.excitatory_masses for col in self.inhibitory_masses])), (self.sync_symbols[f'node_inh_inh_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.inhibitory_masses for col in self.inhibitory_masses]))]
return (super()._sync() + sq_connectivity)
|
Apart from basic EXC<->INH connectivity, also construct the squared
variants.
|
neurolib/models/multimodel/builder/aln.py
|
_sync
|
lionelkusch/neurolib
| 258 |
python
|
def _sync(self):
'\n Apart from basic EXC<->INH connectivity, also construct the squared\n variants.\n '
connectivity_sq = ((self.connectivity ** 2) * self.inputs)
sq_connectivity = [(self.sync_symbols[f'node_exc_exc_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.excitatory_masses for col in self.excitatory_masses])), (self.sync_symbols[f'node_inh_exc_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.inhibitory_masses for col in self.excitatory_masses])), (self.sync_symbols[f'node_exc_inh_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.excitatory_masses for col in self.inhibitory_masses])), (self.sync_symbols[f'node_inh_inh_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.inhibitory_masses for col in self.inhibitory_masses]))]
return (super()._sync() + sq_connectivity)
|
def _sync(self):
'\n Apart from basic EXC<->INH connectivity, also construct the squared\n variants.\n '
connectivity_sq = ((self.connectivity ** 2) * self.inputs)
sq_connectivity = [(self.sync_symbols[f'node_exc_exc_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.excitatory_masses for col in self.excitatory_masses])), (self.sync_symbols[f'node_inh_exc_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.inhibitory_masses for col in self.excitatory_masses])), (self.sync_symbols[f'node_exc_inh_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.excitatory_masses for col in self.inhibitory_masses])), (self.sync_symbols[f'node_inh_inh_sq_{self.index}'], sum([connectivity_sq[(row, col)] for row in self.inhibitory_masses for col in self.inhibitory_masses]))]
return (super()._sync() + sq_connectivity)<|docstring|>Apart from basic EXC<->INH connectivity, also construct the squared
variants.<|endoftext|>
|
4d865def66ec1b6c72e9cd31e285165a3febed0ab96415cd1f80791b97581b9b
|
def __init__(self, connectivity_matrix, delay_matrix, exc_mass_params=None, inh_mass_params=None, exc_lin_nonlin_transfer_function_filename=None, inh_lin_nonlin_transfer_function_filename=None, local_connectivity=ALN_NODE_DEFAULT_CONNECTIVITY, local_delays=ALN_NODE_DEFAULT_DELAYS, exc_seed=None, inh_seed=None):
'\n :param connectivity_matrix: connectivity matrix for coupling between\n nodes, defined as [from, to]\n :type connectivity_matrix: np.ndarray\n :param delay_matrix: delay matrix between nodes, if None, delays are\n all zeros, in ms, defined as [from, to]\n :type delay_matrix: np.ndarray|None\n :param exc_mass_params: parameters for each excitatory ALN neural\n mass, if None, will use default\n :type exc_mass_params: list[dict]|dict|None\n :param inh_mass_params: parameters for each inhibitory ALN neural\n mass, if None, will use default\n :type inh_mass_params: list[dict]|dict|None\n :param exc_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer_function for excitatory ALN mass, if None, will\n look for it in this directory\n :type exc_lin_nonlin_transfer_function_filename: list[str]|str|None\n :param inh_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer_function for inhibitory ALN mass, if None, will\n look for it in this directory\n :type inh_lin_nonlin_transfer_function_filename: list[str]|str|None\n :param local_connectivity: local within-node connectivity matrix\n :type local_connectivity: np.ndarray\n :param local_delays: local within-node delay matrix\n :type local_delays: list[np.ndarray]|np.ndarray\n :param exc_seed: seed for random number generator for the excitatory\n masses\n :type exc_seed: int|None\n :param inh_seed: seed for random number generator for the inhibitory\n masses\n :type inh_seed: int|None\n '
num_nodes = connectivity_matrix.shape[0]
exc_mass_params = self._prepare_mass_params(exc_mass_params, num_nodes)
inh_mass_params = self._prepare_mass_params(inh_mass_params, num_nodes)
exc_lin_nonlin_transfer_function_filename = self._prepare_mass_params(exc_lin_nonlin_transfer_function_filename, num_nodes, native_type=str)
inh_lin_nonlin_transfer_function_filename = self._prepare_mass_params(inh_lin_nonlin_transfer_function_filename, num_nodes, native_type=str)
local_connectivity = self._prepare_mass_params(local_connectivity, num_nodes, native_type=np.ndarray)
local_delays = self._prepare_mass_params(local_delays, num_nodes, native_type=np.ndarray)
exc_seeds = self._prepare_mass_params(exc_seed, num_nodes, native_type=int)
inh_seeds = self._prepare_mass_params(inh_seed, num_nodes, native_type=int)
nodes = []
for (i, (exc_params, inh_params, exc_transfer_function, inh_transfer_function, local_conn, local_dels)) in enumerate(zip(exc_mass_params, inh_mass_params, exc_lin_nonlin_transfer_function_filename, inh_lin_nonlin_transfer_function_filename, local_connectivity, local_delays)):
node = ALNNode(exc_params=exc_params, inh_params=inh_params, exc_lin_nonlin_transfer_function_filename=exc_transfer_function, inh_lin_nonlin_transfer_function_filename=inh_transfer_function, connectivity=local_conn, delays=local_dels, exc_seed=exc_seeds[i], inh_seed=inh_seeds[i])
node.index = i
node.idx_state_var = (i * node.num_state_variables)
for mass in node:
mass.noise_input_idx = [((2 * i) + mass.index)]
nodes.append(node)
super().__init__(nodes=nodes, connectivity_matrix=connectivity_matrix, delay_matrix=delay_matrix)
assert (len(self.sync_variables) == 2)
|
:param connectivity_matrix: connectivity matrix for coupling between
nodes, defined as [from, to]
:type connectivity_matrix: np.ndarray
:param delay_matrix: delay matrix between nodes, if None, delays are
all zeros, in ms, defined as [from, to]
:type delay_matrix: np.ndarray|None
:param exc_mass_params: parameters for each excitatory ALN neural
mass, if None, will use default
:type exc_mass_params: list[dict]|dict|None
:param inh_mass_params: parameters for each inhibitory ALN neural
mass, if None, will use default
:type inh_mass_params: list[dict]|dict|None
:param exc_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer_function for excitatory ALN mass, if None, will
look for it in this directory
:type exc_lin_nonlin_transfer_function_filename: list[str]|str|None
:param inh_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer_function for inhibitory ALN mass, if None, will
look for it in this directory
:type inh_lin_nonlin_transfer_function_filename: list[str]|str|None
:param local_connectivity: local within-node connectivity matrix
:type local_connectivity: np.ndarray
:param local_delays: local within-node delay matrix
:type local_delays: list[np.ndarray]|np.ndarray
:param exc_seed: seed for random number generator for the excitatory
masses
:type exc_seed: int|None
:param inh_seed: seed for random number generator for the inhibitory
masses
:type inh_seed: int|None
|
neurolib/models/multimodel/builder/aln.py
|
__init__
|
lionelkusch/neurolib
| 258 |
python
|
def __init__(self, connectivity_matrix, delay_matrix, exc_mass_params=None, inh_mass_params=None, exc_lin_nonlin_transfer_function_filename=None, inh_lin_nonlin_transfer_function_filename=None, local_connectivity=ALN_NODE_DEFAULT_CONNECTIVITY, local_delays=ALN_NODE_DEFAULT_DELAYS, exc_seed=None, inh_seed=None):
'\n :param connectivity_matrix: connectivity matrix for coupling between\n nodes, defined as [from, to]\n :type connectivity_matrix: np.ndarray\n :param delay_matrix: delay matrix between nodes, if None, delays are\n all zeros, in ms, defined as [from, to]\n :type delay_matrix: np.ndarray|None\n :param exc_mass_params: parameters for each excitatory ALN neural\n mass, if None, will use default\n :type exc_mass_params: list[dict]|dict|None\n :param inh_mass_params: parameters for each inhibitory ALN neural\n mass, if None, will use default\n :type inh_mass_params: list[dict]|dict|None\n :param exc_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer_function for excitatory ALN mass, if None, will\n look for it in this directory\n :type exc_lin_nonlin_transfer_function_filename: list[str]|str|None\n :param inh_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer_function for inhibitory ALN mass, if None, will\n look for it in this directory\n :type inh_lin_nonlin_transfer_function_filename: list[str]|str|None\n :param local_connectivity: local within-node connectivity matrix\n :type local_connectivity: np.ndarray\n :param local_delays: local within-node delay matrix\n :type local_delays: list[np.ndarray]|np.ndarray\n :param exc_seed: seed for random number generator for the excitatory\n masses\n :type exc_seed: int|None\n :param inh_seed: seed for random number generator for the inhibitory\n masses\n :type inh_seed: int|None\n '
num_nodes = connectivity_matrix.shape[0]
exc_mass_params = self._prepare_mass_params(exc_mass_params, num_nodes)
inh_mass_params = self._prepare_mass_params(inh_mass_params, num_nodes)
exc_lin_nonlin_transfer_function_filename = self._prepare_mass_params(exc_lin_nonlin_transfer_function_filename, num_nodes, native_type=str)
inh_lin_nonlin_transfer_function_filename = self._prepare_mass_params(inh_lin_nonlin_transfer_function_filename, num_nodes, native_type=str)
local_connectivity = self._prepare_mass_params(local_connectivity, num_nodes, native_type=np.ndarray)
local_delays = self._prepare_mass_params(local_delays, num_nodes, native_type=np.ndarray)
exc_seeds = self._prepare_mass_params(exc_seed, num_nodes, native_type=int)
inh_seeds = self._prepare_mass_params(inh_seed, num_nodes, native_type=int)
nodes = []
for (i, (exc_params, inh_params, exc_transfer_function, inh_transfer_function, local_conn, local_dels)) in enumerate(zip(exc_mass_params, inh_mass_params, exc_lin_nonlin_transfer_function_filename, inh_lin_nonlin_transfer_function_filename, local_connectivity, local_delays)):
node = ALNNode(exc_params=exc_params, inh_params=inh_params, exc_lin_nonlin_transfer_function_filename=exc_transfer_function, inh_lin_nonlin_transfer_function_filename=inh_transfer_function, connectivity=local_conn, delays=local_dels, exc_seed=exc_seeds[i], inh_seed=inh_seeds[i])
node.index = i
node.idx_state_var = (i * node.num_state_variables)
for mass in node:
mass.noise_input_idx = [((2 * i) + mass.index)]
nodes.append(node)
super().__init__(nodes=nodes, connectivity_matrix=connectivity_matrix, delay_matrix=delay_matrix)
assert (len(self.sync_variables) == 2)
|
def __init__(self, connectivity_matrix, delay_matrix, exc_mass_params=None, inh_mass_params=None, exc_lin_nonlin_transfer_function_filename=None, inh_lin_nonlin_transfer_function_filename=None, local_connectivity=ALN_NODE_DEFAULT_CONNECTIVITY, local_delays=ALN_NODE_DEFAULT_DELAYS, exc_seed=None, inh_seed=None):
'\n :param connectivity_matrix: connectivity matrix for coupling between\n nodes, defined as [from, to]\n :type connectivity_matrix: np.ndarray\n :param delay_matrix: delay matrix between nodes, if None, delays are\n all zeros, in ms, defined as [from, to]\n :type delay_matrix: np.ndarray|None\n :param exc_mass_params: parameters for each excitatory ALN neural\n mass, if None, will use default\n :type exc_mass_params: list[dict]|dict|None\n :param inh_mass_params: parameters for each inhibitory ALN neural\n mass, if None, will use default\n :type inh_mass_params: list[dict]|dict|None\n :param exc_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer_function for excitatory ALN mass, if None, will\n look for it in this directory\n :type exc_lin_nonlin_transfer_function_filename: list[str]|str|None\n :param inh_lin_nonlin_transfer_function_filename: filename for precomputed\n linear-nonlinear transfer_function for inhibitory ALN mass, if None, will\n look for it in this directory\n :type inh_lin_nonlin_transfer_function_filename: list[str]|str|None\n :param local_connectivity: local within-node connectivity matrix\n :type local_connectivity: np.ndarray\n :param local_delays: local within-node delay matrix\n :type local_delays: list[np.ndarray]|np.ndarray\n :param exc_seed: seed for random number generator for the excitatory\n masses\n :type exc_seed: int|None\n :param inh_seed: seed for random number generator for the inhibitory\n masses\n :type inh_seed: int|None\n '
num_nodes = connectivity_matrix.shape[0]
exc_mass_params = self._prepare_mass_params(exc_mass_params, num_nodes)
inh_mass_params = self._prepare_mass_params(inh_mass_params, num_nodes)
exc_lin_nonlin_transfer_function_filename = self._prepare_mass_params(exc_lin_nonlin_transfer_function_filename, num_nodes, native_type=str)
inh_lin_nonlin_transfer_function_filename = self._prepare_mass_params(inh_lin_nonlin_transfer_function_filename, num_nodes, native_type=str)
local_connectivity = self._prepare_mass_params(local_connectivity, num_nodes, native_type=np.ndarray)
local_delays = self._prepare_mass_params(local_delays, num_nodes, native_type=np.ndarray)
exc_seeds = self._prepare_mass_params(exc_seed, num_nodes, native_type=int)
inh_seeds = self._prepare_mass_params(inh_seed, num_nodes, native_type=int)
nodes = []
for (i, (exc_params, inh_params, exc_transfer_function, inh_transfer_function, local_conn, local_dels)) in enumerate(zip(exc_mass_params, inh_mass_params, exc_lin_nonlin_transfer_function_filename, inh_lin_nonlin_transfer_function_filename, local_connectivity, local_delays)):
node = ALNNode(exc_params=exc_params, inh_params=inh_params, exc_lin_nonlin_transfer_function_filename=exc_transfer_function, inh_lin_nonlin_transfer_function_filename=inh_transfer_function, connectivity=local_conn, delays=local_dels, exc_seed=exc_seeds[i], inh_seed=inh_seeds[i])
node.index = i
node.idx_state_var = (i * node.num_state_variables)
for mass in node:
mass.noise_input_idx = [((2 * i) + mass.index)]
nodes.append(node)
super().__init__(nodes=nodes, connectivity_matrix=connectivity_matrix, delay_matrix=delay_matrix)
assert (len(self.sync_variables) == 2)<|docstring|>:param connectivity_matrix: connectivity matrix for coupling between
nodes, defined as [from, to]
:type connectivity_matrix: np.ndarray
:param delay_matrix: delay matrix between nodes, if None, delays are
all zeros, in ms, defined as [from, to]
:type delay_matrix: np.ndarray|None
:param exc_mass_params: parameters for each excitatory ALN neural
mass, if None, will use default
:type exc_mass_params: list[dict]|dict|None
:param inh_mass_params: parameters for each inhibitory ALN neural
mass, if None, will use default
:type inh_mass_params: list[dict]|dict|None
:param exc_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer_function for excitatory ALN mass, if None, will
look for it in this directory
:type exc_lin_nonlin_transfer_function_filename: list[str]|str|None
:param inh_lin_nonlin_transfer_function_filename: filename for precomputed
linear-nonlinear transfer_function for inhibitory ALN mass, if None, will
look for it in this directory
:type inh_lin_nonlin_transfer_function_filename: list[str]|str|None
:param local_connectivity: local within-node connectivity matrix
:type local_connectivity: np.ndarray
:param local_delays: local within-node delay matrix
:type local_delays: list[np.ndarray]|np.ndarray
:param exc_seed: seed for random number generator for the excitatory
masses
:type exc_seed: int|None
:param inh_seed: seed for random number generator for the inhibitory
masses
:type inh_seed: int|None<|endoftext|>
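A hypothetical two-node instantiation (weights and delays are illustrative only):

import numpy as np
from neurolib.models.multimodel.builder.aln import ALNNetwork

cmat = np.array([[0.0, 0.6],
                 [0.6, 0.0]])     # coupling, [from, to]
dmat = np.array([[0.0, 10.0],
                 [10.0, 0.0]])    # delays in ms
net = ALNNetwork(connectivity_matrix=cmat, delay_matrix=dmat, exc_seed=42, inh_seed=42)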
|
103a9fc65a61803b5f9ccfb8c99ef420475806fc2d0542581b157cc9b85cf9a3
|
def _sync(self):
'\n Overload sync method since the ALN model requires\n squared coupling weights and non-trivial coupling indices.\n '
coupling_var_idx = set(sum([list(node[0].coupling_variables.keys()) for node in self], []))
assert (len(coupling_var_idx) == 1)
coupling_var_idx = next(iter(coupling_var_idx))
return ((self._additive_coupling(within_node_idx=coupling_var_idx, symbol='network_exc_exc') + self._additive_coupling(within_node_idx=coupling_var_idx, symbol='network_exc_exc_sq', connectivity=(self.connectivity * self.connectivity))) + super()._sync())
|
Overload sync method since the ALN model requires
squared coupling weights and non-trivial coupling indices.
|
neurolib/models/multimodel/builder/aln.py
|
_sync
|
lionelkusch/neurolib
| 258 |
python
|
def _sync(self):
'\n Overload sync method since the ALN model requires\n squared coupling weights and non-trivial coupling indices.\n '
coupling_var_idx = set(sum([list(node[0].coupling_variables.keys()) for node in self], []))
assert (len(coupling_var_idx) == 1)
coupling_var_idx = next(iter(coupling_var_idx))
return ((self._additive_coupling(within_node_idx=coupling_var_idx, symbol='network_exc_exc') + self._additive_coupling(within_node_idx=coupling_var_idx, symbol='network_exc_exc_sq', connectivity=(self.connectivity * self.connectivity))) + super()._sync())
|
def _sync(self):
'\n Overload sync method since the ALN model requires\n squared coupling weights and non-trivial coupling indices.\n '
coupling_var_idx = set(sum([list(node[0].coupling_variables.keys()) for node in self], []))
assert (len(coupling_var_idx) == 1)
coupling_var_idx = next(iter(coupling_var_idx))
return ((self._additive_coupling(within_node_idx=coupling_var_idx, symbol='network_exc_exc') + self._additive_coupling(within_node_idx=coupling_var_idx, symbol='network_exc_exc_sq', connectivity=(self.connectivity * self.connectivity))) + super()._sync())<|docstring|>Overload sync method since the ALN model requires
squared coupling weights and non-trivial coupling indices.<|endoftext|>
|
0a67dfce3d8805a55a84687b0bb4c847bb41022e06f84571adbea47ca5adefcd
|
def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
'\n Function generator for numba callbacks. This works similarly to\n `functools.partial` (i.e. sets some of the arguments of the inner\n function), but afterwards can be jitted with `numba.njit()`, while\n partial functions cannot.\n '
def inner(current_mu, current_sigma):
return _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function)
return inner
|
Function generator for numba callbacks. This works similarly to
`functools.partial` (i.e. sets some of the arguments of the inner
function), but afterwards can be jitted with `numba.njit()`, while
partial functions cannot.
|
neurolib/models/multimodel/builder/aln.py
|
_table_numba_gen
|
lionelkusch/neurolib
| 258 |
python
|
def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
'\n Function generator for numba callbacks. This works similarly to\n `functools.partial` (i.e. sets some of the arguments of the inner\n function), but afterwards can be jitted with `numba.njit()`, while\n partial functions cannot.\n '
def inner(current_mu, current_sigma):
return _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function)
return inner
|
def _table_numba_gen(sigma_range, mu_range, d_sigma, d_mu, transfer_function):
'\n Function generator for numba callbacks. This works similarly to\n `functools.partial` (i.e. sets some of the arguments of the inner\n function), but afterwards can be jitted with `numba.njit()`, while\n partial functions cannot.\n '
def inner(current_mu, current_sigma):
return _table_lookup(current_mu, current_sigma, sigma_range, mu_range, d_sigma, d_mu, transfer_function)
return inner<|docstring|>Function generator for numba callbacks. This works similarly to
`functools.partial` (i.e. sets some of the arguments of the inner
function), but afterwards can be jitted with `numba.njit()`, while
partial functions cannot.<|endoftext|>
|
9416f09d202f64f8fb2ea7c8fb2b7c6ed741023e34942ae0773b7306a2e9a7d3
|
def benchmark_cuds_create(benchmark):
'Wrapper function for the CudsCreate benchmark.'
return CudsCreate.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the CudsCreate benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_create
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_create(benchmark):
return CudsCreate.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_create(benchmark):
return CudsCreate.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the CudsCreate benchmark.<|endoftext|>
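All of the wrappers that follow share this one-line shape; the mechanism underneath (assumed here, since `iterate_pytest_benchmark` is defined elsewhere in osp-core) is pytest-benchmark's `benchmark` fixture, which runs and times a callable handed to it:

def my_operation():              # placeholder, not part of osp-core
    return sum(range(100))

def test_my_operation(benchmark):
    result = benchmark(my_operation)   # fixture runs and times the call
    assert result == 4950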
|
d4077983a58539e12402b1a328a66335797042711333fca2b8c07aac000e1da6
|
def benchmark_add_default(benchmark):
'Wrapper function for the Cuds_add_Default benchmark.'
return Cuds_add_Default.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_add_Default benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_add_default
|
simphony/osp-core
| 17 |
python
|
def benchmark_add_default(benchmark):
return Cuds_add_Default.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_add_default(benchmark):
return Cuds_add_Default.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_add_Default benchmark.<|endoftext|>
|
2490b932d7327234b05dccf98ba0bca0e57e39fa1044af63ae3d5e41137c193f
|
def benchmark_cuds_add_rel(benchmark):
'Wrapper function for the Cuds_add_Rel benchmark.'
return Cuds_add_Rel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_add_Rel benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_add_rel
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_add_rel(benchmark):
return Cuds_add_Rel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_add_rel(benchmark):
return Cuds_add_Rel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_add_Rel benchmark.<|endoftext|>
|
5565a5834dad0f25b13df3291948cab03d82e54e54a919a326b36cceb2de6caa
|
def benchmark_cuds_get_byuiduuid(benchmark):
'Wrapper function for the Cuds_get_ByuidUUID benchmark.'
return Cuds_get_ByuidUUID.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_get_ByuidUUID benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_get_byuiduuid
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_get_byuiduuid(benchmark):
return Cuds_get_ByuidUUID.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_get_byuiduuid(benchmark):
return Cuds_get_ByuidUUID.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_get_ByuidUUID benchmark.<|endoftext|>
|
15b28bcb75cd9b5e148cc03e83d4ed93b6bab5a55643de8665cfc8b203f327a5
|
def benchmark_get_byuiduriref(benchmark):
'Wrapper function for the Cuds_get_ByuidURIRef benchmark.'
return Cuds_get_ByuidURIRef.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_get_ByuidURIRef benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_get_byuiduriref
|
simphony/osp-core
| 17 |
python
|
def benchmark_get_byuiduriref(benchmark):
return Cuds_get_ByuidURIRef.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_get_byuiduriref(benchmark):
return Cuds_get_ByuidURIRef.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_get_ByuidURIRef benchmark.<|endoftext|>
|
270d70eaf940fcc36f6c4707133ed74d58a4d7fba1e98d1c4cfcad262740274b
|
def benchmark_get_byrel(benchmark):
'Wrapper function for the Cuds_get_ByRel benchmark.'
return Cuds_get_ByRel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_get_ByRel benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_get_byrel
|
simphony/osp-core
| 17 |
python
|
def benchmark_get_byrel(benchmark):
return Cuds_get_ByRel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_get_byrel(benchmark):
return Cuds_get_ByRel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_get_ByRel benchmark.<|endoftext|>
|
7e8b394ed75748454e342ebdbef4c6d4bf9160c439c2606e84293a8c9562f424
|
def benchmark_get_byoclass(benchmark):
'Wrapper function for the Cuds_get_Byoclass benchmark.'
return Cuds_get_Byoclass.iterate_pytest_benchmark(benchmark, DEFAULT_SIZE)
|
Wrapper function for the Cuds_get_Byoclass benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_get_byoclass
|
simphony/osp-core
| 17 |
python
|
def benchmark_get_byoclass(benchmark):
return Cuds_get_Byoclass.iterate_pytest_benchmark(benchmark, DEFAULT_SIZE)
|
def benchmark_get_byoclass(benchmark):
return Cuds_get_Byoclass.iterate_pytest_benchmark(benchmark, DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_get_Byoclass benchmark.<|endoftext|>
|
f0769042ea6c63a3f7f28f4d97aa379a27cfa5a6329efcea52d9ecdc029904d4
|
def benchmark_cuds_iter_byuiduuid(benchmark):
'Wrapper function for the Cuds_iter_ByuidUUID benchmark.'
return Cuds_iter_ByuidUUID.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_iter_ByuidUUID benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_iter_byuiduuid
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_iter_byuiduuid(benchmark):
return Cuds_iter_ByuidUUID.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_iter_byuiduuid(benchmark):
return Cuds_iter_ByuidUUID.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_iter_ByuidUUID benchmark.<|endoftext|>
|
860f4e30de8471ccf2c546958d5dbdcb13c6a9ce2541af0f85ce2b41bee187f9
|
def benchmark_cuds_iter_byuiduriref(benchmark):
'Wrapper function for the Cuds_iter_ByuidURIRef benchmark.'
return Cuds_iter_ByuidURIRef.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_iter_ByuidURIRef benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_iter_byuiduriref
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_iter_byuiduriref(benchmark):
return Cuds_iter_ByuidURIRef.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_iter_byuiduriref(benchmark):
return Cuds_iter_ByuidURIRef.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_iter_ByuidURIRef benchmark.<|endoftext|>
|
f2429465682feb63d6798a8590e4a5d6cbc5c5e0fc0ae1f1be7a55f453dbd69a
|
def benchmark_cuds_iter_byrel(benchmark):
'Wrapper function for the Cuds_iter_ByRel benchmark.'
return Cuds_iter_ByRel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_iter_ByRel benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_iter_byrel
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_iter_byrel(benchmark):
return Cuds_iter_ByRel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_iter_byrel(benchmark):
return Cuds_iter_ByRel.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_iter_ByRel benchmark.<|endoftext|>
|
849a4d1313733464e1c3616c2872ebbf0e1f5c3772e34ccc17ec6579d7919090
|
def benchmark_iter_byoclass(benchmark):
'Wrapper function for the Cuds_iter_Byoclass benchmark.'
return Cuds_iter_Byoclass.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_iter_Byoclass benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_iter_byoclass
|
simphony/osp-core
| 17 |
python
|
def benchmark_iter_byoclass(benchmark):
return Cuds_iter_Byoclass.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_iter_byoclass(benchmark):
return Cuds_iter_Byoclass.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_iter_Byoclass benchmark.<|endoftext|>
|
1813039272f48983fbef6dda0bf7e830c389d0f29d866c39452d1a6bc42be3f3
|
def benchmark_cuds_is_a(benchmark):
'Wrapper function for the Cuds_is_a benchmark.'
return Cuds_is_a.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_is_a benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_is_a
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_is_a(benchmark):
return Cuds_is_a.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_is_a(benchmark):
return Cuds_is_a.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_is_a benchmark.<|endoftext|>
|
1a2dc8dbaaa55954f5ca3eb325e30a4a4cc57c62efa3aecbdf6b920a121cc1ab
|
def benchmark_cuds_oclass(benchmark):
'Wrapper function for the Cuds_oclass benchmark.'
return Cuds_oclass.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_oclass benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_oclass
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_oclass(benchmark):
return Cuds_oclass.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_oclass(benchmark):
return Cuds_oclass.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_oclass benchmark.<|endoftext|>
|
ae1fa9feb7cff4baee0f22b5e18e71a36b066b0ce8f5f60f3fd3cccf585bacaa
|
def benchmark_cuds_uid(benchmark):
'Wrapper function for the Cuds_uid benchmark.'
return Cuds_uid.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_uid benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_uid
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_uid(benchmark):
return Cuds_uid.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_uid(benchmark):
return Cuds_uid.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_uid benchmark.<|endoftext|>
|
19a33c621f7e0167fd7e3fb5361c271fe683a7fef8843f8c5e6fa6bc65a0cc6b
|
def benchmark_cuds_iri(benchmark):
'Wrapper function for the Cuds_iri benchmark.'
return Cuds_iri.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_iri benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_iri
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_iri(benchmark):
return Cuds_iri.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_iri(benchmark):
return Cuds_iri.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_iri benchmark.<|endoftext|>
|
cc3bfcb3f8653acb89a1141e1ffa50098acab2558b1609acdd93d5896e996ebb
|
def benchmark_cuds_attributes(benchmark):
'Wrapper function for the Cuds_attributes benchmark.'
return Cuds_attributes.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
Wrapper function for the Cuds_attributes benchmark.
|
tests/benchmark_cuds_api.py
|
benchmark_cuds_attributes
|
simphony/osp-core
| 17 |
python
|
def benchmark_cuds_attributes(benchmark):
return Cuds_attributes.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)
|
def benchmark_cuds_attributes(benchmark):
return Cuds_attributes.iterate_pytest_benchmark(benchmark, size=DEFAULT_SIZE)<|docstring|>Wrapper function for the Cuds_attributes benchmark.<|endoftext|>
|
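The ten wrapper functions above share one pattern: each delegates to a classmethod iterate_pytest_benchmark(benchmark, size) on a per-operation benchmark class defined elsewhere in the file. That classmethod is not part of this excerpt, so the following is only a minimal sketch of the delegation pattern, assuming nothing beyond pytest-benchmark's callable `benchmark` fixture; the class and method bodies here are illustrative stand-ins, not osp-core code.

import uuid


class BenchmarkSketch:
    """Hypothetical stand-in for classes like Cuds_uid or Cuds_iter_ByRel."""

    def __init__(self, size):
        self.size = size
        # Build the test data once, outside the timed region.
        self.uids = [uuid.uuid4() for _ in range(size)]

    def run(self):
        # One timed pass over the prepared data.
        for uid in self.uids:
            str(uid)

    @classmethod
    def iterate_pytest_benchmark(cls, benchmark, size):
        instance = cls(size)
        # pytest-benchmark calls `instance.run` repeatedly and records timings.
        return benchmark(instance.run)


def benchmark_sketch(benchmark):
    return BenchmarkSketch.iterate_pytest_benchmark(benchmark, 500)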
f5c427cccc83a30b11683a09a50d623acde72e1ba6de0862d2a07d7e95a479c1
|
def plot_images(images, cls_true, cls_pred=None):
'\n Adapted from https://github.com/Hvass-Labs/TensorFlow-Tutorials/\n '
(fig, axes) = plt.subplots(3, 3)
for (i, ax) in enumerate(axes.flat):
ax.imshow(images[(i, :, :, :)], interpolation='spline16')
cls_true_name = label_names[cls_true[i]]
if (cls_pred is None):
xlabel = '{0} ({1})'.format(cls_true_name, cls_true[i])
else:
cls_pred_name = label_names[cls_pred[i]]
xlabel = 'True: {0}\nPred: {1}'.format(cls_true_name, cls_pred_name)
ax.set_xlabel(xlabel)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
|
Adapted from https://github.com/Hvass-Labs/TensorFlow-Tutorials/
|
utils.py
|
plot_images
|
archeltaneka/cgp-cnn-PyTorch
| 58 |
python
|
def plot_images(images, cls_true, cls_pred=None):
'\n \n '
(fig, axes) = plt.subplots(3, 3)
for (i, ax) in enumerate(axes.flat):
ax.imshow(images[(i, :, :, :)], interpolation='spline16')
cls_true_name = label_names[cls_true[i]]
if (cls_pred is None):
xlabel = '{0} ({1})'.format(cls_true_name, cls_true[i])
else:
cls_pred_name = label_names[cls_pred[i]]
xlabel = 'True: {0}\nPred: {1}'.format(cls_true_name, cls_pred_name)
ax.set_xlabel(xlabel)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
|
def plot_images(images, cls_true, cls_pred=None):
'\n \n '
(fig, axes) = plt.subplots(3, 3)
for (i, ax) in enumerate(axes.flat):
ax.imshow(images[(i, :, :, :)], interpolation='spline16')
cls_true_name = label_names[cls_true[i]]
if (cls_pred is None):
xlabel = '{0} ({1})'.format(cls_true_name, cls_true[i])
else:
cls_pred_name = label_names[cls_pred[i]]
xlabel = 'True: {0}\nPred: {1}'.format(cls_true_name, cls_pred_name)
ax.set_xlabel(xlabel)
ax.set_xticks([])
ax.set_yticks([])
plt.show()<|docstring|>Adapted from https://github.com/Hvass-Labs/TensorFlow-Tutorials/<|endoftext|>
|
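plot_images expects a module-level `label_names` list and a batch of at least nine HWC images. A quick smoke test with random CIFAR-10-shaped data, with label_names stubbed since the excerpt does not show where it is defined:

import numpy as np

label_names = ['class_%d' % i for i in range(10)]  # stand-in for the real list
images = np.random.rand(9, 32, 32, 3)              # nine RGB images in [0, 1]
cls_true = np.random.randint(0, 10, size=9)
plot_images(images, cls_true)                       # opens a 3x3 grid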
6de4325d259b0294240f8a09bee16ef507a868c62e11b2afa890510b63799df7
|
def main(*args):
'\n usage attr1, attr2, attr3, ..., t0, t1\n '
try:
assert (len(args) > 2)
(t0, t1) = (args[(- 2)], args[(- 1)])
models = args[:(- 2)]
ArchivingWidget.run(models, t0, t1)
except:
print(__usage__)
|
usage attr1, attr2, attr3, ..., t0, t1
|
PyTangoArchiving/widget/tpgarchivingwidget.py
|
main
|
sergirubio/PyTangoArchiving
| 6 |
python
|
def main(*args):
'\n \n '
try:
assert (len(args) > 2)
(t0, t1) = (args[(- 2)], args[(- 1)])
models = args[:(- 2)]
ArchivingWidget.run(models, t0, t1)
except:
print(__usage__)
|
def main(*args):
'\n \n '
try:
assert (len(args) > 2)
(t0, t1) = (args[(- 2)], args[(- 1)])
models = args[:(- 2)]
ArchivingWidget.run(models, t0, t1)
except:
print(__usage__)<|docstring|>usage attr1, attr2, attr3, ..., t0, t1<|endoftext|>
|
9ff94085baa893588fff6489d30d7debd3ca531127692eb25497e3ed635d5174
|
def addXYModels(self, attrs, t0=None, t1=None):
"\n Convert model, dates to \n 'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',\n "
c = self.cursor()
self.setCursor(Qt.Qt.WaitCursor)
attrs = fn.toList(attrs)
if ((not t0) and (not t1) and (not self.t0) and (not self.t1)):
(t0, t1) = self.tc.getTimes()
if (t0 and t1):
t0 = (t0 if fn.isNumber(t0) else fn.str2time(t0, relative=True))
t1 = (t1 if fn.isNumber(t1) else fn.str2time(t1, relative=True))
(self.t0, self.t1) = (fn.time2str(t0, iso=1), fn.time2str(t1, iso=1))
self.t0 = self.t0.replace(' ', 'T')
self.t1 = self.t1.replace(' ', 'T')
ms = []
for attr in attrs:
attr = fn.tango.get_full_name(attr, fqdn=True)
attr = attr.replace('tango://', '')
q = ('tgarch://%s?db=*;t0=%s;t1=%s' % (attr, self.t0, self.t1))
m = ((q + ';ts'), q)
ms.append(m)
self.plot.onAddXYModel(ms)
self.setCursor(c)
|
Convert model, dates to
'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',
|
PyTangoArchiving/widget/tpgarchivingwidget.py
|
addXYModels
|
sergirubio/PyTangoArchiving
| 6 |
python
|
def addXYModels(self, attrs, t0=None, t1=None):
"\n Convert model, dates to \n 'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',\n "
c = self.cursor()
self.setCursor(Qt.Qt.WaitCursor)
attrs = fn.toList(attrs)
if ((not t0) and (not t1) and (not self.t0) and (not self.t1)):
(t0, t1) = self.tc.getTimes()
if (t0 and t1):
t0 = (t0 if fn.isNumber(t0) else fn.str2time(t0, relative=True))
t1 = (t1 if fn.isNumber(t1) else fn.str2time(t1, relative=True))
(self.t0, self.t1) = (fn.time2str(t0, iso=1), fn.time2str(t1, iso=1))
self.t0 = self.t0.replace(' ', 'T')
self.t1 = self.t1.replace(' ', 'T')
ms = []
for attr in attrs:
attr = fn.tango.get_full_name(attr, fqdn=True)
        attr = attr.replace('tango://', '')
q = ('tgarch://%s?db=*;t0=%s;t1=%s' % (attr, self.t0, self.t1))
m = ((q + ';ts'), q)
ms.append(m)
self.plot.onAddXYModel(ms)
self.setCursor(c)
|
def addXYModels(self, attrs, t0=None, t1=None):
"\n Convert model, dates to \n 'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',\n "
c = self.cursor()
self.setCursor(Qt.Qt.WaitCursor)
attrs = fn.toList(attrs)
if ((not t0) and (not t1) and (not self.t0) and (not self.t1)):
(t0, t1) = self.tc.getTimes()
if (t0 and t1):
t0 = (t0 if fn.isNumber(t0) else fn.str2time(t0, relative=True))
t1 = (t1 if fn.isNumber(t1) else fn.str2time(t1, relative=True))
(self.t0, self.t1) = (fn.time2str(t0, iso=1), fn.time2str(t1, iso=1))
self.t0 = self.t0.replace(' ', 'T')
self.t1 = self.t1.replace(' ', 'T')
ms = []
for attr in attrs:
attr = fn.tango.get_full_name(attr, fqdn=True)
        attr = attr.replace('tango://', '')
q = ('tgarch://%s?db=*;t0=%s;t1=%s' % (attr, self.t0, self.t1))
m = ((q + ';ts'), q)
ms.append(m)
self.plot.onAddXYModel(ms)
self.setCursor(c)<|docstring|>Convert model, dates to
'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',<|endoftext|>
|
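Stripped of the Taurus/Qt plumbing, the conversion above is plain string formatting. Below is a self-contained sketch with the fandango time helpers (fn.str2time, fn.time2str) replaced by the standard library; the host and attribute name are made-up examples, not a real archiving endpoint.

from datetime import datetime

def tgarch_models(attr, t0, t1):
    # Mirror the query built above: tgarch://<attr>?db=*;t0=<iso>;t1=<iso>
    iso0 = datetime.fromtimestamp(t0).isoformat(timespec='seconds')
    iso1 = datetime.fromtimestamp(t1).isoformat(timespec='seconds')
    q = 'tgarch://%s?db=*;t0=%s;t1=%s' % (attr, iso0, iso1)
    return (q + ';ts', q)  # (x model, y model) pair, as onAddXYModel consumes

pair = tgarch_models('alba03.cells.es:10000/sr/id/scw01/pressure',
                     1573468919, 1578038583)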
7ff59a2a0901ee4a0666ea6b7b33fc92b4d2e0321311d090776b513534702871
|
def onAddXYModel(self, models=None):
"\n models being a list like:\n \n [('tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts', \n 'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03')]\n "
try:
plot = self
self.updateSig.emit(True, False)
print(('onAddXYModel(%s)' % models))
current = plot._model_chooser_tool.getModelNames()
print(('current: %s' % str(current)))
models = [m for m in models if (m not in current)]
print(('new models: %s' % str(models)))
plot.addModels(models)
self.updateSig.emit(True, True)
except:
traceback.print_exc()
|
models being a list like:
[('tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',
'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03')]
|
PyTangoArchiving/widget/tpgarchivingwidget.py
|
onAddXYModel
|
sergirubio/PyTangoArchiving
| 6 |
python
|
def onAddXYModel(self, models=None):
"\n models being a list like:\n \n [('tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts', \n 'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03')]\n "
try:
plot = self
self.updateSig.emit(True, False)
print(('onAddXYModel(%s)' % models))
current = plot._model_chooser_tool.getModelNames()
print(('current: %s' % str(current)))
models = [m for m in models if (m not in current)]
print(('new models: %s' % str(models)))
plot.addModels(models)
self.updateSig.emit(True, True)
except:
traceback.print_exc()
|
def onAddXYModel(self, models=None):
"\n models being a list like:\n \n [('tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts', \n 'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03')]\n "
try:
plot = self
self.updateSig.emit(True, False)
print(('onAddXYModel(%s)' % models))
current = plot._model_chooser_tool.getModelNames()
print(('current: %s' % str(current)))
models = [m for m in models if (m not in current)]
print(('new models: %s' % str(models)))
plot.addModels(models)
self.updateSig.emit(True, True)
except:
traceback.print_exc()<|docstring|>models being a list like:
[('tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts',
'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03')]<|endoftext|>
|
8225f7ed7d33eb713a2718bd19513f955ce98d0d4608316f6a9d925f5f555ce7
|
def __init__(self, width=0, height=0):
    'Initialization of the width and height of the Rectangle'
self.width = width
self.height = height
Rectangle.number_of_instances += 1
|
Initialization of the width and height of the Rectangle
|
0x08-python-more_classes/7-rectangle.py
|
__init__
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
def __init__(self, width=0, height=0):
self.width = width
self.height = height
Rectangle.number_of_instances += 1
|
def __init__(self, width=0, height=0):
self.width = width
self.height = height
    Rectangle.number_of_instances += 1<|docstring|>Initialization of the width and height of the Rectangle<|endoftext|>
|
0bdcb37eb526223b64b4e9550d1f0572234645089da2a40eecc8f604a2fa0ff8
|
@property
def width(self):
    ' returns width variable of Rectangle class instance '
return self.__width
|
returns width variable of Rectangle class instance
|
0x08-python-more_classes/7-rectangle.py
|
width
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
@property
def width(self):
' '
return self.__width
|
@property
def width(self):
' '
    return self.__width<|docstring|>returns width variable of Rectangle class instance<|endoftext|>
|
71898f5ee0241deb03f0384ece1837c7490f748e0da9d14439dd1c091faaa5ac
|
@width.setter
def width(self, value):
'Sets width value for Rectangle'
if (type(value) != int):
raise TypeError('width must be an integer')
if (value < 0):
raise ValueError('width must be >= 0')
self.__width = value
|
Sets width value for Rectangle
|
0x08-python-more_classes/7-rectangle.py
|
width
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
@width.setter
def width(self, value):
if (type(value) != int):
raise TypeError('width must be an integer')
if (value < 0):
raise ValueError('width must be >= 0')
self.__width = value
|
@width.setter
def width(self, value):
if (type(value) != int):
raise TypeError('width must be an integer')
if (value < 0):
raise ValueError('width must be >= 0')
self.__width = value<|docstring|>Sets width value for Rectangle<|endoftext|>
|
38119b655de76e0533d469bc736deb3df443f1d7ae80a9cd3e3ef5cf59a46c69
|
@property
def height(self):
    ' returns height variable of Rectangle class instance '
return self.__height
|
returns height variable of Rectangle class instance
|
0x08-python-more_classes/7-rectangle.py
|
height
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
@property
def height(self):
' '
return self.__height
|
@property
def height(self):
' '
    return self.__height<|docstring|>returns height variable of Rectangle class instance<|endoftext|>
|
5caee779a4f76b57610bd6d2433e39e1054f7258836735a2d77a03c1a8872c1d
|
@height.setter
def height(self, value):
    'Sets height value for Rectangle'
if (type(value) != int):
raise TypeError('height must be an integer')
if (value < 0):
raise ValueError('height must be >= 0')
self.__height = value
|
Sets height value for Rectangle
|
0x08-python-more_classes/7-rectangle.py
|
height
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
@height.setter
def height(self, value):
if (type(value) != int):
raise TypeError('height must be an integer')
if (value < 0):
raise ValueError('height must be >= 0')
self.__height = value
|
@height.setter
def height(self, value):
if (type(value) != int):
raise TypeError('height must be an integer')
if (value < 0):
raise ValueError('height must be >= 0')
    self.__height = value<|docstring|>Sets height value for Rectangle<|endoftext|>
|
4569577e3ca64ab83a3f6882ad4311674d972ea0bcf0a0bb3e55b1f2227ec6dd
|
def area(self):
'Returns the area of the Rectangle'
return (self.__width * self.__height)
|
Returns the area of the Rectangle
|
0x08-python-more_classes/7-rectangle.py
|
area
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
def area(self):
return (self.__width * self.__height)
|
def area(self):
return (self.__width * self.__height)<|docstring|>Returns the area of the Rectangle<|endoftext|>
|
77222fe833fee18394511cae699c32a6a6366dfa81efb119064550b222d11b15
|
def perimeter(self):
'Returns the perimeter of the Rectangle'
if ((self.__width == 0) or (self.__height == 0)):
p = 0
else:
p = ((self.__width * 2) + (self.__height * 2))
return p
|
Returns the perimeter of the Rectangle
|
0x08-python-more_classes/7-rectangle.py
|
perimeter
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
def perimeter(self):
if ((self.__width == 0) or (self.__height == 0)):
p = 0
else:
p = ((self.__width * 2) + (self.__height * 2))
return p
|
def perimeter(self):
if ((self.__width == 0) or (self.__height == 0)):
p = 0
else:
p = ((self.__width * 2) + (self.__height * 2))
return p<|docstring|>Returns the perimeter of the Rectangle<|endoftext|>
|
05a1f4ad00f1827a46b59e2569905e32c165773f0b520821389e972b50470c12
|
def __str__(self):
'Prints the rectangle made with # signs'
st = ''
if ((self.__width == 0) or (self.__height == 0)):
return st
else:
for h in range(self.__height):
st += str(((str(self.print_symbol) * self.__width) + '\n'))
h += 1
return st[:(- 1)]
|
Prints the rectangle made with # signs
|
0x08-python-more_classes/7-rectangle.py
|
__str__
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
def __str__(self):
    st = ''
if ((self.__width == 0) or (self.__height == 0)):
return st
else:
for h in range(self.__height):
st += str(((str(self.print_symbol) * self.__width) + '\n'))
h += 1
return st[:(- 1)]
|
def __str__(self):
    st = ''
if ((self.__width == 0) or (self.__height == 0)):
return st
else:
for h in range(self.__height):
st += str(((str(self.print_symbol) * self.__width) + '\n'))
h += 1
return st[:(- 1)]<|docstring|>Prints the rectangle made with # signs<|endoftext|>
|
6bd153a65c19446272952256b31f1e06f0331616dfe7699c4a1f6194eb73d6df
|
def __repr__(self):
'returns representation of a rectangle'
return 'Rectangle({:d}, {:d})'.format(self.__width, self.__height)
|
returns representation of a rectangle
|
0x08-python-more_classes/7-rectangle.py
|
__repr__
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
def __repr__(self):
return 'Rectangle({:d}, {:d})'.format(self.__width, self.__height)
|
def __repr__(self):
return 'Rectangle({:d}, {:d})'.format(self.__width, self.__height)<|docstring|>returns representation of a rectangle<|endoftext|>
|
d0175c07cd7e8063c94ca43d9d5550a170913aa607f02aafa02aa1bcc32bf1a4
|
def __del__(self):
'deletes a rectangle'
Rectangle.number_of_instances -= 1
return print('Bye rectangle...')
|
deletes a rectangle
|
0x08-python-more_classes/7-rectangle.py
|
__del__
|
JRodriguez9510/holbertonschool-higher_level_programming-2
| 1 |
python
|
def __del__(self):
Rectangle.number_of_instances -= 1
return print('Bye rectangle...')
|
def __del__(self):
Rectangle.number_of_instances -= 1
return print('Bye rectangle...')<|docstring|>deletes a rectangle<|endoftext|>
|
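The records above together make up the complete 7-rectangle.py class. Assuming the class body also declares the two class attributes the methods rely on, number_of_instances = 0 and print_symbol = '#' (standard for this exercise but not shown in this excerpt), it behaves like this:

r = Rectangle(2, 3)
print(r.area())       # 6
print(r.perimeter())  # 10
print(r)              # three rows of '##'
print(repr(r))        # Rectangle(2, 3)
del r                 # prints 'Bye rectangle...' and decrements the counter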
272396952efcab4dffb6241282b410bc0a3da9b64e21f6c826471882c6a4dd33
|
def generate_pdf():
    'This method generates a pdf report of cinema statistics.\n    It includes information such as the total sum of sold tickets and the total income.'
pdf_filename = 'cinemas_report.pdf'
c = canvas.Canvas(pdf_filename, pagesize=portrait(A4))
w = defaultPageSize[0]
h = defaultPageSize[1]
c.setFont('Helvetica', 35, leading=None)
c.drawCentredString((w / 2), 740, 'Reports of the Cinemas')
cinemas = list_of_cinemas()
cinemas_stats = cinema_stats()
(cnx, cursor) = create_connection()
for (name, cinema_id) in cinemas:
c.setFontSize(20)
c.drawCentredString((w / 2), 600, name)
c.setFontSize(16)
        text = 'Total sum of sold tickets: {}'.format(cinemas_stats[cinema_id][0])
c.drawString((w / 10), 520, text)
        text = 'Average amount of tickets sold per movie: {}'.format(cinemas_stats[cinema_id][1])
c.drawString((w / 10), 490, text)
text = 'Percentage: {}'.format(cinemas_stats[cinema_id][2])
c.drawString((w / 10), 460, text)
text = 'Total income: {} PLN'.format(cinemas_stats[cinema_id][3])
c.drawString((w / 10), 430, text)
c.showPage()
close_connection(cnx, cursor)
c.save()
return True
|
This method generates a pdf report of cinema statistics.
It includes information such as the total sum of sold tickets and the total income.
|
app_dir/reports.py
|
generate_pdf
|
mkwiatek770/Cinema_Assistant
| 1 |
python
|
def generate_pdf():
    'This method generates a pdf report of cinema statistics.\n    It includes information such as the total sum of sold tickets and the total income.'
pdf_filename = 'cinemas_report.pdf'
c = canvas.Canvas(pdf_filename, pagesize=portrait(A4))
w = defaultPageSize[0]
h = defaultPageSize[1]
c.setFont('Helvetica', 35, leading=None)
c.drawCentredString((w / 2), 740, 'Reports of the Cinemas')
cinemas = list_of_cinemas()
cinemas_stats = cinema_stats()
(cnx, cursor) = create_connection()
for (name, cinema_id) in cinemas:
c.setFontSize(20)
c.drawCentredString((w / 2), 600, name)
c.setFontSize(16)
        text = 'Total sum of sold tickets: {}'.format(cinemas_stats[cinema_id][0])
c.drawString((w / 10), 520, text)
        text = 'Average amount of tickets sold per movie: {}'.format(cinemas_stats[cinema_id][1])
c.drawString((w / 10), 490, text)
text = 'Percentage: {}'.format(cinemas_stats[cinema_id][2])
c.drawString((w / 10), 460, text)
text = 'Total income: {} PLN'.format(cinemas_stats[cinema_id][3])
c.drawString((w / 10), 430, text)
c.showPage()
close_connection(cnx, cursor)
c.save()
return True
|
def generate_pdf():
    'This method generates a pdf report of cinema statistics.\n    It includes information such as the total sum of sold tickets and the total income.'
pdf_filename = 'cinemas_report.pdf'
c = canvas.Canvas(pdf_filename, pagesize=portrait(A4))
w = defaultPageSize[0]
h = defaultPageSize[1]
c.setFont('Helvetica', 35, leading=None)
c.drawCentredString((w / 2), 740, 'Reports of the Cinemas')
cinemas = list_of_cinemas()
cinemas_stats = cinema_stats()
(cnx, cursor) = create_connection()
for (name, cinema_id) in cinemas:
c.setFontSize(20)
c.drawCentredString((w / 2), 600, name)
c.setFontSize(16)
        text = 'Total sum of sold tickets: {}'.format(cinemas_stats[cinema_id][0])
c.drawString((w / 10), 520, text)
        text = 'Average amount of tickets sold per movie: {}'.format(cinemas_stats[cinema_id][1])
c.drawString((w / 10), 490, text)
text = 'Percentage: {}'.format(cinemas_stats[cinema_id][2])
c.drawString((w / 10), 460, text)
text = 'Total income: {} PLN'.format(cinemas_stats[cinema_id][3])
c.drawString((w / 10), 430, text)
c.showPage()
close_connection(cnx, cursor)
c.save()
    return True<|docstring|>This method generates a pdf report of cinema statistics.
It includes information such as the total sum of sold tickets and the total income.<|endoftext|>
|
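generate_pdf follows the standard reportlab canvas workflow: draw onto the current page, end it with showPage(), and write everything to disk with a single save() at the end. Note that reportlab's y coordinates grow upward from the bottom-left corner, which is why the title sits at y=740 near the top of an A4 page. A minimal self-contained sketch of the same workflow; the filename and strings are illustrative:

from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, portrait

c = canvas.Canvas('example.pdf', pagesize=portrait(A4))
w, h = portrait(A4)                      # about (595, 842) points
c.setFont('Helvetica', 20)
c.drawCentredString(w / 2, h - 100, 'Page title')
c.drawString(w / 10, h - 150, 'A left-aligned line of text')
c.showPage()                             # finish the current page
c.save()                                 # write example.pdf to disk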
c1f6e07570927183ba19df5e6c9460102d4363ae57c194d6ec83eabb380043ee
|
def generate_xls():
    'This method generates an xls document of cinema statistics.\n    It includes information such as the total sum of sold tickets and the total income.'
xls_filename = 'raport.xls'
(cnx, cursor) = create_connection()
workbook = xlsxwriter.Workbook(xls_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
bold.set_align('center')
money_format = workbook.add_format({'num_format': '#,##0 ZŁ'})
money_format.set_align('center')
worksheet.set_column(1, 0, 40)
worksheet.set_column(1, 1, 40)
worksheet.set_column(1, 2, 40)
worksheet.set_column(1, 3, 40)
cell_format = workbook.add_format()
cell_format.set_align('center')
worksheet.write('A1', 'Cinemas', bold)
    worksheet.write('A2', 'Sold tickets', bold)
worksheet.write('A3', 'Tickets per movie', bold)
worksheet.write('A4', 'Total Income', bold)
info = cinema_stats()
row = 0
col = 1
for cinema_id in info:
sql = '\n SELECT name FROM cinemas \n WHERE cinema_id = %s\n '
cursor.execute(sql, (cinema_id,))
cinema_name = cursor.fetchone()[0]
worksheet.write(row, col, cinema_name, bold)
worksheet.write((row + 1), col, info[cinema_id][0], cell_format)
worksheet.write((row + 2), col, info[cinema_id][1], cell_format)
worksheet.write((row + 3), col, info[cinema_id][3], cell_format)
col += 1
    workbook.close()
    close_connection(cnx, cursor)
|
This method generates an xls document of cinema statistics.
It includes information such as the total sum of sold tickets and the total income.
|
app_dir/reports.py
|
generate_xls
|
mkwiatek770/Cinema_Assistant
| 1 |
python
|
def generate_xls():
    'This method generates an xls document of cinema statistics.\n    It includes information such as the total sum of sold tickets and the total income.'
xls_filename = 'raport.xls'
(cnx, cursor) = create_connection()
workbook = xlsxwriter.Workbook(xls_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
bold.set_align('center')
money_format = workbook.add_format({'num_format': '#,##0 ZŁ'})
money_format.set_align('center')
worksheet.set_column(1, 0, 40)
worksheet.set_column(1, 1, 40)
worksheet.set_column(1, 2, 40)
worksheet.set_column(1, 3, 40)
cell_format = workbook.add_format()
cell_format.set_align('center')
worksheet.write('A1', 'Cinemas', bold)
    worksheet.write('A2', 'Sold tickets', bold)
worksheet.write('A3', 'Tickets per movie', bold)
worksheet.write('A4', 'Total Income', bold)
info = cinema_stats()
row = 0
col = 1
for cinema_id in info:
sql = '\n SELECT name FROM cinemas \n WHERE cinema_id = %s\n '
cursor.execute(sql, (cinema_id,))
cinema_name = cursor.fetchone()[0]
worksheet.write(row, col, cinema_name, bold)
worksheet.write((row + 1), col, info[cinema_id][0], cell_format)
worksheet.write((row + 2), col, info[cinema_id][1], cell_format)
worksheet.write((row + 3), col, info[cinema_id][3], cell_format)
col += 1
    workbook.close()
    close_connection(cnx, cursor)
|
def generate_xls():
    'This method generates an xls document of cinema statistics.\n    It includes information such as the total sum of sold tickets and the total income.'
xls_filename = 'raport.xls'
(cnx, cursor) = create_connection()
workbook = xlsxwriter.Workbook(xls_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
bold.set_align('center')
money_format = workbook.add_format({'num_format': '#,##0 ZŁ'})
money_format.set_align('center')
worksheet.set_column(1, 0, 40)
worksheet.set_column(1, 1, 40)
worksheet.set_column(1, 2, 40)
worksheet.set_column(1, 3, 40)
cell_format = workbook.add_format()
cell_format.set_align('center')
worksheet.write('A1', 'Cinemas', bold)
    worksheet.write('A2', 'Sold tickets', bold)
worksheet.write('A3', 'Tickets per movie', bold)
worksheet.write('A4', 'Total Income', bold)
info = cinema_stats()
row = 0
col = 1
for cinema_id in info:
sql = '\n SELECT name FROM cinemas \n WHERE cinema_id = %s\n '
cursor.execute(sql, (cinema_id,))
cinema_name = cursor.fetchone()[0]
worksheet.write(row, col, cinema_name, bold)
worksheet.write((row + 1), col, info[cinema_id][0], cell_format)
worksheet.write((row + 2), col, info[cinema_id][1], cell_format)
worksheet.write((row + 3), col, info[cinema_id][3], cell_format)
col += 1
    workbook.close()
    close_connection(cnx, cursor)<|docstring|>This method generates an xls document of cinema statistics.
It includes information such as the total sum of sold tickets and the total income.<|endoftext|>
|
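xlsxwriter buffers the whole workbook in memory and only writes the file when the workbook is closed, so the close call matters. A minimal self-contained version of the write/format pattern used above; the filename and cell contents are illustrative:

import xlsxwriter

workbook = xlsxwriter.Workbook('example.xlsx')
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
worksheet.set_column(0, 0, 40)           # widen column A
worksheet.write('A1', 'Cinemas', bold)   # A1-style addressing
worksheet.write(1, 0, 12345)             # (row, col) addressing also works
workbook.close()                         # this is what writes the file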
4f0ae01e08baf81844fc4cdc4110d381045f361d486836e7a5576b702093b0c3
|
def crystallographic_resolution(wavelength, pixel_center_distance, detector_distance):
'\n Returns crystallographic resolution :math:`R_f` (full-period resolution) in unit meter\n\n .. math::\n\n R_f = \\frac{ \\lambda }{ 2 \\sin\\left( \\arctan\\left( \\frac{X}{D} \\right) / 2 \\right) }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n\n :pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter\n \n :detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter\n '
return ((wavelength / 2.0) / numpy.sin((numpy.arctan((pixel_center_distance / detector_distance)) / 2.0)))
|
Returns crystallographic resolution :math:`R_f` (full-period resolution) in unit meter
.. math::
R_f = \frac{ \lambda }{ 2 \sin\left( \arctan\left( \frac{X}{D} \right) / 2 \right) }
Args:
:wavelength (float): Photon wavelength :math:`\lambda` in unit meter
:pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter
:detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter
|
condor/utils/diffraction.py
|
crystallographic_resolution
|
irischang020/condor
| 20 |
python
|
def crystallographic_resolution(wavelength, pixel_center_distance, detector_distance):
'\n Returns crystallographic resolution :math:`R_f` (full-period resolution) in unit meter\n\n .. math::\n\n R_f = \\frac{ \\lambda }{ 2 \\sin\\left( \\arctan\\left( \\frac{X}{D} \\right) / 2 \\right) }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n\n :pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter\n \n :detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter\n '
return ((wavelength / 2.0) / numpy.sin((numpy.arctan((pixel_center_distance / detector_distance)) / 2.0)))
|
def crystallographic_resolution(wavelength, pixel_center_distance, detector_distance):
'\n Returns crystallographic resolution :math:`R_f` (full-period resolution) in unit meter\n\n .. math::\n\n R_f = \\frac{ \\lambda }{ 2 \\sin\\left( \\arctan\\left( \\frac{X}{D} \\right) / 2 \\right) }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n\n :pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter\n \n :detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter\n '
return ((wavelength / 2.0) / numpy.sin((numpy.arctan((pixel_center_distance / detector_distance)) / 2.0)))<|docstring|>Returns crystallographic resolution :math:`R_f` (full-period resolution) in unit meter
.. math::
R_f = \frac{ \lambda }{ 2 \sin\left( \arctan\left( \frac{X}{D} \right) / 2 \right) }
Args:
:wavelength (float): Photon wavelength :math:`\lambda` in unit meter
:pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter
:detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter<|endoftext|>
|
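Plugging representative numbers into the formula makes the scale concrete; the values below are purely illustrative:

# 1 nm photons, pixel 10 cm off-axis, detector 1 m downstream.
R_f = crystallographic_resolution(wavelength=1e-09,
                                  pixel_center_distance=0.1,
                                  detector_distance=1.0)
print(R_f)   # ~1.0e-08 m, i.e. roughly 10 nm full-period resolution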
7d8b0d7318e62a88b8cf7a0293237de0dd50b02fe4a07a6bb4b700b662930816
|
def resolution_element(wavelength, pixel_center_distance, detector_distance):
'\n Returns length :math:`R_h` of one resolution element (half-period resolution) in unit meter\n\n .. math::\n\n R_h = \\frac{ \\lambda }{ 4 \\, \\sin\\left( \\arctan \\left( \\frac{X}{D} \\right) / 2 \\right) }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n\n :pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter\n \n :detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter\n '
return (0.5 * crystallographic_resolution(wavelength, pixel_center_distance, detector_distance))
|
Returns length :math:`R_h` of one resolution element (half-period resolution) in unit meter
.. math::
R_h = \frac{ \lambda }{ 4 \, \sin\left( \arctan \left( \frac{X}{D} \right) / 2 \right) }
Args:
:wavelength (float): Photon wavelength :math:`\lambda` in unit meter
:pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter
:detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter
|
condor/utils/diffraction.py
|
resolution_element
|
irischang020/condor
| 20 |
python
|
def resolution_element(wavelength, pixel_center_distance, detector_distance):
'\n Returns length :math:`R_h` of one resolution element (half-period resolution) in unit meter\n\n .. math::\n\n R_h = \\frac{ \\lambda }{ 4 \\, \\sin\\left( \\arctan \\left( \\frac{X}{D} \\right) / 2 \\right) }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n\n :pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter\n \n :detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter\n '
return (0.5 * crystallographic_resolution(wavelength, pixel_center_distance, detector_distance))
|
def resolution_element(wavelength, pixel_center_distance, detector_distance):
'\n Returns length :math:`R_h` of one resolution element (half-period resolution) in unit meter\n\n .. math::\n\n R_h = \\frac{ \\lambda }{ 4 \\, \\sin\\left( \\arctan \\left( \\frac{X}{D} \\right) / 2 \\right) }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n\n :pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter\n \n :detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter\n '
return (0.5 * crystallographic_resolution(wavelength, pixel_center_distance, detector_distance))<|docstring|>Returns length :math:`R_h` of one resolution element (half-period resolution) in unit meter
.. math::
R_h = \frac{ \lambda }{ 4 \, \sin\left( \arctan \left( \frac{X}{D} \right) / 2 \right) }
Args:
:wavelength (float): Photon wavelength :math:`\lambda` in unit meter
:pixel_center_distance (float): Distance :math:`X` between beam center and pixel measured orthogonally with respect to the beam axis. The unit is meter
:detector_distance: Distance :math:`D` between interaction point and detector plane in unit meter<|endoftext|>
|
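Because R_h is defined as half of R_f, the same illustrative inputs give half the value:

R_h = resolution_element(1e-09, 0.1, 1.0)
print(R_h)   # ~5.0e-09 m, half the R_f from the previous example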
e572bda57e7879ec7b58b67e7c463802e9464338650165714f85c859ac197aff
|
def nyquist_pixel_size(wavelength, detector_distance, particle_size):
'\n Returns size :math:`p_N` of one Nyquist pixel on the detector in unit meter\n\n .. math::\n \n p_N = \\frac{ D \\lambda }{ d }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n \n :detector_distance (float): Distance :math:`D` between interaction point and detector plane in unit meter\n\n :particle_size (float): Size or characteristic dimension :math:`d` of the particle in unit meter\n '
return ((detector_distance * wavelength) / particle_size)
|
Returns size :math:`p_N` of one Nyquist pixel on the detector in unit meter
.. math::
p_N = \frac{ D \lambda }{ d }
Args:
:wavelength (float): Photon wavelength :math:`\lambda` in unit meter
:detector_distance (float): Distance :math:`D` between interaction point and detector plane in unit meter
:particle_size (float): Size or characteristic dimension :math:`d` of the particle in unit meter
|
condor/utils/diffraction.py
|
nyquist_pixel_size
|
irischang020/condor
| 20 |
python
|
def nyquist_pixel_size(wavelength, detector_distance, particle_size):
'\n Returns size :math:`p_N` of one Nyquist pixel on the detector in unit meter\n\n .. math::\n \n p_N = \\frac{ D \\lambda }{ d }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n \n :detector_distance (float): Distance :math:`D` between interaction point and detector plane in unit meter\n\n :particle_size (float): Size or characteristic dimension :math:`d` of the particle in unit meter\n '
return ((detector_distance * wavelength) / particle_size)
|
def nyquist_pixel_size(wavelength, detector_distance, particle_size):
'\n Returns size :math:`p_N` of one Nyquist pixel on the detector in unit meter\n\n .. math::\n \n p_N = \\frac{ D \\lambda }{ d }\n\n Args:\n :wavelength (float): Photon wavelength :math:`\\lambda` in unit meter\n \n :detector_distance (float): Distance :math:`D` between interaction point and detector plane in unit meter\n\n :particle_size (float): Size or characteristic dimension :math:`d` of the particle in unit meter\n '
return ((detector_distance * wavelength) / particle_size)<|docstring|>Returns size :math:`p_N` of one Nyquist pixel on the detector in unit meter
.. math::
p_N = \frac{ D \lambda }{ d }
Args:
:wavelength (float): Photon wavelength :math:`\lambda` in unit meter
:detector_distance (float): Distance :math:`D` between interaction point and detector plane in unit meter
:particle_size (float): Size or characteristic dimension :math:`d` of the particle in unit meter<|endoftext|>
|
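A quick illustrative evaluation of p_N = D * lambda / d, for a 100 nm particle imaged with 1 nm photons on a detector 1 m away:

p_N = nyquist_pixel_size(wavelength=1e-09, detector_distance=1.0,
                         particle_size=100e-09)
print(p_N)   # 0.01 m: one Nyquist pixel spans 10 mm at the detector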
babbc18b295925db62206e1a25d27a36aad5e96518690f85fa475d5ff03ebfc9
|
def polarization_factor(x, y, detector_distance, polarization='ignore'):
    '\n    Returns polarization factor for a given geometry and polarization\n\n    Horizontally polarized:\n\n    .. math::\n\n        P = \\cos^2\\left(\\arcsin\\left(\\frac{x}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\n\n    Vertically polarized:\n\n    .. math::\n\n        P = \\cos^2\\left(\\arcsin\\left(\\frac{y}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\n\n    Unpolarized:\n\n    .. math::\n\n        P = 0.5\\left(1 + \\cos^2\\left(\\arcsin\\left(\\frac{\\sqrt{x^2+y^2}}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\\right)\n\n    Ignore polarization:\n\n    .. math::\n\n        P = 1\n\n    Args:\n        :x (float): horizontal pixel coordinate :math:`x` with respect to beam center in unit meter\n\n        :y (float): vertical pixel coordinate :math:`y` with respect to beam center in unit meter\n\n        :detector_distance (float): detector distance :math:`D` in unit meter\n\n        :polarization (str): Type of polarization can be either *vertical*, *horizontal*, *unpolarized*, or *ignore*\n    '
if (polarization not in ['ignore', 'vertical', 'horizontal', 'unpolarized']):
log_and_raise_error(logger, ('polarization="%s" is an invalid argument for this function.' % polarization))
return
if (polarization == 'ignore'):
P = 1.0
else:
r = numpy.sqrt((((x ** 2) + (y ** 2)) + (detector_distance ** 2)))
if (polarization == 'vertical'):
P = (numpy.cos(numpy.arcsin((y / r))) ** 2)
elif (polarization == 'horizontal'):
P = (numpy.cos(numpy.arcsin((x / r))) ** 2)
elif (polarization == 'unpolarized'):
P = (0.5 * (1.0 + (numpy.cos(numpy.arcsin((numpy.sqrt(((x ** 2) + (y ** 2))) / r))) ** 2)))
return P
|
Returns polarization factor for a given geometry and polarization
Horizontally polarized:
.. math::
P = \cos^2\left(\arcsin\left(\frac{x}{\sqrt{x^2+y^2+D^2}}\right)\right)
Vertically polarized:
.. math::
P = \cos^2\left(\arcsin\left(\frac{y}{\sqrt{x^2+y^2+D^2}}\right)\right)
Unpolarized:
.. math::
P = 0.5\left(1 + \cos^2\left(\arcsin\left(\frac{\sqrt{x^2+y^2}}{\sqrt{x^2+y^2+D^2}}\right)\right)\right)
Ignore polarization:
.. math::
P = 1
Args:
:x (float): horizontal pixel coordinate :math:`x` with respect to beam center in unit meter
:y (float): vertical pixel coordinate :math:`y` with respect to beam center in unit meter
:detector_distance (float): detector distance :math:`D` in unit meter
:polarization (str): Type of polarization can be either *vertical*, *horizontal*, *unpolarized*, or *ignore*
|
condor/utils/diffraction.py
|
polarization_factor
|
irischang020/condor
| 20 |
python
|
def polarization_factor(x, y, detector_distance, polarization='ignore'):
    '\n    Returns polarization factor for a given geometry and polarization\n\n    Horizontally polarized:\n\n    .. math::\n\n        P = \\cos^2\\left(\\arcsin\\left(\\frac{x}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\n\n    Vertically polarized:\n\n    .. math::\n\n        P = \\cos^2\\left(\\arcsin\\left(\\frac{y}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\n\n    Unpolarized:\n\n    .. math::\n\n        P = 0.5\\left(1 + \\cos^2\\left(\\arcsin\\left(\\frac{\\sqrt{x^2+y^2}}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\\right)\n\n    Ignore polarization:\n\n    .. math::\n\n        P = 1\n\n    Args:\n        :x (float): horizontal pixel coordinate :math:`x` with respect to beam center in unit meter\n\n        :y (float): vertical pixel coordinate :math:`y` with respect to beam center in unit meter\n\n        :detector_distance (float): detector distance :math:`D` in unit meter\n\n        :polarization (str): Type of polarization can be either *vertical*, *horizontal*, *unpolarized*, or *ignore*\n    '
if (polarization not in ['ignore', 'vertical', 'horizontal', 'unpolarized']):
log_and_raise_error(logger, ('polarization="%s" is an invalid argument for this function.' % polarization))
return
if (polarization == 'ignore'):
P = 1.0
else:
r = numpy.sqrt((((x ** 2) + (y ** 2)) + (detector_distance ** 2)))
if (polarization == 'vertical'):
P = (numpy.cos(numpy.arcsin((y / r))) ** 2)
elif (polarization == 'horizontal'):
P = (numpy.cos(numpy.arcsin((x / r))) ** 2)
elif (polarization == 'unpolarized'):
P = (0.5 * (1.0 + (numpy.cos(numpy.arcsin((numpy.sqrt(((x ** 2) + (y ** 2))) / r))) ** 2)))
return P
|
def polarization_factor(x, y, detector_distance, polarization='ignore'):
    '\n    Returns polarization factor for a given geometry and polarization\n\n    Horizontally polarized:\n\n    .. math::\n\n        P = \\cos^2\\left(\\arcsin\\left(\\frac{x}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\n\n    Vertically polarized:\n\n    .. math::\n\n        P = \\cos^2\\left(\\arcsin\\left(\\frac{y}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\n\n    Unpolarized:\n\n    .. math::\n\n        P = 0.5\\left(1 + \\cos^2\\left(\\arcsin\\left(\\frac{\\sqrt{x^2+y^2}}{\\sqrt{x^2+y^2+D^2}}\\right)\\right)\\right)\n\n    Ignore polarization:\n\n    .. math::\n\n        P = 1\n\n    Args:\n        :x (float): horizontal pixel coordinate :math:`x` with respect to beam center in unit meter\n\n        :y (float): vertical pixel coordinate :math:`y` with respect to beam center in unit meter\n\n        :detector_distance (float): detector distance :math:`D` in unit meter\n\n        :polarization (str): Type of polarization can be either *vertical*, *horizontal*, *unpolarized*, or *ignore*\n    '
if (polarization not in ['ignore', 'vertical', 'horizontal', 'unpolarized']):
log_and_raise_error(logger, ('polarization="%s" is an invalid argument for this function.' % polarization))
return
if (polarization == 'ignore'):
P = 1.0
else:
r = numpy.sqrt((((x ** 2) + (y ** 2)) + (detector_distance ** 2)))
if (polarization == 'vertical'):
P = (numpy.cos(numpy.arcsin((y / r))) ** 2)
elif (polarization == 'horizontal'):
P = (numpy.cos(numpy.arcsin((x / r))) ** 2)
elif (polarization == 'unpolarized'):
P = (0.5 * (1.0 + (numpy.cos(numpy.arcsin((numpy.sqrt(((x ** 2) + (y ** 2))) / r))) ** 2)))
return P<|docstring|>Returns polarization factor for a given geometry and polarization
Horizontally polarized:
.. math::
P = \cos^2\left(\arcsin\left(\frac{x}{\sqrt{x^2+y^2+D^2}}\right)\right)
Vertically polarized:
.. math::
P = \cos^2\left(\arcsin\left(\frac{y}{\sqrt{x^2+y^2+D^2}}\right)\right)
Unpolarized:
.. math::
P = 0.5\left(1 + \cos^2\left(\arcsin\left(\frac{\sqrt{x^2+y^2}}{\sqrt{x^2+y^2+D^2}}\right)\right)\right)
Ignore polarization:
.. math::
P = 1
Args:
:x (float): horizontal pixel coordinate :math:`x` with respect to beam center in unit meter
:y (float): vertical pixel coordinate :math:`y` with respect to beam center in unit meter
:detector_distance (float): detector distance :math:`D` in unit meter
:polarization (str): Type of polarization can be either *vertical*, *horizontal*, *unpolarized*, or *ignore*<|endoftext|>
|
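A quick consistency check on this implementation: for any pixel, the unpolarized factor equals the mean of the horizontal and vertical factors, because cos^2(arcsin(t)) = 1 - t^2 makes all three expressions linear in x^2/r^2 and y^2/r^2. Illustrative values:

import numpy

x, y, D = 0.1, 0.05, 1.0
P_h = polarization_factor(x, y, D, polarization='horizontal')
P_v = polarization_factor(x, y, D, polarization='vertical')
P_u = polarization_factor(x, y, D, polarization='unpolarized')
print(numpy.isclose(P_u, 0.5 * (P_h + P_v)))   # True
print(polarization_factor(x, y, D))            # 1.0, the 'ignore' default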
a031a63d347a3a1f432a371e230aeb6ac619ac38229951f8455ce839bbc15d2f
|
def file_mock(func):
'Create pseudo file object.'
@functools.wraps(func)
def wrapper(*args, **kwargs):
content = '\n'.join((f'{i},{i}' for i in range(6)))
with mock.patch('lib.image_data.open', mock.mock_open(read_data=content)) as f_mock:
func(*args, **kwargs)
return wrapper
|
Create pseudo file object.
|
dito/tests/test_image_data.py
|
file_mock
|
luise-strietzel/slurk-bots
| 0 |
python
|
def file_mock(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
content = '\n'.join((f'{i},{i}' for i in range(6)))
with mock.patch('lib.image_data.open', mock.mock_open(read_data=content)) as f_mock:
func(*args, **kwargs)
return wrapper
|
def file_mock(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
content = '\n'.join((f'{i},{i}' for i in range(6)))
with mock.patch('lib.image_data.open', mock.mock_open(read_data=content)) as f_mock:
func(*args, **kwargs)
return wrapper<|docstring|>Create pseudo file object.<|endoftext|>
|
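A test wrapped with file_mock sees every open() inside lib.image_data return the synthetic six-line CSV instead of a real file. A sketch of such a test; the loader name read_image_data is hypothetical, standing in for whatever function in lib.image_data actually calls open():

@file_mock
def test_reads_mocked_csv():
    from lib import image_data
    # read_image_data is an assumed name, not a documented slurk-bots API.
    rows = image_data.read_image_data('any-path.csv')
    assert len(rows) == 6  # six lines: '0,0' through '5,5'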