body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses, 1 value) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k) |
---|---|---|---|---|---|---|---|---|---|
1f25df9cc4ad5aef833db5d066fe61924110c6682c1c60b9709ed1fe306ec612 | def _separable_approx2(h, N=1):
' returns the N first approximations to the 2d function h\n whose sum should be h\n '
return np.cumsum([np.outer(fy, fx) for (fy, fx) in _separable_series2(h, N)], 0) | returns the N first approximations to the 2d function h
whose sum should be h | gputools/separable/separable_approx.py | _separable_approx2 | tlambert03/gputools | 89 | python | def _separable_approx2(h, N=1):
' returns the N first approximations to the 2d function h\n whose sum should be h\n '
return np.cumsum([np.outer(fy, fx) for (fy, fx) in _separable_series2(h, N)], 0) | def _separable_approx2(h, N=1):
' returns the N first approximations to the 2d function h\n whose sum should be h\n '
return np.cumsum([np.outer(fy, fx) for (fy, fx) in _separable_series2(h, N)], 0)<|docstring|>returns the N first approximations to the 2d function h
whose sum should be h<|endoftext|> |
27fb3a8d27c5958201c98f6ea08f3e694eacaf5c7db2ad3190b12f3a6a0ca5ea | def _separable_series3(h, N=1, verbose=False):
' finds separable approximations to the 3d kernel h\n returns res = (hx,hy,hz)[N]\n s.t. h \x07pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])\n\n FIXME: This is just a naive and slow first try!\n '
(hx, hy, hz) = ([], [], [])
res = h.copy()
for i in range(N):
(_hx, _hy, _hz, P) = _splitrank3(res, verbose=verbose)
res -= P
hx.append(_hx)
hy.append(_hy)
hz.append(_hz)
return np.array(list(zip(hx, hy, hz))) | finds separable approximations to the 3d kernel h
returns res = (hx,hy,hz)[N]
s.t. h pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])
FIXME: This is just a naive and slow first try! | gputools/separable/separable_approx.py | _separable_series3 | tlambert03/gputools | 89 | python | def _separable_series3(h, N=1, verbose=False):
' finds separable approximations to the 3d kernel h\n returns res = (hx,hy,hz)[N]\n s.t. h \x07pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])\n\n FIXME: This is just a naive and slow first try!\n '
(hx, hy, hz) = ([], [], [])
res = h.copy()
for i in range(N):
(_hx, _hy, _hz, P) = _splitrank3(res, verbose=verbose)
res -= P
hx.append(_hx)
hy.append(_hy)
hz.append(_hz)
return np.array(list(zip(hx, hy, hz))) | def _separable_series3(h, N=1, verbose=False):
' finds separable approximations to the 3d kernel h\n returns res = (hx,hy,hz)[N]\n s.t. h \x07pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])\n\n FIXME: This is just a naive and slow first try!\n '
(hx, hy, hz) = ([], [], [])
res = h.copy()
for i in range(N):
(_hx, _hy, _hz, P) = _splitrank3(res, verbose=verbose)
res -= P
hx.append(_hx)
hy.append(_hy)
hz.append(_hz)
return np.array(list(zip(hx, hy, hz)))<|docstring|>finds separable approximations to the 3d kernel h
returns res = (hx,hy,hz)[N]
s.t. h pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])
FIXME: This is just a naive and slow first try!<|endoftext|> |
fdac818e7bbcd366ba8e7eb93a262dbf7656d0e0ac4973a8389c8552bd1e807e | def _separable_approx3(h, N=1):
' returns the N first approximations to the 3d function h\n '
return np.cumsum([np.einsum('i,j,k', fz, fy, fx) for (fz, fy, fx) in _separable_series3(h, N)], 0) | returns the N first approximations to the 3d function h | gputools/separable/separable_approx.py | _separable_approx3 | tlambert03/gputools | 89 | python | def _separable_approx3(h, N=1):
' \n '
return np.cumsum([np.einsum('i,j,k', fz, fy, fx) for (fz, fy, fx) in _separable_series3(h, N)], 0) | def _separable_approx3(h, N=1):
' \n '
return np.cumsum([np.einsum('i,j,k', fz, fy, fx) for (fz, fy, fx) in _separable_series3(h, N)], 0)<|docstring|>returns the N first approximations to the 3d function h<|endoftext|> |
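The 3d helpers above rebuild the full kernel with `np.einsum('i,j,k', fz, fy, fx)`. A short self-contained illustration of that reconstruction (the Hanning windows are arbitrary example factors):

```python
import numpy as np

# Three arbitrary 1d factors; einsum('i,j,k', ...) forms their outer product.
fz, fy, fx = np.hanning(5), np.hanning(7), np.hanning(9)
kernel = np.einsum('i,j,k', fz, fy, fx)

print(kernel.shape)  # (5, 7, 9)
# Same result as broadcasting the three factors against each other.
print(np.allclose(kernel, fz[:, None, None] * fy[None, :, None] * fx[None, None, :]))  # True
```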
ef72023021582cc035cc4fa3a8d81d7ba4eb4cde5faa34b151a8ba9d108ae84d | def separable_series(h, N=1):
'\n finds the first N rank 1 tensors such that their sum approximates\n the tensor h (2d or 3d) best\n\n returns (e.g. for 3d case) res = (hx,hy,hz)[i]\n\n s.t.\n\n h \x07pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])\n\n Parameters\n ----------\n h: ndarray\n input array (2 or 2 dimensional)\n N: int\n order of approximation\n\n Returns\n -------\n res, the series of tensors\n res[i] = (hx,hy,hz)[i]\n\n '
if (h.ndim == 2):
return _separable_series2(h, N)
elif (h.ndim == 3):
return _separable_series3(h, N)
else:
raise ValueError(('unsupported array dimension: %s (only 2d or 3d) ' % h.ndim)) | finds the first N rank 1 tensors such that their sum approximates
the tensor h (2d or 3d) best
returns (e.g. for 3d case) res = (hx,hy,hz)[i]
s.t.
h pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])
Parameters
----------
h: ndarray
input array (2 or 2 dimensional)
N: int
order of approximation
Returns
-------
res, the series of tensors
res[i] = (hx,hy,hz)[i] | gputools/separable/separable_approx.py | separable_series | tlambert03/gputools | 89 | python | def separable_series(h, N=1):
'\n finds the first N rank 1 tensors such that their sum approximates\n the tensor h (2d or 3d) best\n\n returns (e.g. for 3d case) res = (hx,hy,hz)[i]\n\n s.t.\n\n h \x07pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])\n\n Parameters\n ----------\n h: ndarray\n input array (2 or 2 dimensional)\n N: int\n order of approximation\n\n Returns\n -------\n res, the series of tensors\n res[i] = (hx,hy,hz)[i]\n\n '
if (h.ndim == 2):
return _separable_series2(h, N)
elif (h.ndim == 3):
return _separable_series3(h, N)
else:
raise ValueError(('unsupported array dimension: %s (only 2d or 3d) ' % h.ndim)) | def separable_series(h, N=1):
'\n finds the first N rank 1 tensors such that their sum approximates\n the tensor h (2d or 3d) best\n\n returns (e.g. for 3d case) res = (hx,hy,hz)[i]\n\n s.t.\n\n h \x07pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])\n\n Parameters\n ----------\n h: ndarray\n input array (2 or 2 dimensional)\n N: int\n order of approximation\n\n Returns\n -------\n res, the series of tensors\n res[i] = (hx,hy,hz)[i]\n\n '
if (h.ndim == 2):
return _separable_series2(h, N)
elif (h.ndim == 3):
return _separable_series3(h, N)
else:
raise ValueError(('unsupported array dimension: %s (only 2d or 3d) ' % h.ndim))<|docstring|>finds the first N rank 1 tensors such that their sum approximates
the tensor h (2d or 3d) best
returns (e.g. for 3d case) res = (hx,hy,hz)[i]
s.t.
h pprox sum_i einsum("i,j,k",res[i,0],res[i,1],res[i,2])
Parameters
----------
h: ndarray
input array (2 or 2 dimensional)
N: int
order of approximation
Returns
-------
res, the series of tensors
res[i] = (hx,hy,hz)[i]<|endoftext|> |
b9d3ae0c656900ca787053e83aba3f79567e44d9f512817db3f79906930ceb9a | def separable_approx(h, N=1):
'\n finds the k-th rank approximation to h, where k = 1..N\n\n similar to separable_series\n\n Parameters\n ----------\n h: ndarray\n input array (2 or 2 dimensional)\n N: int\n order of approximation\n\n Returns\n -------\n all N apprxoimations res[i], the i-th approximation\n\n '
if (h.ndim == 2):
return _separable_approx2(h, N)
elif (h.ndim == 3):
return _separable_approx3(h, N)
else:
raise ValueError(('unsupported array dimension: %s (only 2d or 3d) ' % h.ndim)) | finds the k-th rank approximation to h, where k = 1..N
similar to separable_series
Parameters
----------
h: ndarray
input array (2 or 2 dimensional)
N: int
order of approximation
Returns
-------
all N apprxoimations res[i], the i-th approximation | gputools/separable/separable_approx.py | separable_approx | tlambert03/gputools | 89 | python | def separable_approx(h, N=1):
'\n finds the k-th rank approximation to h, where k = 1..N\n\n similar to separable_series\n\n Parameters\n ----------\n h: ndarray\n input array (2 or 2 dimensional)\n N: int\n order of approximation\n\n Returns\n -------\n all N apprxoimations res[i], the i-th approximation\n\n '
if (h.ndim == 2):
return _separable_approx2(h, N)
elif (h.ndim == 3):
return _separable_approx3(h, N)
else:
raise ValueError(('unsupported array dimension: %s (only 2d or 3d) ' % h.ndim)) | def separable_approx(h, N=1):
'\n finds the k-th rank approximation to h, where k = 1..N\n\n similar to separable_series\n\n Parameters\n ----------\n h: ndarray\n input array (2 or 2 dimensional)\n N: int\n order of approximation\n\n Returns\n -------\n all N apprxoimations res[i], the i-th approximation\n\n '
if (h.ndim == 2):
return _separable_approx2(h, N)
elif (h.ndim == 3):
return _separable_approx3(h, N)
else:
raise ValueError(('unsupported array dimension: %s (only 2d or 3d) ' % h.ndim))<|docstring|>finds the k-th rank approximation to h, where k = 1..N
similar to separable_series
Parameters
----------
h: ndarray
input array (2 or 2 dimensional)
N: int
order of approximation
Returns
-------
all N apprxoimations res[i], the i-th approximation<|endoftext|> |
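The `_separable_series2`/`_splitrank2` helpers that these functions call are not shown in these rows; presumably they perform an SVD-based rank-1 split. Below is a self-contained sketch of that idea under this assumption; the function name is illustrative and not part of the gputools API:

```python
import numpy as np

def separable_series2_svd(h, N=1):
    # Rank-1 terms of h via the SVD: h ~= sum_i outer(fy_i, fx_i).
    U, s, Vt = np.linalg.svd(h)
    return np.array([(np.sqrt(s[i]) * U[:, i], np.sqrt(s[i]) * Vt[i, :]) for i in range(N)])

# A perfectly separable test kernel: outer product of two 1d Gaussians.
x = np.linspace(-2, 2, 31)
g = np.exp(-x ** 2)
h = np.outer(g, g)

fy, fx = separable_series2_svd(h, N=1)[0]
print(np.allclose(np.outer(fy, fx), h))  # True: a single rank-1 term recovers h
```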
163b21ffa8db61a92f02adab8635443fa213ad8bd7eab3f6779ea1b3664bba12 | def get_issues_without_due_date(connection):
'Fin Issues where we need to set due_date value'
query = 'SELECT id FROM issues WHERE status IN :statuses AND due_date IS null'
return connection.execute(sa.text(query), statuses=STATUSES).fetchall() | Fin Issues where we need to set due_date value | src/ggrc/migrations/versions/20190412_84c5ff059f75_set_due_date_for_fixed_and_depricated_.py | get_issues_without_due_date | MikalaiMikalalai/ggrc-core | 1 | python | def get_issues_without_due_date(connection):
query = 'SELECT id FROM issues WHERE status IN :statuses AND due_date IS null'
return connection.execute(sa.text(query), statuses=STATUSES).fetchall() | def get_issues_without_due_date(connection):
query = 'SELECT id FROM issues WHERE status IN :statuses AND due_date IS null'
return connection.execute(sa.text(query), statuses=STATUSES).fetchall()<|docstring|>Fin Issues where we need to set due_date value<|endoftext|> |
04b9f3db00ccc73d4b28d7a5f237039ec71f5f863b763a103b6136fb19deaae6 | def get_revision_due_date(con, issue_id):
'Fund due_date value in related revision'
query = "SELECT content, created_at FROM revisions WHERE resource_type = 'Issue' AND resource_id = :id ORDER BY id DESC"
all_revisions = con.execute(sa.text(query), id=issue_id)
result = None
last_status = None
for rev in all_revisions:
if (not result):
last_status = json.loads(rev['content'])['status']
elif (json.loads(rev['content'])['status'] != last_status):
break
result = rev['created_at']
return result | Fund due_date value in related revision | src/ggrc/migrations/versions/20190412_84c5ff059f75_set_due_date_for_fixed_and_depricated_.py | get_revision_due_date | MikalaiMikalalai/ggrc-core | 1 | python | def get_revision_due_date(con, issue_id):
query = "SELECT content, created_at FROM revisions WHERE resource_type = 'Issue' AND resource_id = :id ORDER BY id DESC"
all_revisions = con.execute(sa.text(query), id=issue_id)
result = None
last_status = None
for rev in all_revisions:
if (not result):
last_status = json.loads(rev['content'])['status']
elif (json.loads(rev['content'])['status'] != last_status):
break
result = rev['created_at']
return result | def get_revision_due_date(con, issue_id):
query = "SELECT content, created_at FROM revisions WHERE resource_type = 'Issue' AND resource_id = :id ORDER BY id DESC"
all_revisions = con.execute(sa.text(query), id=issue_id)
result = None
last_status = None
for rev in all_revisions:
if (not result):
last_status = json.loads(rev['content'])['status']
elif (json.loads(rev['content'])['status'] != last_status):
break
result = rev['created_at']
return result<|docstring|>Fund due_date value in related revision<|endoftext|> |
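The loop in `get_revision_due_date` walks revisions from newest to oldest and keeps advancing `result` while the status stays the same, so it returns the timestamp at which the issue first entered its final status. A standalone sketch of that logic with plain dicts standing in for database rows (statuses and dates are made up):

```python
import json

revisions = [  # ordered newest first, as in the SQL query above
    {"content": json.dumps({"status": "Fixed"}), "created_at": "2019-04-03"},
    {"content": json.dumps({"status": "Fixed"}), "created_at": "2019-04-01"},
    {"content": json.dumps({"status": "In Review"}), "created_at": "2019-03-20"},
]

result = last_status = None
for rev in revisions:
    status = json.loads(rev["content"])["status"]
    if not result:
        last_status = status
    elif status != last_status:
        break
    result = rev["created_at"]

print(result)  # 2019-04-01: when the issue first reached its final status
```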
1622720df8bc0abf91c1e03d02865576725065783f0a57d49a15ddb10d608f12 | def upgrade():
'Upgrade database schema and/or data, creating a new revision.'
connection = op.get_bind()
issues_for_update = get_issues_without_due_date(connection)
issues_ids = [issue['id'] for issue in issues_for_update]
for issue_id in issues_ids:
due_date = get_revision_due_date(connection, issue_id)
set_due_date(connection, issue_id, due_date)
utils.add_to_objects_without_revisions_bulk(connection, issues_ids, 'Issue', 'modified') | Upgrade database schema and/or data, creating a new revision. | src/ggrc/migrations/versions/20190412_84c5ff059f75_set_due_date_for_fixed_and_depricated_.py | upgrade | MikalaiMikalalai/ggrc-core | 1 | python | def upgrade():
connection = op.get_bind()
issues_for_update = get_issues_without_due_date(connection)
issues_ids = [issue['id'] for issue in issues_for_update]
for issue_id in issues_ids:
due_date = get_revision_due_date(connection, issue_id)
set_due_date(connection, issue_id, due_date)
utils.add_to_objects_without_revisions_bulk(connection, issues_ids, 'Issue', 'modified') | def upgrade():
connection = op.get_bind()
issues_for_update = get_issues_without_due_date(connection)
issues_ids = [issue['id'] for issue in issues_for_update]
for issue_id in issues_ids:
due_date = get_revision_due_date(connection, issue_id)
set_due_date(connection, issue_id, due_date)
utils.add_to_objects_without_revisions_bulk(connection, issues_ids, 'Issue', 'modified')<|docstring|>Upgrade database schema and/or data, creating a new revision.<|endoftext|> |
25eb65cb2baefeaff9ce12a6638cc9c687d20f629c8691da947804dff60199e8 | def downgrade():
'Downgrade database schema and/or data back to the previous revision.'
raise NotImplementedError('Downgrade is not supported') | Downgrade database schema and/or data back to the previous revision. | src/ggrc/migrations/versions/20190412_84c5ff059f75_set_due_date_for_fixed_and_depricated_.py | downgrade | MikalaiMikalalai/ggrc-core | 1 | python | def downgrade():
raise NotImplementedError('Downgrade is not supported') | def downgrade():
raise NotImplementedError('Downgrade is not supported')<|docstring|>Downgrade database schema and/or data back to the previous revision.<|endoftext|> |
1586f3ebb7740132e8b5d4cf628a6afd1c53939eff485661daeb5c604d3b1789 | def earth_distance(pos1, pos2):
'Taken from http://www.johndcook.com/python_longitude_latitude.html.'
(lat1, long1) = pos1
(lat2, long2) = pos2
degrees_to_radians = (pi / 180.0)
phi1 = ((90.0 - lat1) * degrees_to_radians)
phi2 = ((90.0 - lat2) * degrees_to_radians)
theta1 = (long1 * degrees_to_radians)
theta2 = (long2 * degrees_to_radians)
c = (((sin(phi1) * sin(phi2)) * cos((theta1 - theta2))) + (cos(phi1) * cos(phi2)))
arc = acos(c)
return (arc * 6373) | Taken from http://www.johndcook.com/python_longitude_latitude.html. | workshops/util.py | earth_distance | r-gaia-cs/swc-amy | 0 | python | def earth_distance(pos1, pos2):
(lat1, long1) = pos1
(lat2, long2) = pos2
degrees_to_radians = (pi / 180.0)
phi1 = ((90.0 - lat1) * degrees_to_radians)
phi2 = ((90.0 - lat2) * degrees_to_radians)
theta1 = (long1 * degrees_to_radians)
theta2 = (long2 * degrees_to_radians)
c = (((sin(phi1) * sin(phi2)) * cos((theta1 - theta2))) + (cos(phi1) * cos(phi2)))
arc = acos(c)
return (arc * 6373) | def earth_distance(pos1, pos2):
(lat1, long1) = pos1
(lat2, long2) = pos2
degrees_to_radians = (pi / 180.0)
phi1 = ((90.0 - lat1) * degrees_to_radians)
phi2 = ((90.0 - lat2) * degrees_to_radians)
theta1 = (long1 * degrees_to_radians)
theta2 = (long2 * degrees_to_radians)
c = (((sin(phi1) * sin(phi2)) * cos((theta1 - theta2))) + (cos(phi1) * cos(phi2)))
arc = acos(c)
return (arc * 6373)<|docstring|>Taken from http://www.johndcook.com/python_longitude_latitude.html.<|endoftext|> |
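A quick usage sketch for `earth_distance`; it assumes the function above has been defined in the current session, and the coordinates are rough city-centre values:

```python
# Assumes earth_distance() from the row above is already defined.
paris = (48.8566, 2.3522)     # (latitude, longitude)
london = (51.5074, -0.1278)

print(round(earth_distance(paris, london)))  # roughly 344 km
```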
4c1f801cc9d746c79489c4df1c8c0de47c039a917ca6db36b7b1119d33e9e17d | def upload_person_task_csv(stream):
'Read people from CSV and return a JSON-serializable list of dicts.\n\n The input `stream` should be a file-like object that returns\n Unicode data.\n\n "Serializability" is required because we put this data into session. See\n https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.\n\n Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which\n no data was given.\n '
result = []
reader = csv.DictReader(stream)
empty_fields = set()
for row in reader:
entry = {}
for col in Person.PERSON_UPLOAD_FIELDS:
if (col in row):
entry[col] = row[col].strip()
else:
entry[col] = None
empty_fields.add(col)
for col in Person.PERSON_TASK_EXTRA_FIELDS:
entry[col] = row.get(col, None)
entry['errors'] = None
result.append(entry)
return (result, list(empty_fields)) | Read people from CSV and return a JSON-serializable list of dicts.
The input `stream` should be a file-like object that returns
Unicode data.
"Serializability" is required because we put this data into session. See
https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.
Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which
no data was given. | workshops/util.py | upload_person_task_csv | r-gaia-cs/swc-amy | 0 | python | def upload_person_task_csv(stream):
'Read people from CSV and return a JSON-serializable list of dicts.\n\n The input `stream` should be a file-like object that returns\n Unicode data.\n\n "Serializability" is required because we put this data into session. See\n https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.\n\n Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which\n no data was given.\n '
result = []
reader = csv.DictReader(stream)
empty_fields = set()
for row in reader:
entry = {}
for col in Person.PERSON_UPLOAD_FIELDS:
if (col in row):
entry[col] = row[col].strip()
else:
entry[col] = None
empty_fields.add(col)
for col in Person.PERSON_TASK_EXTRA_FIELDS:
entry[col] = row.get(col, None)
entry['errors'] = None
result.append(entry)
return (result, list(empty_fields)) | def upload_person_task_csv(stream):
'Read people from CSV and return a JSON-serializable list of dicts.\n\n The input `stream` should be a file-like object that returns\n Unicode data.\n\n "Serializability" is required because we put this data into session. See\n https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.\n\n Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which\n no data was given.\n '
result = []
reader = csv.DictReader(stream)
empty_fields = set()
for row in reader:
entry = {}
for col in Person.PERSON_UPLOAD_FIELDS:
if (col in row):
entry[col] = row[col].strip()
else:
entry[col] = None
empty_fields.add(col)
for col in Person.PERSON_TASK_EXTRA_FIELDS:
entry[col] = row.get(col, None)
entry['errors'] = None
result.append(entry)
return (result, list(empty_fields))<|docstring|>Read people from CSV and return a JSON-serializable list of dicts.
The input `stream` should be a file-like object that returns
Unicode data.
"Serializability" is required because we put this data into session. See
https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.
Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which
no data was given.<|endoftext|> |
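`upload_person_task_csv` depends on the Django `Person` model, so it does not run on its own. The standalone sketch below shows only the `csv.DictReader` pattern it relies on, with a made-up field list standing in for `Person.PERSON_UPLOAD_FIELDS`:

```python
import csv
import io

PERSON_UPLOAD_FIELDS = ["personal", "middle", "family", "email"]  # illustrative only

stream = io.StringIO("personal,family,email\nGrace,Hopper,grace@example.org\n")
rows = []
for row in csv.DictReader(stream):
    # Missing columns become None, present ones are stripped, as in the function above.
    rows.append({col: (row[col].strip() if col in row else None) for col in PERSON_UPLOAD_FIELDS})

print(rows)
# [{'personal': 'Grace', 'middle': None, 'family': 'Hopper', 'email': 'grace@example.org'}]
```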
e7c8d42fba7cf97e28e99e3db4193ebb2056f6dbd0e290fdbb62634f761cd6eb | def verify_upload_person_task(data):
'\n Verify that uploaded data is correct. Show errors by populating ``errors``\n dictionary item. This function changes ``data`` in place.\n '
errors_occur = False
for item in data:
errors = []
event = item.get('event', None)
if event:
try:
Event.objects.get(slug=event)
except Event.DoesNotExist:
errors.append(u'Event with slug {0} does not exist.'.format(event))
role = item.get('role', None)
if role:
try:
Role.objects.get(name=role)
except Role.DoesNotExist:
errors.append(u'Role with name {0} does not exist.'.format(role))
except Role.MultipleObjectsReturned:
errors.append(u'More than one role named {0} exists.'.format(role))
email = item.get('email', None)
personal = item.get('personal', None)
middle = item.get('middle', None)
family = item.get('family', None)
person = None
if email:
try:
person = Person.objects.get(email__iexact=email)
assert (person.personal == personal)
assert (person.middle == middle)
assert (person.family == family)
except Person.DoesNotExist:
pass
except AssertionError:
errors.append("Personal, middle or family name of existing user don't match: {0} vs {1}, {2} vs {3}, {4} vs {5}".format(personal, person.personal, middle, person.middle, family, person.family))
if person:
if (not any([event, role])):
errors.append('User exists but no event and role to assign the user to was provided')
else:
try:
Task.objects.get(event__slug=event, role__name=role, person=person)
except Task.DoesNotExist:
pass
else:
errors.append('Existing person {2} already has role {0} in event {1}'.format(role, event, person))
if ((event and (not role)) or (role and (not event))):
errors.append('Must have both or either of event ({0}) and role ({1})'.format(event, role))
if errors:
errors_occur = True
item['errors'] = errors
return errors_occur | Verify that uploaded data is correct. Show errors by populating ``errors``
dictionary item. This function changes ``data`` in place. | workshops/util.py | verify_upload_person_task | r-gaia-cs/swc-amy | 0 | python | def verify_upload_person_task(data):
'\n Verify that uploaded data is correct. Show errors by populating ``errors``\n dictionary item. This function changes ``data`` in place.\n '
errors_occur = False
for item in data:
errors = []
event = item.get('event', None)
if event:
try:
Event.objects.get(slug=event)
except Event.DoesNotExist:
errors.append(u'Event with slug {0} does not exist.'.format(event))
role = item.get('role', None)
if role:
try:
Role.objects.get(name=role)
except Role.DoesNotExist:
errors.append(u'Role with name {0} does not exist.'.format(role))
except Role.MultipleObjectsReturned:
errors.append(u'More than one role named {0} exists.'.format(role))
email = item.get('email', None)
personal = item.get('personal', None)
middle = item.get('middle', None)
family = item.get('family', None)
person = None
if email:
try:
person = Person.objects.get(email__iexact=email)
assert (person.personal == personal)
assert (person.middle == middle)
assert (person.family == family)
except Person.DoesNotExist:
pass
except AssertionError:
errors.append("Personal, middle or family name of existing user don't match: {0} vs {1}, {2} vs {3}, {4} vs {5}".format(personal, person.personal, middle, person.middle, family, person.family))
if person:
if (not any([event, role])):
errors.append('User exists but no event and role to assign the user to was provided')
else:
try:
Task.objects.get(event__slug=event, role__name=role, person=person)
except Task.DoesNotExist:
pass
else:
errors.append('Existing person {2} already has role {0} in event {1}'.format(role, event, person))
if ((event and (not role)) or (role and (not event))):
errors.append('Must have both or either of event ({0}) and role ({1})'.format(event, role))
if errors:
errors_occur = True
item['errors'] = errors
return errors_occur | def verify_upload_person_task(data):
'\n Verify that uploaded data is correct. Show errors by populating ``errors``\n dictionary item. This function changes ``data`` in place.\n '
errors_occur = False
for item in data:
errors = []
event = item.get('event', None)
if event:
try:
Event.objects.get(slug=event)
except Event.DoesNotExist:
errors.append(u'Event with slug {0} does not exist.'.format(event))
role = item.get('role', None)
if role:
try:
Role.objects.get(name=role)
except Role.DoesNotExist:
errors.append(u'Role with name {0} does not exist.'.format(role))
except Role.MultipleObjectsReturned:
errors.append(u'More than one role named {0} exists.'.format(role))
email = item.get('email', None)
personal = item.get('personal', None)
middle = item.get('middle', None)
family = item.get('family', None)
person = None
if email:
try:
person = Person.objects.get(email__iexact=email)
assert (person.personal == personal)
assert (person.middle == middle)
assert (person.family == family)
except Person.DoesNotExist:
pass
except AssertionError:
errors.append("Personal, middle or family name of existing user don't match: {0} vs {1}, {2} vs {3}, {4} vs {5}".format(personal, person.personal, middle, person.middle, family, person.family))
if person:
if (not any([event, role])):
errors.append('User exists but no event and role to assign the user to was provided')
else:
try:
Task.objects.get(event__slug=event, role__name=role, person=person)
except Task.DoesNotExist:
pass
else:
errors.append('Existing person {2} already has role {0} in event {1}'.format(role, event, person))
if ((event and (not role)) or (role and (not event))):
errors.append('Must have both or either of event ({0}) and role ({1})'.format(event, role))
if errors:
errors_occur = True
item['errors'] = errors
return errors_occur<|docstring|>Verify that uploaded data is correct. Show errors by populating ``errors``
dictionary item. This function changes ``data`` in place.<|endoftext|> |
b292523a1dece95f277620c18638cb0d3a86154763ad6e585ff34276833b45d1 | def create_uploaded_persons_tasks(data):
'\n Create persons and tasks from upload data.\n '
if any([row.get('errors') for row in data]):
raise InternalError('Uploaded data contains errors, cancelling upload')
persons_created = []
tasks_created = []
with transaction.atomic():
for row in data:
try:
fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS}
fields['username'] = create_username(row['personal'], row['family'])
if fields['email']:
(p, created) = Person.objects.get_or_create(email=fields['email'], defaults=fields)
if created:
persons_created.append(p)
else:
p = Person(**fields)
p.save()
persons_created.append(p)
if (row['event'] and row['role']):
e = Event.objects.get(slug=row['event'])
r = Role.objects.get(name=row['role'])
t = Task(person=p, event=e, role=r)
t.save()
tasks_created.append(t)
except IntegrityError as e:
raise IntegrityError('{0} (for {1})'.format(str(e), row))
except ObjectDoesNotExist as e:
raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row))
return (persons_created, tasks_created) | Create persons and tasks from upload data. | workshops/util.py | create_uploaded_persons_tasks | r-gaia-cs/swc-amy | 0 | python | def create_uploaded_persons_tasks(data):
'\n \n '
if any([row.get('errors') for row in data]):
raise InternalError('Uploaded data contains errors, cancelling upload')
persons_created = []
tasks_created = []
with transaction.atomic():
for row in data:
try:
fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS}
fields['username'] = create_username(row['personal'], row['family'])
if fields['email']:
(p, created) = Person.objects.get_or_create(email=fields['email'], defaults=fields)
if created:
persons_created.append(p)
else:
p = Person(**fields)
p.save()
persons_created.append(p)
if (row['event'] and row['role']):
e = Event.objects.get(slug=row['event'])
r = Role.objects.get(name=row['role'])
t = Task(person=p, event=e, role=r)
t.save()
tasks_created.append(t)
except IntegrityError as e:
raise IntegrityError('{0} (for {1})'.format(str(e), row))
except ObjectDoesNotExist as e:
raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row))
return (persons_created, tasks_created) | def create_uploaded_persons_tasks(data):
'\n \n '
if any([row.get('errors') for row in data]):
raise InternalError('Uploaded data contains errors, cancelling upload')
persons_created = []
tasks_created = []
with transaction.atomic():
for row in data:
try:
fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS}
fields['username'] = create_username(row['personal'], row['family'])
if fields['email']:
(p, created) = Person.objects.get_or_create(email=fields['email'], defaults=fields)
if created:
persons_created.append(p)
else:
p = Person(**fields)
p.save()
persons_created.append(p)
if (row['event'] and row['role']):
e = Event.objects.get(slug=row['event'])
r = Role.objects.get(name=row['role'])
t = Task(person=p, event=e, role=r)
t.save()
tasks_created.append(t)
except IntegrityError as e:
raise IntegrityError('{0} (for {1})'.format(str(e), row))
except ObjectDoesNotExist as e:
raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row))
return (persons_created, tasks_created)<|docstring|>Create persons and tasks from upload data.<|endoftext|> |
ebcee3b5878a4143c5a46fb858cf0618d35c7cc981c6740c3819361819d95afb | def create_username(personal, family):
'Generate unique username.'
stem = ((normalize_name(family) + '.') + normalize_name(personal))
counter = None
while True:
try:
if (counter is None):
username = stem
counter = 1
else:
counter += 1
username = '{0}.{1}'.format(stem, counter)
Person.objects.get(username=username)
except ObjectDoesNotExist:
break
if any([(ord(c) >= 128) for c in username]):
raise InternalError('Normalized username still contains non-normal characters "{0}"'.format(username))
return username | Generate unique username. | workshops/util.py | create_username | r-gaia-cs/swc-amy | 0 | python | def create_username(personal, family):
stem = ((normalize_name(family) + '.') + normalize_name(personal))
counter = None
while True:
try:
if (counter is None):
username = stem
counter = 1
else:
counter += 1
username = '{0}.{1}'.format(stem, counter)
Person.objects.get(username=username)
except ObjectDoesNotExist:
break
if any([(ord(c) >= 128) for c in username]):
raise InternalError('Normalized username still contains non-normal characters "{0}"'.format(username))
return username | def create_username(personal, family):
stem = ((normalize_name(family) + '.') + normalize_name(personal))
counter = None
while True:
try:
if (counter is None):
username = stem
counter = 1
else:
counter += 1
username = '{0}.{1}'.format(stem, counter)
Person.objects.get(username=username)
except ObjectDoesNotExist:
break
if any([(ord(c) >= 128) for c in username]):
raise InternalError('Normalized username still contains non-normal characters "{0}"'.format(username))
return username<|docstring|>Generate unique username.<|endoftext|> |
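`create_username` resolves collisions through the Django ORM; the sketch below reproduces the same uniquifying loop with an in-memory set standing in for the `Person` table (illustrative only):

```python
def normalize_name(name):
    return name.strip().replace(' ', '-').lower()

def create_username_sketch(personal, family, taken):
    stem = normalize_name(family) + '.' + normalize_name(personal)
    username, counter = stem, 1
    while username in taken:
        counter += 1                     # like the original, the first suffix used is .2
        username = '{0}.{1}'.format(stem, counter)
    return username

taken = {'hopper.grace'}
print(create_username_sketch('Grace', 'Hopper', taken))  # hopper.grace.2
print(create_username_sketch('Ada', 'Lovelace', taken))  # lovelace.ada
```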
03759732926be232300efc18809c48f0041b3519aeb9a6d824b3167d2ca10d81 | def normalize_name(name):
'Get rid of spaces, funky characters, etc.'
name = name.strip()
for (accented, flat) in [(' ', '-')]:
name = name.replace(accented, flat)
return name.lower() | Get rid of spaces, funky characters, etc. | workshops/util.py | normalize_name | r-gaia-cs/swc-amy | 0 | python | def normalize_name(name):
name = name.strip()
for (accented, flat) in [(' ', '-')]:
name = name.replace(accented, flat)
return name.lower() | def normalize_name(name):
name = name.strip()
for (accented, flat) in [(' ', '-')]:
name = name.replace(accented, flat)
return name.lower()<|docstring|>Get rid of spaces, funky characters, etc.<|endoftext|> |
160e9806d43c9b12f63247f5804003a93aa4907edf095fd2ee830254ef48794a | def train(train_dataloader, query_dataloader, retrieval_dataloader, arch, feature_dim, code_length, num_classes, dynamic_meta_embedding, num_prototypes, device, lr, max_iter, beta, gamma, mapping, topk, evaluate_interval):
'\n Training model.\n\n Args\n train_dataloader, query_dataloader, retrieval_dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n arch(str): CNN model name.\n code_length(int): Hash code length.\n device(torch.device): GPU or CPU.\n lr(float): Learning rate.\n max_iter(int): Number of iterations.\n alpha(float): Hyper-parameters.\n topk(int): Compute top k map.\n evaluate_interval(int): Interval of evaluation.\n\n Returns\n checkpoint(dict): Checkpoint.\n '
model = load_model(arch, feature_dim, code_length, num_classes, num_prototypes).to(device)
criterion = LTHNetLoss()
optimizer = optim.RMSprop(model.parameters(), lr=lr, weight_decay=0.0005)
scheduler = CosineAnnealingLR(optimizer, max_iter, (lr / 100))
running_loss = 0.0
best_map = 0.0
training_time = 0.0
prototypes = torch.zeros([num_prototypes, feature_dim])
prototypes = prototypes.to(device)
for it in range(max_iter):
prototypes = generate_prototypes(model, train_dataloader, num_prototypes, feature_dim, device, dynamic_meta_embedding, prototypes)
prototypes = prototypes.to(device)
model.train()
tic = time.time()
for (data, targets, index) in train_dataloader:
(data, targets, index) = (data.to(device), targets.to(device), index.to(device))
optimizer.zero_grad()
(hashcodes, assignments, _) = model(data, dynamic_meta_embedding, prototypes)
loss = criterion(hashcodes, assignments, targets, device, beta, gamma, mapping, it, max_iter)
running_loss = (running_loss + loss.item())
loss.backward()
optimizer.step()
scheduler.step()
training_time = (time.time() - tic)
if ((it % evaluate_interval) == (evaluate_interval - 1)):
(query_code, query_assignment) = generate_code(model, query_dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes)
(retrieval_code, retrieval_assignment) = generate_code(model, retrieval_dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes)
query_targets = query_dataloader.dataset.get_onehot_targets()
retrieval_targets = retrieval_dataloader.dataset.get_onehot_targets()
mAP = mean_average_precision(query_code.to(device), retrieval_code.to(device), query_targets.to(device), retrieval_targets.to(device), device, topk)
logger.info('[iter:{}/{}][loss:{:.2f}][map:{:.4f}][time:{:.2f}]'.format((it + 1), max_iter, (running_loss / evaluate_interval), mAP, training_time))
running_loss = 0.0
if (best_map < mAP):
best_map = mAP
checkpoint = {'model': model.state_dict(), 'qB': query_code.cpu(), 'rB': retrieval_code.cpu(), 'qL': query_targets.cpu(), 'rL': retrieval_targets.cpu(), 'qAssignment': query_assignment.cpu(), 'rAssignment': retrieval_assignment.cpu(), 'map': best_map, 'prototypes': prototypes.cpu(), 'beta': beta, 'gamma': gamma, 'mapping': mapping}
return checkpoint | Training model.
Args
train_dataloader, query_dataloader, retrieval_dataloader(torch.utils.data.dataloader.DataLoader): Data loader.
arch(str): CNN model name.
code_length(int): Hash code length.
device(torch.device): GPU or CPU.
lr(float): Learning rate.
max_iter(int): Number of iterations.
alpha(float): Hyper-parameters.
topk(int): Compute top k map.
evaluate_interval(int): Interval of evaluation.
Returns
checkpoint(dict): Checkpoint. | lthNet.py | train | butterfly-chinese/long-tail-hashing | 6 | python | def train(train_dataloader, query_dataloader, retrieval_dataloader, arch, feature_dim, code_length, num_classes, dynamic_meta_embedding, num_prototypes, device, lr, max_iter, beta, gamma, mapping, topk, evaluate_interval):
'\n Training model.\n\n Args\n train_dataloader, query_dataloader, retrieval_dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n arch(str): CNN model name.\n code_length(int): Hash code length.\n device(torch.device): GPU or CPU.\n lr(float): Learning rate.\n max_iter(int): Number of iterations.\n alpha(float): Hyper-parameters.\n topk(int): Compute top k map.\n evaluate_interval(int): Interval of evaluation.\n\n Returns\n checkpoint(dict): Checkpoint.\n '
model = load_model(arch, feature_dim, code_length, num_classes, num_prototypes).to(device)
criterion = LTHNetLoss()
optimizer = optim.RMSprop(model.parameters(), lr=lr, weight_decay=0.0005)
scheduler = CosineAnnealingLR(optimizer, max_iter, (lr / 100))
running_loss = 0.0
best_map = 0.0
training_time = 0.0
prototypes = torch.zeros([num_prototypes, feature_dim])
prototypes = prototypes.to(device)
for it in range(max_iter):
prototypes = generate_prototypes(model, train_dataloader, num_prototypes, feature_dim, device, dynamic_meta_embedding, prototypes)
prototypes = prototypes.to(device)
model.train()
tic = time.time()
for (data, targets, index) in train_dataloader:
(data, targets, index) = (data.to(device), targets.to(device), index.to(device))
optimizer.zero_grad()
(hashcodes, assignments, _) = model(data, dynamic_meta_embedding, prototypes)
loss = criterion(hashcodes, assignments, targets, device, beta, gamma, mapping, it, max_iter)
running_loss = (running_loss + loss.item())
loss.backward()
optimizer.step()
scheduler.step()
training_time = (time.time() - tic)
if ((it % evaluate_interval) == (evaluate_interval - 1)):
(query_code, query_assignment) = generate_code(model, query_dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes)
(retrieval_code, retrieval_assignment) = generate_code(model, retrieval_dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes)
query_targets = query_dataloader.dataset.get_onehot_targets()
retrieval_targets = retrieval_dataloader.dataset.get_onehot_targets()
mAP = mean_average_precision(query_code.to(device), retrieval_code.to(device), query_targets.to(device), retrieval_targets.to(device), device, topk)
logger.info('[iter:{}/{}][loss:{:.2f}][map:{:.4f}][time:{:.2f}]'.format((it + 1), max_iter, (running_loss / evaluate_interval), mAP, training_time))
running_loss = 0.0
if (best_map < mAP):
best_map = mAP
checkpoint = {'model': model.state_dict(), 'qB': query_code.cpu(), 'rB': retrieval_code.cpu(), 'qL': query_targets.cpu(), 'rL': retrieval_targets.cpu(), 'qAssignment': query_assignment.cpu(), 'rAssignment': retrieval_assignment.cpu(), 'map': best_map, 'prototypes': prototypes.cpu(), 'beta': beta, 'gamma': gamma, 'mapping': mapping}
return checkpoint | def train(train_dataloader, query_dataloader, retrieval_dataloader, arch, feature_dim, code_length, num_classes, dynamic_meta_embedding, num_prototypes, device, lr, max_iter, beta, gamma, mapping, topk, evaluate_interval):
'\n Training model.\n\n Args\n train_dataloader, query_dataloader, retrieval_dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n arch(str): CNN model name.\n code_length(int): Hash code length.\n device(torch.device): GPU or CPU.\n lr(float): Learning rate.\n max_iter(int): Number of iterations.\n alpha(float): Hyper-parameters.\n topk(int): Compute top k map.\n evaluate_interval(int): Interval of evaluation.\n\n Returns\n checkpoint(dict): Checkpoint.\n '
model = load_model(arch, feature_dim, code_length, num_classes, num_prototypes).to(device)
criterion = LTHNetLoss()
optimizer = optim.RMSprop(model.parameters(), lr=lr, weight_decay=0.0005)
scheduler = CosineAnnealingLR(optimizer, max_iter, (lr / 100))
running_loss = 0.0
best_map = 0.0
training_time = 0.0
prototypes = torch.zeros([num_prototypes, feature_dim])
prototypes = prototypes.to(device)
for it in range(max_iter):
prototypes = generate_prototypes(model, train_dataloader, num_prototypes, feature_dim, device, dynamic_meta_embedding, prototypes)
prototypes = prototypes.to(device)
model.train()
tic = time.time()
for (data, targets, index) in train_dataloader:
(data, targets, index) = (data.to(device), targets.to(device), index.to(device))
optimizer.zero_grad()
(hashcodes, assignments, _) = model(data, dynamic_meta_embedding, prototypes)
loss = criterion(hashcodes, assignments, targets, device, beta, gamma, mapping, it, max_iter)
running_loss = (running_loss + loss.item())
loss.backward()
optimizer.step()
scheduler.step()
training_time = (time.time() - tic)
if ((it % evaluate_interval) == (evaluate_interval - 1)):
(query_code, query_assignment) = generate_code(model, query_dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes)
(retrieval_code, retrieval_assignment) = generate_code(model, retrieval_dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes)
query_targets = query_dataloader.dataset.get_onehot_targets()
retrieval_targets = retrieval_dataloader.dataset.get_onehot_targets()
mAP = mean_average_precision(query_code.to(device), retrieval_code.to(device), query_targets.to(device), retrieval_targets.to(device), device, topk)
logger.info('[iter:{}/{}][loss:{:.2f}][map:{:.4f}][time:{:.2f}]'.format((it + 1), max_iter, (running_loss / evaluate_interval), mAP, training_time))
running_loss = 0.0
if (best_map < mAP):
best_map = mAP
checkpoint = {'model': model.state_dict(), 'qB': query_code.cpu(), 'rB': retrieval_code.cpu(), 'qL': query_targets.cpu(), 'rL': retrieval_targets.cpu(), 'qAssignment': query_assignment.cpu(), 'rAssignment': retrieval_assignment.cpu(), 'map': best_map, 'prototypes': prototypes.cpu(), 'beta': beta, 'gamma': gamma, 'mapping': mapping}
return checkpoint<|docstring|>Training model.
Args
train_dataloader, query_dataloader, retrieval_dataloader(torch.utils.data.dataloader.DataLoader): Data loader.
arch(str): CNN model name.
code_length(int): Hash code length.
device(torch.device): GPU or CPU.
lr(float): Learning rate.
max_iter(int): Number of iterations.
alpha(float): Hyper-parameters.
topk(int): Compute top k map.
evaluate_interval(int): Interval of evaluation.
Returns
checkpoint(dict): Checkpoint.<|endoftext|> |
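`train` needs the repository's model, loss, and data loaders, so it is not runnable in isolation. The sketch below isolates just the optimizer/scheduler pairing it uses, RMSprop with weight decay plus `CosineAnnealingLR` annealing towards `lr / 100`, on a toy model:

```python
import torch
from torch import nn, optim
from torch.optim.lr_scheduler import CosineAnnealingLR

model = nn.Linear(8, 2)                    # toy stand-in for LTHNet
max_iter, lr = 5, 1e-3
optimizer = optim.RMSprop(model.parameters(), lr=lr, weight_decay=0.0005)
scheduler = CosineAnnealingLR(optimizer, max_iter, lr / 100)

for _ in range(max_iter):
    optimizer.zero_grad()
    loss = model(torch.randn(4, 8)).pow(2).mean()  # dummy loss
    loss.backward()
    optimizer.step()
    scheduler.step()

print(optimizer.param_groups[0]['lr'])     # annealed down to lr / 100 after max_iter steps
```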
cdf907274e6955859636f039b6d362fa76254762c93e3f32b0ee965af4a4bb00 | def generate_code(model, dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes):
'\n Generate hash code\n\n Args\n dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n code_length(int): Hash code length.\n device(torch.device): Using gpu or cpu.\n\n Returns\n code(torch.Tensor): Hash code.\n '
model.eval()
with torch.no_grad():
N = len(dataloader.dataset)
code = torch.zeros([N, code_length])
assignment = torch.zeros([N, num_classes])
for (data, _, index) in dataloader:
data = data.to(device)
(hash_code, class_assignment, _) = model(data, dynamic_meta_embedding, prototypes)
code[(index, :)] = hash_code.sign().cpu()
assignment[(index, :)] = class_assignment.cpu()
torch.cuda.empty_cache()
return (code, assignment) | Generate hash code
Args
dataloader(torch.utils.data.dataloader.DataLoader): Data loader.
code_length(int): Hash code length.
device(torch.device): Using gpu or cpu.
Returns
code(torch.Tensor): Hash code. | lthNet.py | generate_code | butterfly-chinese/long-tail-hashing | 6 | python | def generate_code(model, dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes):
'\n Generate hash code\n\n Args\n dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n code_length(int): Hash code length.\n device(torch.device): Using gpu or cpu.\n\n Returns\n code(torch.Tensor): Hash code.\n '
model.eval()
with torch.no_grad():
N = len(dataloader.dataset)
code = torch.zeros([N, code_length])
assignment = torch.zeros([N, num_classes])
for (data, _, index) in dataloader:
data = data.to(device)
(hash_code, class_assignment, _) = model(data, dynamic_meta_embedding, prototypes)
code[(index, :)] = hash_code.sign().cpu()
assignment[(index, :)] = class_assignment.cpu()
torch.cuda.empty_cache()
return (code, assignment) | def generate_code(model, dataloader, code_length, num_classes, device, dynamic_meta_embedding, prototypes):
'\n Generate hash code\n\n Args\n dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n code_length(int): Hash code length.\n device(torch.device): Using gpu or cpu.\n\n Returns\n code(torch.Tensor): Hash code.\n '
model.eval()
with torch.no_grad():
N = len(dataloader.dataset)
code = torch.zeros([N, code_length])
assignment = torch.zeros([N, num_classes])
for (data, _, index) in dataloader:
data = data.to(device)
(hash_code, class_assignment, _) = model(data, dynamic_meta_embedding, prototypes)
code[(index, :)] = hash_code.sign().cpu()
assignment[(index, :)] = class_assignment.cpu()
torch.cuda.empty_cache()
return (code, assignment)<|docstring|>Generate hash code
Args
dataloader(torch.utils.data.dataloader.DataLoader): Data loader.
code_length(int): Hash code length.
device(torch.device): Using gpu or cpu.
Returns
code(torch.Tensor): Hash code.<|endoftext|> |
74cbb516a069aae8fac68a4016c1999022c058269a7b51e7c2377fa43129d099 | def generate_prototypes(model, dataloader, num_prototypes, feature_dim, device, dynamic_meta_embedding, prototypes_placeholder):
'\n Generate prototypes (visual memory)\n\n Args\n dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n code_length(int): Hash code length.\n device(torch.device): Using gpu or cpu.\n\n Returns\n code(torch.Tensor): prototypes.\n '
model.eval()
with torch.no_grad():
prototypes = torch.zeros([num_prototypes, feature_dim])
counter = torch.zeros([num_prototypes])
for (data, targets, _) in dataloader:
(data, targets) = (data.to(device), targets.to(device))
(_, _, direct_feature) = model(data, dynamic_meta_embedding, prototypes_placeholder)
direct_feature = direct_feature.to('cpu')
index = torch.nonzero(targets, as_tuple=False)[(:, 1)]
index = index.to('cpu')
for j in range(len(data)):
prototypes[(index[j], :)] = (prototypes[(index[j], :)] + direct_feature[(j, :)])
counter[index[j]] = (counter[index[j]] + 1)
for k in range(num_prototypes):
prototypes[(k, :)] = (prototypes[(k, :)] / counter[k])
torch.cuda.empty_cache()
return prototypes | Generate prototypes (visual memory)
Args
dataloader(torch.utils.data.dataloader.DataLoader): Data loader.
code_length(int): Hash code length.
device(torch.device): Using gpu or cpu.
Returns
code(torch.Tensor): prototypes. | lthNet.py | generate_prototypes | butterfly-chinese/long-tail-hashing | 6 | python | def generate_prototypes(model, dataloader, num_prototypes, feature_dim, device, dynamic_meta_embedding, prototypes_placeholder):
'\n Generate prototypes (visual memory)\n\n Args\n dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n code_length(int): Hash code length.\n device(torch.device): Using gpu or cpu.\n\n Returns\n code(torch.Tensor): prototypes.\n '
model.eval()
with torch.no_grad():
prototypes = torch.zeros([num_prototypes, feature_dim])
counter = torch.zeros([num_prototypes])
for (data, targets, _) in dataloader:
(data, targets) = (data.to(device), targets.to(device))
(_, _, direct_feature) = model(data, dynamic_meta_embedding, prototypes_placeholder)
direct_feature = direct_feature.to('cpu')
index = torch.nonzero(targets, as_tuple=False)[(:, 1)]
index = index.to('cpu')
for j in range(len(data)):
prototypes[(index[j], :)] = (prototypes[(index[j], :)] + direct_feature[(j, :)])
counter[index[j]] = (counter[index[j]] + 1)
for k in range(num_prototypes):
prototypes[(k, :)] = (prototypes[(k, :)] / counter[k])
torch.cuda.empty_cache()
return prototypes | def generate_prototypes(model, dataloader, num_prototypes, feature_dim, device, dynamic_meta_embedding, prototypes_placeholder):
'\n Generate prototypes (visual memory)\n\n Args\n dataloader(torch.utils.data.dataloader.DataLoader): Data loader.\n code_length(int): Hash code length.\n device(torch.device): Using gpu or cpu.\n\n Returns\n code(torch.Tensor): prototypes.\n '
model.eval()
with torch.no_grad():
prototypes = torch.zeros([num_prototypes, feature_dim])
counter = torch.zeros([num_prototypes])
for (data, targets, _) in dataloader:
(data, targets) = (data.to(device), targets.to(device))
(_, _, direct_feature) = model(data, dynamic_meta_embedding, prototypes_placeholder)
direct_feature = direct_feature.to('cpu')
index = torch.nonzero(targets, as_tuple=False)[(:, 1)]
index = index.to('cpu')
for j in range(len(data)):
prototypes[(index[j], :)] = (prototypes[(index[j], :)] + direct_feature[(j, :)])
counter[index[j]] = (counter[index[j]] + 1)
for k in range(num_prototypes):
prototypes[(k, :)] = (prototypes[(k, :)] / counter[k])
torch.cuda.empty_cache()
return prototypes<|docstring|>Generate prototypes (visual memory)
Args
dataloader(torch.utils.data.dataloader.DataLoader): Data loader.
code_length(int): Hash code length.
device(torch.device): Using gpu or cpu.
Returns
code(torch.Tensor): prototypes.<|endoftext|> |
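`generate_prototypes` averages the direct features of each class over the training set with explicit Python loops. The same per-class averaging can be written with `index_add_`; a self-contained sketch (pure PyTorch, no model), which also clamps the counts so a class that never appears does not turn into NaNs:

```python
import torch

num_classes, feature_dim = 3, 4
features = torch.randn(10, feature_dim)        # stand-in for direct_feature
labels = torch.randint(0, num_classes, (10,))  # stand-in for the targets' class indices

prototypes = torch.zeros(num_classes, feature_dim)
counts = torch.zeros(num_classes)
prototypes.index_add_(0, labels, features)             # sum features per class
counts.index_add_(0, labels, torch.ones(len(labels)))  # count samples per class
prototypes = prototypes / counts.clamp(min=1).unsqueeze(1)

print(prototypes.shape)  # torch.Size([3, 4])
```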
5a1385edd81fa2ea3a3fc7b2fe6d4934d11f126a3c86f6631b83e6765790f058 | def roc_auc(predictions, target):
'\n This methods returns the AUC Score when given the Predictions\n and Labels\n '
(fpr, tpr, thresholds) = metrics.roc_curve(target, predictions)
roc_auc = metrics.auc(fpr, tpr)
return roc_auc | This methods returns the AUC Score when given the Predictions
and Labels | Jigsaw-Multilingual-Toxic-Comment-Classification/train-by-lstm.py | roc_auc | NCcoco/kaggle-project | 0 | python | def roc_auc(predictions, target):
'\n This methods returns the AUC Score when given the Predictions\n and Labels\n '
(fpr, tpr, thresholds) = metrics.roc_curve(target, predictions)
roc_auc = metrics.auc(fpr, tpr)
return roc_auc | def roc_auc(predictions, target):
'\n This methods returns the AUC Score when given the Predictions\n and Labels\n '
(fpr, tpr, thresholds) = metrics.roc_curve(target, predictions)
roc_auc = metrics.auc(fpr, tpr)
return roc_auc<|docstring|>This methods returns the AUC Score when given the Predictions
and Labels<|endoftext|> |
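A usage sketch for `roc_auc`, on the four-point example from the scikit-learn documentation; it assumes scikit-learn is installed and that the function above was defined after `from sklearn import metrics`:

```python
from sklearn import metrics  # required by roc_auc() itself

target = [0, 0, 1, 1]
predictions = [0.1, 0.4, 0.35, 0.8]

# roc_auc() takes the predicted scores first, then the true labels.
print(roc_auc(predictions, target))  # 0.75
```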
df06cdef78bb1d30663d76123313c03910bd17ae413b5f570b10ecedaa4af8c3 | def __init__(self, exception: Exception, plugin_name: str=None, entry_point: EntryPoint=None):
'Initialize FailedToLoadPlugin exception.'
self.plugin_name = plugin_name
self.original_exception = exception
self.entry_point = entry_point | Initialize FailedToLoadPlugin exception. | src/valiant/plugins/exceptions.py | __init__ | pomes/valiant | 2 | python | def __init__(self, exception: Exception, plugin_name: str=None, entry_point: EntryPoint=None):
self.plugin_name = plugin_name
self.original_exception = exception
self.entry_point = entry_point | def __init__(self, exception: Exception, plugin_name: str=None, entry_point: EntryPoint=None):
self.plugin_name = plugin_name
self.original_exception = exception
self.entry_point = entry_point<|docstring|>Initialize FailedToLoadPlugin exception.<|endoftext|> |
dcf311029fd7fe46fbdb97f49b2c90a8dfc3796c03a5bbda33045df1828108ca | def __str__(self):
'Format our exception message.'
return f"Failed to load plugin '{self.plugin_name}' due to {self.original_exception}. Entry point is: {self.entry_point}. sys.path is: {sys.path}" | Format our exception message. | src/valiant/plugins/exceptions.py | __str__ | pomes/valiant | 2 | python | def __str__(self):
return f"Failed to load plugin '{self.plugin_name}' due to {self.original_exception}. Entry point is: {self.entry_point}. sys.path is: {sys.path}" | def __str__(self):
return f"Failed to load plugin '{self.plugin_name}' due to {self.original_exception}. Entry point is: {self.entry_point}. sys.path is: {sys.path}"<|docstring|>Format our exception message.<|endoftext|> |
1e9e36aae44db3f23d44efb0c623213e7370adf7fe66317427665a89273848d9 | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.ProcessProposal = channel.unary_unary('/protos.Endorser/ProcessProposal', request_serializer=peer_dot_fabric__proposal__pb2.SignedProposal.SerializeToString, response_deserializer=peer_dot_fabric__proposal__response__pb2.ProposalResponse.FromString) | Constructor.
Args:
channel: A grpc.Channel. | bddtests/peer/fabric_service_pb2_grpc.py | __init__ | memoutng/BlockchainTesteo | 1 | python | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.ProcessProposal = channel.unary_unary('/protos.Endorser/ProcessProposal', request_serializer=peer_dot_fabric__proposal__pb2.SignedProposal.SerializeToString, response_deserializer=peer_dot_fabric__proposal__response__pb2.ProposalResponse.FromString) | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.ProcessProposal = channel.unary_unary('/protos.Endorser/ProcessProposal', request_serializer=peer_dot_fabric__proposal__pb2.SignedProposal.SerializeToString, response_deserializer=peer_dot_fabric__proposal__response__pb2.ProposalResponse.FromString)<|docstring|>Constructor.
Args:
channel: A grpc.Channel.<|endoftext|> |
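Typical construction pattern for a generated stub such as `EndorserStub` (sketch only; it needs the `grpc` package and the generated fabric proto modules, and the peer address below is an assumption):

```python
import grpc

# 7051 is the conventional Hyperledger Fabric peer port; adjust as needed.
channel = grpc.insecure_channel('localhost:7051')
endorser = EndorserStub(channel)

# A SignedProposal message built elsewhere would then be submitted with:
# proposal_response = endorser.ProcessProposal(signed_proposal)
```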
66f1f8a224faff68eebaf153b6a47f99e84c96c92a17d722875d1c3046169b6c | @login_required(login_url='login')
def profile(request: object):
'Profile function processes 1 types of request.\n\n 1. GET\n Returns the reset profile page.\n '
if (request.method == 'GET'):
return render(request, template_name='alfastaff-products/profile.html', context={'user': request.user, 'avatar': request.user.profile.avatar.url}) | Profile function processes 1 types of request.
1. GET
Returns the reset profile page. | alfastaff_products/views.py | profile | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def profile(request: object):
'Profile function processes 1 types of request.\n\n 1. GET\n Returns the reset profile page.\n '
if (request.method == 'GET'):
return render(request, template_name='alfastaff-products/profile.html', context={'user': request.user, 'avatar': request.user.profile.avatar.url}) | @login_required(login_url='login')
def profile(request: object):
'Profile function processes 1 types of request.\n\n 1. GET\n Returns the reset profile page.\n '
if (request.method == 'GET'):
return render(request, template_name='alfastaff-products/profile.html', context={'user': request.user, 'avatar': request.user.profile.avatar.url})<|docstring|>Profile function processes 1 types of request.
1. GET
Returns the reset profile page.<|endoftext|> |
23d69a37fff2f00741c13d7027e56b653b06c89506a748680cde301225a355a9 | @login_required(login_url='login')
def edit(request: object):
'Edit function processes 1 types of request.\n\n 1. GET\n Returns the edit page.\n '
if (request.method == 'GET'):
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'avatar': request.user.profile.avatar.url}) | Edit function processes 1 types of request.
1. GET
Returns the edit page. | alfastaff_products/views.py | edit | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def edit(request: object):
'Edit function processes 1 types of request.\n\n 1. GET\n Returns the edit page.\n '
if (request.method == 'GET'):
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'avatar': request.user.profile.avatar.url}) | @login_required(login_url='login')
def edit(request: object):
'Edit function processes 1 types of request.\n\n 1. GET\n Returns the edit page.\n '
if (request.method == 'GET'):
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'avatar': request.user.profile.avatar.url})<|docstring|>Edit function processes 1 types of request.
1. GET
Returns the edit page.<|endoftext|> |
02246f70ead566342b1b9ce85fad8e403b304af73797c62bf2510a21ae18e002 | @login_required(login_url='login')
def edit_password(request: object):
'edit_password function processes 2 types of request post and get.\n\n 1. GET\n Redirect to the edit page;\n 2. POST\n Checks the validity of the data,\n checks whether the user verifies the passwords for equality;\n if everything is good, then he changes the password and redirects to the page,\n if the error returns it to the page.\n '
if (request.method == 'POST'):
password_change_form = PasswordChangeForm(request.POST)
if password_change_form.is_valid():
return edit_password_processing(request, password_change_form)
else:
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'error': True, 'avatar': request.user.profile.avatar.url})
else:
return redirect(to='edit') | edit_password function processes 2 types of request post and get.
1. GET
Redirect to the edit page;
2. POST
Checks the validity of the data,
checks whether the user verifies the passwords for equality;
if everything is good, then he changes the password and redirects to the page,
if the error returns it to the page. | alfastaff_products/views.py | edit_password | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def edit_password(request: object):
'edit_password function processes 2 types of request post and get.\n\n 1. GET\n Redirect to the edit page;\n 2. POST\n Checks the validity of the data,\n checks whether the user verifies the passwords for equality;\n if everything is good, then he changes the password and redirects to the page,\n if the error returns it to the page.\n '
if (request.method == 'POST'):
password_change_form = PasswordChangeForm(request.POST)
if password_change_form.is_valid():
return edit_password_processing(request, password_change_form)
else:
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'error': True, 'avatar': request.user.profile.avatar.url})
else:
return redirect(to='edit') | @login_required(login_url='login')
def edit_password(request: object):
'edit_password function processes 2 types of request post and get.\n\n 1. GET\n Redirect to the edit page;\n 2. POST\n Checks the validity of the data,\n checks whether the user verifies the passwords for equality;\n if everything is good, then he changes the password and redirects to the page,\n if the error returns it to the page.\n '
if (request.method == 'POST'):
password_change_form = PasswordChangeForm(request.POST)
if password_change_form.is_valid():
return edit_password_processing(request, password_change_form)
else:
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'error': True, 'avatar': request.user.profile.avatar.url})
else:
return redirect(to='edit')<|docstring|>edit_password function processes two types of request: POST and GET.
1. GET
Redirects to the edit page;
2. POST
Checks the validity of the data
and verifies that the two passwords match;
if everything is valid, changes the password and redirects to the page,
otherwise returns to the page with an error.<|endoftext|>
aa5e345671ac35196ff49ff32f563c96fc4142d386949f0a0dfe0c9bdcb0d209 | @login_required(login_url='login')
def edit_profile(request: object):
'edit_profile function processes 2 types of request post and get.\n\n 1. GET\n Redirect to the edit page;\n 2. POST\n Checks the validity of the data,\n changes the user’s object fields and checks for the presence of a standard photo,\n saves the user and authorizes him again and then redirects to editing.\n '
if (request.method == 'POST'):
profile_change_form = ProfileChangeForm(request.POST, request.FILES)
if profile_change_form.is_valid():
return edit_profile_processing(request, profile_change_form)
else:
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'error_profile': True})
else:
return redirect(to='edit') | edit_profile function processes two types of request: POST and GET.
1. GET
Redirects to the edit page;
2. POST
Checks the validity of the data,
updates the user’s profile fields and checks for the presence of a default photo,
saves the user, re-authenticates them and then redirects to the edit page. | alfastaff_products/views.py | edit_profile | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def edit_profile(request: object):
'edit_profile function processes 2 types of request post and get.\n\n 1. GET\n Redirect to the edit page;\n 2. POST\n Checks the validity of the data,\n changes the user’s object fields and checks for the presence of a standard photo,\n saves the user and authorizes him again and then redirects to editing.\n '
if (request.method == 'POST'):
profile_change_form = ProfileChangeForm(request.POST, request.FILES)
if profile_change_form.is_valid():
return edit_profile_processing(request, profile_change_form)
else:
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'error_profile': True})
else:
return redirect(to='edit') | @login_required(login_url='login')
def edit_profile(request: object):
'edit_profile function processes 2 types of request post and get.\n\n 1. GET\n Redirect to the edit page;\n 2. POST\n Checks the validity of the data,\n changes the user’s object fields and checks for the presence of a standard photo,\n saves the user and authorizes him again and then redirects to editing.\n '
if (request.method == 'POST'):
profile_change_form = ProfileChangeForm(request.POST, request.FILES)
if profile_change_form.is_valid():
return edit_profile_processing(request, profile_change_form)
else:
return render(request, template_name='alfastaff-products/edit.html', context={'user': request.user, 'error_profile': True})
else:
return redirect(to='edit')<|docstring|>edit_profile function processes two types of request: POST and GET.
1. GET
Redirects to the edit page;
2. POST
Checks the validity of the data,
updates the user’s profile fields and checks for the presence of a default photo,
saves the user, re-authenticates them and then redirects to the edit page.<|endoftext|>
2304f1884d992935a24b254b5a723218ff9f6c75cfeb9a5366f423905751cff8 | @login_required(login_url='login')
def logout_user(request: object):
'logout_user function processes 1 types of request.\n\n 1. GET\n Returns the login page and logout user.\n '
if (request.method == 'GET'):
logout(request)
return render(request, template_name='alfastaff-account/login.html', context={'user': request.user}) | logout_user function processes one type of request.
1. GET
Logs out the user and returns the login page. | alfastaff_products/views.py | logout_user | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def logout_user(request: object):
'logout_user function processes 1 types of request.\n\n 1. GET\n Returns the login page and logout user.\n '
if (request.method == 'GET'):
logout(request)
return render(request, template_name='alfastaff-account/login.html', context={'user': request.user}) | @login_required(login_url='login')
def logout_user(request: object):
'logout_user function processes 1 types of request.\n\n 1. GET\n Returns the login page and logout user.\n '
if (request.method == 'GET'):
logout(request)
return render(request, template_name='alfastaff-account/login.html', context={'user': request.user})<|docstring|>logout_user function processes one type of request.
1. GET
Logs out the user and returns the login page.<|endoftext|>
511ea99bfb567325de6cdd299195772004fb6e199c5b88d442504d8d4e99b542 | @login_required(login_url='login')
def purchases(request: object):
'Purchases function processes 1 types of request.\n\n 1. GET\n return number of page on purchases.html\n '
if (request.method == 'GET'):
count_page = count_page_purchases(request)
return render(request, template_name='alfastaff-products/purchases.html', context={'user': request.user, 'count_page': count_page}) | Purchases function processes one type of request.
1. GET
Returns the number of pages on purchases.html | alfastaff_products/views.py | purchases | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def purchases(request: object):
'Purchases function processes 1 types of request.\n\n 1. GET\n return number of page on purchases.html\n '
if (request.method == 'GET'):
count_page = count_page_purchases(request)
return render(request, template_name='alfastaff-products/purchases.html', context={'user': request.user, 'count_page': count_page}) | @login_required(login_url='login')
def purchases(request: object):
'Purchases function processes 1 types of request.\n\n 1. GET\n return number of page on purchases.html\n '
if (request.method == 'GET'):
count_page = count_page_purchases(request)
return render(request, template_name='alfastaff-products/purchases.html', context={'user': request.user, 'count_page': count_page})<|docstring|>Purchases function processes one type of request.
1. GET
Returns the number of pages on purchases.html<|endoftext|>
36622a86e4611c7a4c41817669ad8e86b209899d94b047f67313a584137ee260 | @login_required(login_url='login')
def purchases_page(request: object, page: int, sort: str):
'purchases_page function processes 1 types of request.\n\n 1. GET\n It takes several arguments from the query string such as the page number and sort name,\n takes out the elements according to the page and sorts them according to the sort name\n and returns to the page.\n '
if (request.method == 'GET'):
purchases = get_purchases(request, page, sort)
return render(request, template_name='alfastaff-products/list_purchases.html', context={'purchases': purchases}) | purchases_page function processes one type of request.
1. GET
It takes several arguments from the URL, such as the page number and sort name,
fetches the elements for that page, sorts them according to the sort name
and returns them to the page. | alfastaff_products/views.py | purchases_page | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def purchases_page(request: object, page: int, sort: str):
'purchases_page function processes 1 types of request.\n\n 1. GET\n It takes several arguments from the query string such as the page number and sort name,\n takes out the elements according to the page and sorts them according to the sort name\n and returns to the page.\n '
if (request.method == 'GET'):
purchases = get_purchases(request, page, sort)
return render(request, template_name='alfastaff-products/list_purchases.html', context={'purchases': purchases}) | @login_required(login_url='login')
def purchases_page(request: object, page: int, sort: str):
'purchases_page function processes 1 types of request.\n\n 1. GET\n It takes several arguments from the query string such as the page number and sort name,\n takes out the elements according to the page and sorts them according to the sort name\n and returns to the page.\n '
if (request.method == 'GET'):
purchases = get_purchases(request, page, sort)
return render(request, template_name='alfastaff-products/list_purchases.html', context={'purchases': purchases})<|docstring|>purchases_page function processes one type of request.
1. GET
It takes several arguments from the URL, such as the page number and sort name,
fetches the elements for that page, sorts them according to the sort name
and returns them to the page.<|endoftext|>
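A minimal URL wiring sketch for the paginated view above; the path string and URL name are assumptions for illustration, not taken from the repository.

from django.urls import path

urlpatterns = [
    # page is captured as an int, sort as a string such as 'date' (hypothetical sort key)
    path('purchases/<int:page>/<str:sort>/', purchases_page, name='purchases_page'),
]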
30d2312331438e6f8cb6bbe48e29caf92a7750411cae98fa5e9955378df57c6c | @login_required(login_url='login')
def products(request: object):
'Product function processes 1 types of request.\n\n 1. GET\n return number of page on catalog.html\n '
if (request.method == 'GET'):
count_page = count_page_products()
return render(request, template_name='alfastaff-products/catalog.html', context={'user': request.user, 'count_page': count_page}) | Product function processes one type of request.
1. GET
Returns the number of pages on catalog.html | alfastaff_products/views.py | products | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def products(request: object):
'Product function processes 1 types of request.\n\n 1. GET\n return number of page on catalog.html\n '
if (request.method == 'GET'):
count_page = count_page_products()
return render(request, template_name='alfastaff-products/catalog.html', context={'user': request.user, 'count_page': count_page}) | @login_required(login_url='login')
def products(request: object):
'Product function processes 1 types of request.\n\n 1. GET\n return number of page on catalog.html\n '
if (request.method == 'GET'):
count_page = count_page_products()
return render(request, template_name='alfastaff-products/catalog.html', context={'user': request.user, 'count_page': count_page})<|docstring|>Product function processes one type of request.
1. GET
Returns the number of pages on catalog.html<|endoftext|>
24394623048df6774b6521ffa057d7c96304ea173a62ca0188a4b594df736582 | @login_required(login_url='login')
def products_page(request: object, page: int, sort: str):
'products_page function processes 1 types of request.\n\n 1. GET\n It takes several arguments from the query string such as the page number and sort name,\n takes out the elements according to the page and sorts them according to the sort name\n and returns to the page.\n '
if (request.method == 'GET'):
products = get_products(request, page, sort)
return render(request, template_name='alfastaff-products/list_products.html', context={'products': products}) | products_page function processes one type of request.
1. GET
It takes several arguments from the URL, such as the page number and sort name,
fetches the elements for that page, sorts them according to the sort name
and returns them to the page. | alfastaff_products/views.py | products_page | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def products_page(request: object, page: int, sort: str):
'products_page function processes 1 types of request.\n\n 1. GET\n It takes several arguments from the query string such as the page number and sort name,\n takes out the elements according to the page and sorts them according to the sort name\n and returns to the page.\n '
if (request.method == 'GET'):
products = get_products(request, page, sort)
return render(request, template_name='alfastaff-products/list_products.html', context={'products': products}) | @login_required(login_url='login')
def products_page(request: object, page: int, sort: str):
'products_page function processes 1 types of request.\n\n 1. GET\n It takes several arguments from the query string such as the page number and sort name,\n takes out the elements according to the page and sorts them according to the sort name\n and returns to the page.\n '
if (request.method == 'GET'):
products = get_products(request, page, sort)
return render(request, template_name='alfastaff-products/list_products.html', context={'products': products})<|docstring|>products_page function processes one type of request.
1. GET
It takes several arguments from the URL, such as the page number and sort name,
fetches the elements for that page, sorts them according to the sort name
and returns them to the page.<|endoftext|>
2f92e38435debbbb8def0918c5db15f92f79d756ed356558272fd52fe45974d1 | @login_required(login_url='login')
def buy(request: object, id: int):
'buy function processes 1 types of request.\n\n 1. GET\n We get the goods from the user’s database,\n check whether the purchase is possible and create a new purchase object,\n then save it, after which we send the message about the purchase to the administrator,\n otherwise we return an error in JSON format\n '
if (request.method == 'GET'):
return buy_processing(request, id) | buy function processes one type of request.
1. GET
We get the product from the database,
check whether the purchase is possible and create a new purchase object,
then save it, after which we send a message about the purchase to the administrator;
otherwise we return an error in JSON format | alfastaff_products/views.py | buy | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def buy(request: object, id: int):
'buy function processes 1 types of request.\n\n 1. GET\n We get the goods from the user’s database,\n check whether the purchase is possible and create a new purchase object,\n then save it, after which we send the message about the purchase to the administrator,\n otherwise we return an error in JSON format\n '
if (request.method == 'GET'):
return buy_processing(request, id) | @login_required(login_url='login')
def buy(request: object, id: int):
'buy function processes 1 types of request.\n\n 1. GET\n We get the goods from the user’s database,\n check whether the purchase is possible and create a new purchase object,\n then save it, after which we send the message about the purchase to the administrator,\n otherwise we return an error in JSON format\n '
if (request.method == 'GET'):
return buy_processing(request, id)<|docstring|>buy function processes one type of request.
1. GET
We get the product from the database,
check whether the purchase is possible and create a new purchase object,
then save it, after which we send a message about the purchase to the administrator;
otherwise we return an error in JSON format<|endoftext|>
dbbb6748e6d5c28507e8a2212e029c75f8bbb4241e33adebdadf849fc0a0be62 | @login_required(login_url='login')
def top_up_account(request: object):
'top up an account function processes 1 types of request.\n\n 1. POST\n '
if (request.method == 'POST'):
return top_up_account_processing(request) | top_up_account function processes one type of request.
1. POST | alfastaff_products/views.py | top_up_account | spanickroon/Alfa-Staff | 1 | python | @login_required(login_url='login')
def top_up_account(request: object):
'top up an account function processes 1 types of request.\n\n 1. POST\n '
if (request.method == 'POST'):
return top_up_account_processing(request) | @login_required(login_url='login')
def top_up_account(request: object):
'top up an account function processes 1 types of request.\n\n 1. POST\n '
if (request.method == 'POST'):
return top_up_account_processing(request)<|docstring|>top_up_account function processes one type of request.
1. POST<|endoftext|> |
3f22a56aadb020be1fc28cc267f6d6c137f57bedb0f83d661bf6365fa1749217 | def parse_loc(location_in: List[float], filecache=True):
'Takes location parameter and returns a list of coordinates.\n\n This function cleans the location parameter to a list of coordinates. If\n the location_in is a list it returns the list, else it uses the geopy\n interface to generatea list of coordinates from the descriptor.\n Args:\n location_in :List[float,float], str): List of latitude and longitude\n\n Returns:\n loc: List[float,float]\n '
if isinstance(location_in, str):
if filecache:
reqhash = md5(bytes(location_in, 'utf')).hexdigest()
temp_dir = Path(tempfile.gettempdir())
fname = Path((reqhash + '.geolocator_cache'))
if Path.exists(Path.joinpath(temp_dir, fname)):
print("Using cached answer for '{}' as geolocator request".format(location_in))
with gzip.open(Path.joinpath(temp_dir, fname), 'rb') as f:
locstring = f.readlines()
loc = [float(item.decode()) for item in locstring]
return loc
else:
geolocator = geopy.geocoders.Nominatim(user_agent='BuildingEnergySimulation')
location = geolocator.geocode(location_in)
loc = [location.latitude, location.longitude]
with gzip.open(Path.joinpath(temp_dir, fname), 'wb') as f:
for coord in loc:
f.write(bytes((str(coord) + '\n'), 'ASCII'))
return loc
else:
geolocator = geopy.geocoders.Nominatim(user_agent='BuildingEnergySimulation')
location = geolocator.geocode(location_in)
loc = [location.latitude, location.longitude]
elif isinstance(location_in, list):
loc = location_in
pass
return loc | Takes location parameter and returns a list of coordinates.
This function cleans the location parameter to a list of coordinates. If
the location_in is a list it returns the list, else it uses the geopy
interface to generate a list of coordinates from the descriptor.
Args:
location_in (List[float, float] or str): List of latitude and longitude
Returns:
loc: List[float,float] | BuildingEnergySimulation/construction.py | parse_loc | cbaretzky/BuiidingEnergySimulation | 3 | python | def parse_loc(location_in: List[float], filecache=True):
'Takes location parameter and returns a list of coordinates.\n\n This function cleans the location parameter to a list of coordinates. If\n the location_in is a list it returns the list, else it uses the geopy\n interface to generatea list of coordinates from the descriptor.\n Args:\n location_in :List[float,float], str): List of latitude and longitude\n\n Returns:\n loc: List[float,float]\n '
if isinstance(location_in, str):
if filecache:
reqhash = md5(bytes(location_in, 'utf')).hexdigest()
temp_dir = Path(tempfile.gettempdir())
fname = Path((reqhash + '.geolocator_cache'))
if Path.exists(Path.joinpath(temp_dir, fname)):
print("Using cached answer for '{}' as geolocator request".format(location_in))
with gzip.open(Path.joinpath(temp_dir, fname), 'rb') as f:
locstring = f.readlines()
loc = [float(item.decode()) for item in locstring]
return loc
else:
geolocator = geopy.geocoders.Nominatim(user_agent='BuildingEnergySimulation')
location = geolocator.geocode(location_in)
loc = [location.latitude, location.longitude]
with gzip.open(Path.joinpath(temp_dir, fname), 'wb') as f:
for coord in loc:
f.write(bytes((str(coord) + '\n'), 'ASCII'))
return loc
else:
geolocator = geopy.geocoders.Nominatim(user_agent='BuildingEnergySimulation')
location = geolocator.geocode(location_in)
loc = [location.latitude, location.longitude]
elif isinstance(location_in, list):
loc = location_in
pass
return loc | def parse_loc(location_in: List[float], filecache=True):
'Takes location parameter and returns a list of coordinates.\n\n This function cleans the location parameter to a list of coordinates. If\n the location_in is a list it returns the list, else it uses the geopy\n interface to generatea list of coordinates from the descriptor.\n Args:\n location_in :List[float,float], str): List of latitude and longitude\n\n Returns:\n loc: List[float,float]\n '
if isinstance(location_in, str):
if filecache:
reqhash = md5(bytes(location_in, 'utf')).hexdigest()
temp_dir = Path(tempfile.gettempdir())
fname = Path((reqhash + '.geolocator_cache'))
if Path.exists(Path.joinpath(temp_dir, fname)):
print("Using cached answer for '{}' as geolocator request".format(location_in))
with gzip.open(Path.joinpath(temp_dir, fname), 'rb') as f:
locstring = f.readlines()
loc = [float(item.decode()) for item in locstring]
return loc
else:
geolocator = geopy.geocoders.Nominatim(user_agent='BuildingEnergySimulation')
location = geolocator.geocode(location_in)
loc = [location.latitude, location.longitude]
with gzip.open(Path.joinpath(temp_dir, fname), 'wb') as f:
for coord in loc:
f.write(bytes((str(coord) + '\n'), 'ASCII'))
return loc
else:
geolocator = geopy.geocoders.Nominatim(user_agent='BuildingEnergySimulation')
location = geolocator.geocode(location_in)
loc = [location.latitude, location.longitude]
elif isinstance(location_in, list):
loc = location_in
pass
return loc<|docstring|>Takes location parameter and returns a list of coordinates.
This function cleans the location parameter to a list of coordinates. If
the location_in is a list it returns the list, else it uses the geopy
interface to generate a list of coordinates from the descriptor.
Args:
location_in (List[float, float] or str): List of latitude and longitude
Returns:
loc: List[float,float]<|endoftext|> |
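An illustrative call pattern for parse_loc, assuming geopy is installed and the Nominatim service is reachable; the example location string is arbitrary.

from BuildingEnergySimulation.construction import parse_loc  # module path as given in the record above

coords = parse_loc([52.52, 13.405])    # a list of coordinates is returned unchanged
coords = parse_loc('Berlin, Germany')  # a string is geocoded once, then answered from the gzip cache in the temp dir
print(coords)                          # e.g. [52.52, 13.405]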
f63c56c00bf311c27028ae32a40a6142525d7de423309be64057420e5789d90e | def get_component(self, searchterm: str) -> list:
'Return all components of a specifc type.\n\n Args:\n searchterm (str): Name of component/type\n\n Returns:\n found (List): List of objects with specific name/type.\n\n '
found = []
for (name, component) in self.components.items():
if (searchterm in name):
found.append(component)
return found | Return all components of a specifc type.
Args:
searchterm (str): Name of component/type
Returns:
found (List): List of objects with specific name/type. | BuildingEnergySimulation/construction.py | get_component | cbaretzky/BuiidingEnergySimulation | 3 | python | def get_component(self, searchterm: str) -> list:
'Return all components of a specifc type.\n\n Args:\n searchterm (str): Name of component/type\n\n Returns:\n found (List): List of objects with specific name/type.\n\n '
found = []
for (name, component) in self.components.items():
if (searchterm in name):
found.append(component)
return found | def get_component(self, searchterm: str) -> list:
'Return all components of a specifc type.\n\n Args:\n searchterm (str): Name of component/type\n\n Returns:\n found (List): List of objects with specific name/type.\n\n '
found = []
for (name, component) in self.components.items():
if (searchterm in name):
found.append(component)
return found<|docstring|>Return all components of a specifc type.
Args:
searchterm (str): Name of component/type
Returns:
found (List): List of objects with specific name/type.<|endoftext|> |
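A short usage sketch for get_component; the building instance and the 'Wall' component name are assumptions for illustration.

walls = building.get_component('Wall')  # every registered component whose name contains 'Wall'
for wall in walls:
    print(wall)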
82f5aa039e4a8cec272d3780121be8d2b0546c2b50e1bca448e41a5de5805dc2 | def reg(self, component, *args, **kwargs):
'Wrapper to register from within the building instance.\n\n instead of::\n $ bes.Component.reg(*args, **kwargs)\n\n it can be::\n $ building.reg(bes.Wall, *args, **kwargs)\n\n '
component.reg(self, *args, **kwargs) | Wrapper to register from within the building instance.
instead of::
$ bes.Component.reg(*args, **kwargs)
it can be::
$ building.reg(bes.Wall, *args, **kwargs) | BuildingEnergySimulation/construction.py | reg | cbaretzky/BuiidingEnergySimulation | 3 | python | def reg(self, component, *args, **kwargs):
'Wrapper to register from within the building instance.\n\n instead of::\n $ bes.Component.reg(*args, **kwargs)\n\n it can be::\n $ building.reg(bes.Wall, *args, **kwargs)\n\n '
component.reg(self, *args, **kwargs) | def reg(self, component, *args, **kwargs):
'Wrapper to register from within the building instance.\n\n instead of::\n $ bes.Component.reg(*args, **kwargs)\n\n it can be::\n $ building.reg(bes.Wall, *args, **kwargs)\n\n '
component.reg(self, *args, **kwargs)<|docstring|>Wrapper to register from within the building instance.
instead of::
$ bes.Component.reg(*args, **kwargs)
it can be::
$ building.reg(bes.Wall, *args, **kwargs)<|endoftext|> |
18676f5afc1a47856415ecd26505154eae44a62041df3f6eb7e72b73a5284d53 | def simulate(self, timeframe_start: datetime.datetime, timeframe_stop: datetime.datetime) -> pd.DataFrame:
'Run the simulation from timeframe_start to timeframe_stop with the\n defined timestep\n\n Args:\n timeframe_start (datetime.datetime): First date of timeframe.\n timeframe_stop (datetime.datetime): Last date of timeframe.\n '
freq = '{}s'.format(self.timestep)
times = pd.date_range(timeframe_start, timeframe_stop, freq=freq)
self.sim_result_list = []
for time in times:
self.date = pd.to_datetime(time)
self.result = {}
self.result.update({'Date': self.date, 'Tamb': self.Pvgis[self.date]['Tamb']})
for (_, component) in self.components.items():
_ = component.out
self.sim_result_list.append(self.result)
self.sim_results = pd.DataFrame(self.sim_result_list)
self.sim_results.index = self.sim_results.Date
return self.sim_results | Run the simulation from timeframe_start to timeframe_stop with the
defined timestep
Args:
timeframe_start (datetime.datetime): First date of timeframe.
timeframe_stop (datetime.datetime): Last date of timeframe. | BuildingEnergySimulation/construction.py | simulate | cbaretzky/BuiidingEnergySimulation | 3 | python | def simulate(self, timeframe_start: datetime.datetime, timeframe_stop: datetime.datetime) -> pd.DataFrame:
'Run the simulation from timeframe_start to timeframe_stop with the\n defined timestep\n\n Args:\n timeframe_start (datetime.datetime): First date of timeframe.\n timeframe_stop (datetime.datetime): Last date of timeframe.\n '
freq = '{}s'.format(self.timestep)
times = pd.date_range(timeframe_start, timeframe_stop, freq=freq)
self.sim_result_list = []
for time in times:
self.date = pd.to_datetime(time)
self.result = {}
self.result.update({'Date': self.date, 'Tamb': self.Pvgis[self.date]['Tamb']})
for (_, component) in self.components.items():
_ = component.out
self.sim_result_list.append(self.result)
self.sim_results = pd.DataFrame(self.sim_result_list)
self.sim_results.index = self.sim_results.Date
return self.sim_results | def simulate(self, timeframe_start: datetime.datetime, timeframe_stop: datetime.datetime) -> pd.DataFrame:
'Run the simulation from timeframe_start to timeframe_stop with the\n defined timestep\n\n Args:\n timeframe_start (datetime.datetime): First date of timeframe.\n timeframe_stop (datetime.datetime): Last date of timeframe.\n '
freq = '{}s'.format(self.timestep)
times = pd.date_range(timeframe_start, timeframe_stop, freq=freq)
self.sim_result_list = []
for time in times:
self.date = pd.to_datetime(time)
self.result = {}
self.result.update({'Date': self.date, 'Tamb': self.Pvgis[self.date]['Tamb']})
for (_, component) in self.components.items():
_ = component.out
self.sim_result_list.append(self.result)
self.sim_results = pd.DataFrame(self.sim_result_list)
self.sim_results.index = self.sim_results.Date
return self.sim_results<|docstring|>Run the simulation from timeframe_start to timeframe_stop with the
defined timestep
Args:
timeframe_start (datetime.datetime): First date of timeframe.
timeframe_stop (datetime.datetime): Last date of timeframe.<|endoftext|> |
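A hedged sketch of driving simulate over one week; it assumes 'building' is a fully configured Building instance with registered components, PVGIS data and a timestep in seconds.

import datetime

start = datetime.datetime(2019, 1, 1)
stop = datetime.datetime(2019, 1, 8)
results = building.simulate(start, stop)  # pandas DataFrame indexed by Date
print(results['Tamb'].describe())         # ambient temperature is part of every result row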
31d164e7a3f0e844e4b9fa06fe668212f2978940c8abe4099d356cd4f43d15af | def reg(self, name, head):
'\n Let Classes register new losses\n '
pass | Let Classes register new losses | BuildingEnergySimulation/construction.py | reg | cbaretzky/BuiidingEnergySimulation | 3 | python | def reg(self, name, head):
'\n \n '
pass | def reg(self, name, head):
'\n \n '
pass<|docstring|>Let Classes register new losses<|endoftext|> |
6e1050a3e6f9ba171286894e0e525aecf154cd870c2fd6581ff32654ae0918a4 | def update(self, name, vals):
'\n Shift Timestamp forward\n Do Calculations\n '
pass | Shift Timestamp forward
Do Calculations | BuildingEnergySimulation/construction.py | update | cbaretzky/BuiidingEnergySimulation | 3 | python | def update(self, name, vals):
'\n Shift Timestamp forward\n Do Calculations\n '
pass | def update(self, name, vals):
'\n Shift Timestamp forward\n Do Calculations\n '
pass<|docstring|>Shift Timestamp forward
Do Calculations<|endoftext|> |
984dc1603a88837ed59d6b8a23928a9d5be080c41b3fb014bad1befa9000288f | def build_BNN(data, output_condition, cd=98, mss=1, md=30, relevant_neuron_dictionary={}, with_data=1, discretization=0, cluster_means=None):
'\n\tStarting from the target condition and until the conditions with respect \n\tto the first hidden layer, it extracts a DNF that explains each condition\n\tusing conditions of the next shallower layer\n\t\n\tparam data: instance of DataSet\n\tparam output_condition: condition of interest\n\tparam cd: class dominance\n\tparam mss: minimum dataset size\n\tparam md: maximum tree depth\n\tparam with_data: Avoid ==0. If == 1, the regular simplification operations are performed, if == 2, post-ppruning is performed\n\tparam discretization: method used to determine the thresholds that split the activation range of each neuron\n\t'
BNN = {}
deep_layer = data.output_layer
target_class = [output_condition]
print('deep layer: ')
print(deep_layer)
print('targetclass: ')
print(target_class)
while (deep_layer > 0):
target_split_values = set(((l, n, t) for (l, n, t, u) in target_class))
print('target_split_values: ')
print(target_split_values)
if (not target_split_values):
warnings.warn(('Warning: no split points, returning current dictionary at layer: ' + str(deep_layer)))
print('Target split values', target_split_values)
used_shallow_conditions = set([])
current_data = temp_data(data, (deep_layer - 1), target_class)
if (discretization == 0):
split_points = dis.all_features_trivial_mid_points(current_data)
elif ((discretization == 1) or (discretization == 3)):
split_points = dis.one_time_discretization(current_data, discretization, rnd=relevant_neuron_dictionary, tsv=list(target_split_values))
elif ((discretization == 2) or (discretization == 4)):
split_points = cluster_means[(deep_layer - 1)]
elif (discretization == 6):
colum = [[d[c] for d in current_data] for c in range((len(current_data[0]) - 1))]
split_points = [[(sum(vq.kmeans(v, 2)[0]) / 2)] for v in colum]
elif (discretization == 5):
if (deep_layer == 1):
split_points = [[0.5] for l in range((len(current_data[0]) - 1))]
else:
split_points = [[0] for l in range((len(current_data[0]) - 1))]
print('Split points', [len(l) for l in split_points])
print('')
for i in target_split_values:
print('')
print('i: ', i)
t = time.time()
i_data = temp_data(data, (deep_layer - 1), i)
tree = None
if (relevant_neuron_dictionary and (discretization == 0)):
pruned_split_points = [_sp(j, i, split_points, relevant_neuron_dictionary) for j in range(len(split_points))]
tree = dt.buildtree(i_data, pruned_split_points, class_dominance=cd, min_set_size=mss, max_depth=md, root=True)
else:
tree = dt.buildtree(i_data, split_points, class_dominance=cd, min_set_size=mss, max_depth=md, root=True)
if (not tree):
cero_class = sum((1 for x in i_data if (x[(- 1)] == 0)))
one_class = sum((1 for x in i_data if (x[(- 1)] == 1)))
if (cero_class > one_class):
BNN[(i[0], i[1], i[2], True)] = False
BNN[(i[0], i[1], i[2], False)] = True
else:
BNN[(i[0], i[1], i[2], False)] = True
BNN[(i[0], i[1], i[2], True)] = False
break
print('Tree is formed')
print('Time: ', (time.time() - t))
dnfs = dt.get_dnfs((deep_layer - 1), tree)
print('DNF:')
print(dnfs)
if ((i[0], i[1], i[2], False) in target_class):
print('False case')
pruned = None
if isinstance(dnfs[0], list):
print('Fidelity pre-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
print('Precision pre-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
print('Recall pre-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
data.update_dictionary([(l, n, t) for conj in dnfs[0] for (l, n, t, u) in conj])
if (with_data == 0):
pruned = s.boolean_simplify_basic(dnfs[0])
elif (with_data >= 1):
pruned = s.boolean_simplify_complex(dnfs[0])
if (with_data == 2):
pruned = p.post_prune(pruned, (i[0], i[1], i[2], False), data.example_cond_dict, data.dict_indexes, data=None)
used_shallow_conditions.update(set((c for conj in pruned for c in conj)))
else:
pruned = dnfs[0]
print('Fidelity post-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
print('Precision post-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
print('Recall post-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
BNN[(i[0], i[1], i[2], False)] = pruned
print((i[0], i[1], i[2], False), pruned)
if ((i[0], i[1], i[2], True) in target_class):
print('True case')
pruned = None
if isinstance(dnfs[1], list):
print('Fidelity pre-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
print('Precision pre-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
print('Recall pre-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
data.update_dictionary([(l, n, t) for conj in dnfs[1] for (l, n, t, u) in conj])
if (with_data == 0):
pruned = s.boolean_simplify_basic(dnfs[1])
elif (with_data >= 1):
pruned = s.boolean_simplify_complex(dnfs[1])
if (with_data == 2):
pruned = p.post_prune(pruned, (i[0], i[1], i[2], True), data.example_cond_dict, data.dict_indexes, data=None)
used_shallow_conditions.update(set((c for conj in pruned for c in conj)))
else:
pruned = dnfs[1]
print('Fidelity post-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
print('Precision post-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
print('Recall post-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
BNN[(i[0], i[1], i[2], True)] = pruned
print((i[0], i[1], i[2], True), pruned)
deep_layer -= 1
target_class = list(used_shallow_conditions)
return BNN | Starting from the target condition and until the conditions with respect
to the first hidden layer, it extracts a DNF that explains each condition
using conditions of the next shallower layer
param data: instance of DataSet
param output_condition: condition of interest
param cd: class dominance
param mss: minimum dataset size
param md: maximum tree depth
param with_data: Avoid == 0. If == 1, the regular simplification operations are performed; if == 2, post-pruning is performed
param discretization: method used to determine the thresholds that split the activation range of each neuron | lens/models/ext_models/deep_red/decision_tree_induction.py | build_BNN | pietrobarbiero/logic_explained_networks | 18 | python | def build_BNN(data, output_condition, cd=98, mss=1, md=30, relevant_neuron_dictionary={}, with_data=1, discretization=0, cluster_means=None):
'\n\tStarting from the target condition and until the conditions with respect \n\tto the first hidden layer, it extracts a DNF that explains each condition\n\tusing conditions of the next shallower layer\n\t\n\tparam data: instance of DataSet\n\tparam output_condition: condition of interest\n\tparam cd: class dominance\n\tparam mss: minimum dataset size\n\tparam md: maximum tree depth\n\tparam with_data: Avoid ==0. If == 1, the regular simplification operations are performed, if == 2, post-ppruning is performed\n\tparam discretization: method used to determine the thresholds that split the activation range of each neuron\n\t'
BNN = {}
deep_layer = data.output_layer
target_class = [output_condition]
print('deep layer: ')
print(deep_layer)
print('targetclass: ')
print(target_class)
while (deep_layer > 0):
target_split_values = set(((l, n, t) for (l, n, t, u) in target_class))
print('target_split_values: ')
print(target_split_values)
if (not target_split_values):
warnings.warn(('Warning: no split points, returning current dictionary at layer: ' + str(deep_layer)))
print('Target split values', target_split_values)
used_shallow_conditions = set([])
current_data = temp_data(data, (deep_layer - 1), target_class)
if (discretization == 0):
split_points = dis.all_features_trivial_mid_points(current_data)
elif ((discretization == 1) or (discretization == 3)):
split_points = dis.one_time_discretization(current_data, discretization, rnd=relevant_neuron_dictionary, tsv=list(target_split_values))
elif ((discretization == 2) or (discretization == 4)):
split_points = cluster_means[(deep_layer - 1)]
elif (discretization == 6):
colum = [[d[c] for d in current_data] for c in range((len(current_data[0]) - 1))]
split_points = [[(sum(vq.kmeans(v, 2)[0]) / 2)] for v in colum]
elif (discretization == 5):
if (deep_layer == 1):
split_points = [[0.5] for l in range((len(current_data[0]) - 1))]
else:
split_points = [[0] for l in range((len(current_data[0]) - 1))]
print('Split points', [len(l) for l in split_points])
print()
for i in target_split_values:
print()
print('i: ', i)
t = time.time()
i_data = temp_data(data, (deep_layer - 1), i)
tree = None
if (relevant_neuron_dictionary and (discretization == 0)):
pruned_split_points = [_sp(j, i, split_points, relevant_neuron_dictionary) for j in range(len(split_points))]
tree = dt.buildtree(i_data, pruned_split_points, class_dominance=cd, min_set_size=mss, max_depth=md, root=True)
else:
tree = dt.buildtree(i_data, split_points, class_dominance=cd, min_set_size=mss, max_depth=md, root=True)
if (not tree):
cero_class = sum((1 for x in i_data if (x[(- 1)] == 0)))
one_class = sum((1 for x in i_data if (x[(- 1)] == 1)))
if (cero_class > one_class):
BNN[(i[0], i[1], i[2], True)] = False
BNN[(i[0], i[1], i[2], False)] = True
else:
BNN[(i[0], i[1], i[2], False)] = True
BNN[(i[0], i[1], i[2], True)] = False
break
print('Tree is formed')
print('Time: ', (time.time() - t))
dnfs = dt.get_dnfs((deep_layer - 1), tree)
print('DNF:')
print(dnfs)
if ((i[0], i[1], i[2], False) in target_class):
print('False case')
pruned = None
if isinstance(dnfs[0], list):
print('Fidelity pre-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
print('Precision pre-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
print('Recall pre-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
data.update_dictionary([(l, n, t) for conj in dnfs[0] for (l, n, t, u) in conj])
if (with_data == 0):
pruned = s.boolean_simplify_basic(dnfs[0])
elif (with_data >= 1):
pruned = s.boolean_simplify_complex(dnfs[0])
if (with_data == 2):
pruned = p.post_prune(pruned, (i[0], i[1], i[2], False), data.example_cond_dict, data.dict_indexes, data=None)
used_shallow_conditions.update(set((c for conj in pruned for c in conj)))
else:
pruned = dnfs[0]
print('Fidelity post-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
print('Precision post-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
print('Recall post-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
BNN[(i[0], i[1], i[2], False)] = pruned
print((i[0], i[1], i[2], False), pruned)
if ((i[0], i[1], i[2], True) in target_class):
print('True case')
pruned = None
if isinstance(dnfs[1], list):
print('Fidelity pre-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
print('Precision pre-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
print('Recall pre-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
data.update_dictionary([(l, n, t) for conj in dnfs[1] for (l, n, t, u) in conj])
if (with_data == 0):
pruned = s.boolean_simplify_basic(dnfs[1])
elif (with_data >= 1):
pruned = s.boolean_simplify_complex(dnfs[1])
if (with_data == 2):
pruned = p.post_prune(pruned, (i[0], i[1], i[2], True), data.example_cond_dict, data.dict_indexes, data=None)
used_shallow_conditions.update(set((c for conj in pruned for c in conj)))
else:
pruned = dnfs[1]
print('Fidelity post-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
print('Precision post-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
print('Recall post-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
BNN[(i[0], i[1], i[2], True)] = pruned
print((i[0], i[1], i[2], True), pruned)
deep_layer -= 1
target_class = list(used_shallow_conditions)
return BNN | def build_BNN(data, output_condition, cd=98, mss=1, md=30, relevant_neuron_dictionary={}, with_data=1, discretization=0, cluster_means=None):
'\n\tStarting from the target condition and until the conditions with respect \n\tto the first hidden layer, it extracts a DNF that explains each condition\n\tusing conditions of the next shallower layer\n\t\n\tparam data: instance of DataSet\n\tparam output_condition: condition of interest\n\tparam cd: class dominance\n\tparam mss: minimum dataset size\n\tparam md: maximum tree depth\n\tparam with_data: Avoid ==0. If == 1, the regular simplification operations are performed, if == 2, post-ppruning is performed\n\tparam discretization: method used to determine the thresholds that split the activation range of each neuron\n\t'
BNN = {}
deep_layer = data.output_layer
target_class = [output_condition]
print('deep layer: ')
print(deep_layer)
print('targetclass: ')
print(target_class)
while (deep_layer > 0):
target_split_values = set(((l, n, t) for (l, n, t, u) in target_class))
print('target_split_values: ')
print(target_split_values)
if (not target_split_values):
warnings.warn(('Warning: no split points, returning current dictionary at layer: ' + str(deep_layer)))
print('Target split values', target_split_values)
used_shallow_conditions = set([])
current_data = temp_data(data, (deep_layer - 1), target_class)
if (discretization == 0):
split_points = dis.all_features_trivial_mid_points(current_data)
elif ((discretization == 1) or (discretization == 3)):
split_points = dis.one_time_discretization(current_data, discretization, rnd=relevant_neuron_dictionary, tsv=list(target_split_values))
elif ((discretization == 2) or (discretization == 4)):
split_points = cluster_means[(deep_layer - 1)]
elif (discretization == 6):
colum = [[d[c] for d in current_data] for c in range((len(current_data[0]) - 1))]
split_points = [[(sum(vq.kmeans(v, 2)[0]) / 2)] for v in colum]
elif (discretization == 5):
if (deep_layer == 1):
split_points = [[0.5] for l in range((len(current_data[0]) - 1))]
else:
split_points = [[0] for l in range((len(current_data[0]) - 1))]
print('Split points', [len(l) for l in split_points])
print()
for i in target_split_values:
print()
print('i: ', i)
t = time.time()
i_data = temp_data(data, (deep_layer - 1), i)
tree = None
if (relevant_neuron_dictionary and (discretization == 0)):
pruned_split_points = [_sp(j, i, split_points, relevant_neuron_dictionary) for j in range(len(split_points))]
tree = dt.buildtree(i_data, pruned_split_points, class_dominance=cd, min_set_size=mss, max_depth=md, root=True)
else:
tree = dt.buildtree(i_data, split_points, class_dominance=cd, min_set_size=mss, max_depth=md, root=True)
if (not tree):
cero_class = sum((1 for x in i_data if (x[(- 1)] == 0)))
one_class = sum((1 for x in i_data if (x[(- 1)] == 1)))
if (cero_class > one_class):
BNN[(i[0], i[1], i[2], True)] = False
BNN[(i[0], i[1], i[2], False)] = True
else:
BNN[(i[0], i[1], i[2], False)] = True
BNN[(i[0], i[1], i[2], True)] = False
break
print('Tree is formed')
print('Time: ', (time.time() - t))
dnfs = dt.get_dnfs((deep_layer - 1), tree)
print('DNF:')
print(dnfs)
if ((i[0], i[1], i[2], False) in target_class):
print('False case')
pruned = None
if isinstance(dnfs[0], list):
print('Fidelity pre-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
print('Precision pre-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
print('Recall pre-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], False), dnfs[0], True, False, False, True))
data.update_dictionary([(l, n, t) for conj in dnfs[0] for (l, n, t, u) in conj])
if (with_data == 0):
pruned = s.boolean_simplify_basic(dnfs[0])
elif (with_data >= 1):
pruned = s.boolean_simplify_complex(dnfs[0])
if (with_data == 2):
pruned = p.post_prune(pruned, (i[0], i[1], i[2], False), data.example_cond_dict, data.dict_indexes, data=None)
used_shallow_conditions.update(set((c for conj in pruned for c in conj)))
else:
pruned = dnfs[0]
print('Fidelity post-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
print('Precision post-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
print('Recall post-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], False), pruned, True, False, False, True))
BNN[(i[0], i[1], i[2], False)] = pruned
print((i[0], i[1], i[2], False), pruned)
if ((i[0], i[1], i[2], True) in target_class):
print('True case')
pruned = None
if isinstance(dnfs[1], list):
print('Fidelity pre-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
print('Precision pre-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
print('Recall pre-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], True), dnfs[1], True, False, False, True))
data.update_dictionary([(l, n, t) for conj in dnfs[1] for (l, n, t, u) in conj])
if (with_data == 0):
pruned = s.boolean_simplify_basic(dnfs[1])
elif (with_data >= 1):
pruned = s.boolean_simplify_complex(dnfs[1])
if (with_data == 2):
pruned = p.post_prune(pruned, (i[0], i[1], i[2], True), data.example_cond_dict, data.dict_indexes, data=None)
used_shallow_conditions.update(set((c for conj in pruned for c in conj)))
else:
pruned = dnfs[1]
print('Fidelity post-pruning:', ef.accuracy_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
print('Precision post-pruning:', ef.precision_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
print('Recall post-pruning:', ef.recall_of_dnf(data, (i[0], i[1], i[2], True), pruned, True, False, False, True))
BNN[(i[0], i[1], i[2], True)] = pruned
print((i[0], i[1], i[2], True), pruned)
deep_layer -= 1
target_class = list(used_shallow_conditions)
return BNN<|docstring|>Starting from the target condition and until the conditions with respect
to the first hidden layer, it extracts a DNF that explains each condition
using conditions of the next shallower layer
param data: instance of DataSet
param output_condition: condition of interest
param cd: class dominance
param mss: minimum dataset size
param md: maximum tree depth
param with_data: Avoid == 0. If == 1, the regular simplification operations are performed; if == 2, post-pruning is performed
param discretization: method used to determine the thresholds that split the activation range of each neuron<|endoftext|> |
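A hedged sketch of a build_BNN call; the DataSet instance and the (layer, neuron, threshold, truth value) target tuple are placeholders in the format the function expects.

# hypothetical target: output-layer neuron 0 being above an activation of 0.5
output_condition = (data.output_layer, 0, 0.5, True)
bnn = build_BNN(data, output_condition, cd=98, mss=1, md=30,
                relevant_neuron_dictionary={}, with_data=1, discretization=0)
for condition, dnf in bnn.items():
    print(condition, '->', dnf)  # each condition maps to a DNF over next-shallower-layer conditions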
f628079eb8cc10a08e5ec6354cb9b3db9a175615b978d61bab8b1fec0c015d4b | def temp_data(data, shallow, tc, deep=None):
'\n\t param data: the dataset\n\t type data: DataSet\n\t param shallow: shallow layer index\n\t type shallow: int\n\t param target_class: list of split points\n\t type target_class: list of (int, int, float) tuples\n\t return: a dataset that includes all instances from the train and\n\tvaldation sets made of the attributes of the shallow layer and a class\n\tmade up of a concatenation of the target_class values\n\t rtype: list of lists\n\t'
if (not deep):
deep = (shallow + 1)
return [(list(e.values[shallow]) + [target_class(tc, e.values[deep])]) for e in data.get_train_obs()] | param data: the dataset
type data: DataSet
param shallow: shallow layer index
type shallow: int
param target_class: list of split points
type target_class: list of (int, int, float) tuples
return: a dataset that includes all instances from the train and
validation sets made of the attributes of the shallow layer and a class
made up of a concatenation of the target_class values
rtype: list of lists | lens/models/ext_models/deep_red/decision_tree_induction.py | temp_data | pietrobarbiero/logic_explained_networks | 18 | python | def temp_data(data, shallow, tc, deep=None):
'\n\t param data: the dataset\n\t type data: DataSet\n\t param shallow: shallow layer index\n\t type shallow: int\n\t param target_class: list of split points\n\t type target_class: list of (int, int, float) tuples\n\t return: a dataset that includes all instances from the train and\n\tvaldation sets made of the attributes of the shallow layer and a class\n\tmade up of a concatenation of the target_class values\n\t rtype: list of lists\n\t'
if (not deep):
deep = (shallow + 1)
return [(list(e.values[shallow]) + [target_class(tc, e.values[deep])]) for e in data.get_train_obs()] | def temp_data(data, shallow, tc, deep=None):
'\n\t param data: the dataset\n\t type data: DataSet\n\t param shallow: shallow layer index\n\t type shallow: int\n\t param target_class: list of split points\n\t type target_class: list of (int, int, float) tuples\n\t return: a dataset that includes all instances from the train and\n\tvaldation sets made of the attributes of the shallow layer and a class\n\tmade up of a concatenation of the target_class values\n\t rtype: list of lists\n\t'
if (not deep):
deep = (shallow + 1)
return [(list(e.values[shallow]) + [target_class(tc, e.values[deep])]) for e in data.get_train_obs()]<|docstring|>param data: the dataset
type data: DataSet
param shallow: shallow layer index
type shallow: int
param target_class: list of split points
type target_class: list of (int, int, float) tuples
return: a dataset that includes all instances from the train and
validation sets made of the attributes of the shallow layer and a class
made up of a concatenation of the target_class values
rtype: list of lists<|endoftext|> |
cdd5b47a1bdd227c764cb8ea4326c91685bd04df344670308c5bca873bf88992 | def server_error_401(request, template_name='401.html'):
'A simple 401 handler so we get media.'
response = render(request, template_name)
response.status_code = 401
return response | A simple 401 handler so we get media. | readthedocs/docsitalia/views/core_views.py | server_error_401 | italia/readthedocs.org | 19 | python | def server_error_401(request, template_name='401.html'):
response = render(request, template_name)
response.status_code = 401
return response | def server_error_401(request, template_name='401.html'):
response = render(request, template_name)
response.status_code = 401
return response<|docstring|>A simple 401 handler so we get media.<|endoftext|> |
b3721ac6a314deb54f56677b0d7ef01bff41bac4220da39acc775878456e0d34 | def search_by_tag(request, tag):
'Wrapper around readthedocs.search.views.elastic_search to search by tag.'
get_data = request.GET.copy()
if (get_data.get('tags') or get_data.get('q') or get_data.get('type')):
real_search = ('%s?%s' % (reverse('search'), request.GET.urlencode()))
return HttpResponseRedirect(real_search)
if (not get_data.get('q')):
get_data['q'] = '*'
if (not get_data.get('type')):
get_data['type'] = 'file'
get_data.appendlist('tags', tag)
request.GET = get_data
return elastic_search(request) | Wrapper around readthedocs.search.views.elastic_search to search by tag. | readthedocs/docsitalia/views/core_views.py | search_by_tag | italia/readthedocs.org | 19 | python | def search_by_tag(request, tag):
get_data = request.GET.copy()
if (get_data.get('tags') or get_data.get('q') or get_data.get('type')):
real_search = ('%s?%s' % (reverse('search'), request.GET.urlencode()))
return HttpResponseRedirect(real_search)
if (not get_data.get('q')):
get_data['q'] = '*'
if (not get_data.get('type')):
get_data['type'] = 'file'
get_data.appendlist('tags', tag)
request.GET = get_data
return elastic_search(request) | def search_by_tag(request, tag):
get_data = request.GET.copy()
if (get_data.get('tags') or get_data.get('q') or get_data.get('type')):
real_search = ('%s?%s' % (reverse('search'), request.GET.urlencode()))
return HttpResponseRedirect(real_search)
if (not get_data.get('q')):
get_data['q'] = '*'
if (not get_data.get('type')):
get_data['type'] = 'file'
get_data.appendlist('tags', tag)
request.GET = get_data
return elastic_search(request)<|docstring|>Wrapper around readthedocs.search.views.elastic_search to search by tag.<|endoftext|> |
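A hedged illustration of the redirect behaviour of search_by_tag using Django's RequestFactory; the paths and the 'api' tag are made up, and a real run would also need the project's URL configuration and search backend.

from django.test import RequestFactory

factory = RequestFactory()
# a request that already carries q (or tags/type) is redirected to the main search URL
response = search_by_tag(factory.get('/search/tag/api/', {'q': 'webhook'}), tag='api')
assert response.status_code == 302
# a bare tag request falls through to elastic_search with q='*', type='file', tags=['api']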
f6bae6e311ed47222faa8e271d0bd39a40da8c004bb7a331313f6a4efe2d6845 | def get_queryset(self):
'\n Filter projects to show in homepage.\n\n We show in homepage projects that matches the following requirements:\n - Publisher is active\n - PublisherProject is active\n - document (Project) has a public build\n - Build is success and finished\n\n Ordering by:\n - ProjectOrder model values\n - modified_date descending\n - pub_date descending\n '
active_pub_projects = PublisherProject.objects.filter(active=True, publisher__active=True)
qs = get_projects_with_builds()
order_by_list = ['-modified_date', '-pub_date']
projects_priority_list = ProjectOrder.objects.all().values_list('project', flat=True)
if projects_priority_list:
project_priority_order = Case(*[When(id=pk, then=pos) for (pos, pk) in enumerate(projects_priority_list)])
order_by_list.insert(0, project_priority_order)
return qs.filter(publisherproject__in=active_pub_projects).order_by(*order_by_list)[:24] | Filter projects to show in homepage.
We show on the homepage projects that match the following requirements:
- Publisher is active
- PublisherProject is active
- document (Project) has a public build
- Build is successful and finished
Ordering by:
- ProjectOrder model values
- modified_date descending
- pub_date descending | readthedocs/docsitalia/views/core_views.py | get_queryset | italia/readthedocs.org | 19 | python | def get_queryset(self):
'\n Filter projects to show in homepage.\n\n We show in homepage projects that matches the following requirements:\n - Publisher is active\n - PublisherProject is active\n - document (Project) has a public build\n - Build is success and finished\n\n Ordering by:\n - ProjectOrder model values\n - modified_date descending\n - pub_date descending\n '
active_pub_projects = PublisherProject.objects.filter(active=True, publisher__active=True)
qs = get_projects_with_builds()
order_by_list = ['-modified_date', '-pub_date']
projects_priority_list = ProjectOrder.objects.all().values_list('project', flat=True)
if projects_priority_list:
project_priority_order = Case(*[When(id=pk, then=pos) for (pos, pk) in enumerate(projects_priority_list)])
order_by_list.insert(0, project_priority_order)
return qs.filter(publisherproject__in=active_pub_projects).order_by(*order_by_list)[:24] | def get_queryset(self):
'\n Filter projects to show in homepage.\n\n We show in homepage projects that matches the following requirements:\n - Publisher is active\n - PublisherProject is active\n - document (Project) has a public build\n - Build is success and finished\n\n Ordering by:\n - ProjectOrder model values\n - modified_date descending\n - pub_date descending\n '
active_pub_projects = PublisherProject.objects.filter(active=True, publisher__active=True)
qs = get_projects_with_builds()
order_by_list = ['-modified_date', '-pub_date']
projects_priority_list = ProjectOrder.objects.all().values_list('project', flat=True)
if projects_priority_list:
project_priority_order = Case(*[When(id=pk, then=pos) for (pos, pk) in enumerate(projects_priority_list)])
order_by_list.insert(0, project_priority_order)
return qs.filter(publisherproject__in=active_pub_projects).order_by(*order_by_list)[:24]<|docstring|>Filter projects to show in homepage.
We show on the homepage projects that match the following requirements:
- Publisher is active
- PublisherProject is active
- document (Project) has a public build
- Build is successful and finished
Ordering by:
- ProjectOrder model values
- modified_date descending
- pub_date descending<|endoftext|> |
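The Case/When ordering idiom used above generalises; a minimal standalone sketch with hypothetical ProjectOrder ids:

from django.db.models import Case, When

priority_ids = [7, 3, 12]  # e.g. project pks taken from ProjectOrder, in display order
priority = Case(*[When(id=pk, then=pos) for pos, pk in enumerate(priority_ids)])
queryset = Project.objects.order_by(priority, '-modified_date', '-pub_date')
# listed projects sort by their position in priority_ids; unlisted rows get NULL for the
# Case expression, so their placement relative to the listed ones is database-dependent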
b8e66b40eb2337579c1be9f13d131b6045fc56306af1b024587dcf60335856da | def get_queryset(self):
'\n Filter publisher to be listed.\n\n We show publishers that matches the following requirements:\n - are active\n - have documents with successful public build\n '
active_pub_projects = PublisherProject.objects.filter(active=True, publisher__active=True)
publishers_with_projects = get_projects_with_builds().filter(publisherproject__in=active_pub_projects).values_list('publisherproject__publisher', flat=True)
return Publisher.objects.filter(pk__in=publishers_with_projects) | Filter publishers to be listed.
We show publishers that match the following requirements:
- are active
- have documents with successful public build | readthedocs/docsitalia/views/core_views.py | get_queryset | italia/readthedocs.org | 19 | python | def get_queryset(self):
'\n Filter publisher to be listed.\n\n We show publishers that matches the following requirements:\n - are active\n - have documents with successful public build\n '
active_pub_projects = PublisherProject.objects.filter(active=True, publisher__active=True)
publishers_with_projects = get_projects_with_builds().filter(publisherproject__in=active_pub_projects).values_list('publisherproject__publisher', flat=True)
return Publisher.objects.filter(pk__in=publishers_with_projects) | def get_queryset(self):
'\n Filter publisher to be listed.\n\n We show publishers that matches the following requirements:\n - are active\n - have documents with successful public build\n '
active_pub_projects = PublisherProject.objects.filter(active=True, publisher__active=True)
publishers_with_projects = get_projects_with_builds().filter(publisherproject__in=active_pub_projects).values_list('publisherproject__publisher', flat=True)
return Publisher.objects.filter(pk__in=publishers_with_projects)<|docstring|>Filter publisher to be listed.
We show publishers that match the following requirements:
- are active
- have documents with successful public build<|endoftext|> |
7e3e0c71c1583458179e010ab98d9d2e638113250f897491fb96ab254ab47ebc | def get_queryset(self):
'Filter for active Publisher.'
return Publisher.objects.filter(active=True) | Filter for active Publisher. | readthedocs/docsitalia/views/core_views.py | get_queryset | italia/readthedocs.org | 19 | python | def get_queryset(self):
return Publisher.objects.filter(active=True) | def get_queryset(self):
return Publisher.objects.filter(active=True)<|docstring|>Filter for active Publisher.<|endoftext|> |
1f2d72f0c43e1a2f020174a9fc217bc5fd765b7ed3d140adc131b16264003916 | def get_queryset(self):
'Filter for active PublisherProject.'
return PublisherProject.objects.filter(active=True, publisher__active=True) | Filter for active PublisherProject. | readthedocs/docsitalia/views/core_views.py | get_queryset | italia/readthedocs.org | 19 | python | def get_queryset(self):
return PublisherProject.objects.filter(active=True, publisher__active=True) | def get_queryset(self):
return PublisherProject.objects.filter(active=True, publisher__active=True)<|docstring|>Filter for active PublisherProject.<|endoftext|> |
30a63a0d8d8bbc1b6a5498fb6b6f46921344b8227de2431753d1c34370a712d1 | def get_queryset(self):
'Filter projects based on user permissions.'
return Project.objects.protected(self.request.user) | Filter projects based on user permissions. | readthedocs/docsitalia/views/core_views.py | get_queryset | italia/readthedocs.org | 19 | python | def get_queryset(self):
return Project.objects.protected(self.request.user) | def get_queryset(self):
return Project.objects.protected(self.request.user)<|docstring|>Filter projects based on user permissions.<|endoftext|> |
13b5c61b3b9ffc58dd999c989d3a62ab27f5a3ee16ba31feb8f4489276f5fc10 | def get(self, request, *args, **kwargs):
'Redirect to the canonical URL of the document.'
try:
document = self.get_queryset().get(slug=self.kwargs['slug'])
return HttpResponseRedirect('{}index.html'.format(document.get_docs_url(lang_slug=self.kwargs.get('lang'), version_slug=self.kwargs.get('version'))))
except Project.DoesNotExist:
raise Http404() | Redirect to the canonical URL of the document. | readthedocs/docsitalia/views/core_views.py | get | italia/readthedocs.org | 19 | python | def get(self, request, *args, **kwargs):
try:
document = self.get_queryset().get(slug=self.kwargs['slug'])
return HttpResponseRedirect('{}index.html'.format(document.get_docs_url(lang_slug=self.kwargs.get('lang'), version_slug=self.kwargs.get('version'))))
except Project.DoesNotExist:
raise Http404() | def get(self, request, *args, **kwargs):
try:
document = self.get_queryset().get(slug=self.kwargs['slug'])
return HttpResponseRedirect('{}index.html'.format(document.get_docs_url(lang_slug=self.kwargs.get('lang'), version_slug=self.kwargs.get('version'))))
except Project.DoesNotExist:
raise Http404()<|docstring|>Redirect to the canonical URL of the document.<|endoftext|> |
0ade71f884f4bbb8d622ed61c97aeeb9806a2079e987c1177f9bb27fd2804726 | def post(self, request, *args, **kwargs):
"\n Handler for Project import.\n\n We import the Project only after validating the mandatory metadata.\n We then connect a Project to its PublisherProject.\n Finally we need to update the Project model with the data we have in the\n document_settings.yml. We don't care much about what it's in the model\n and we consider the config file as source of truth.\n "
form = ProjectBasicsForm(request.POST, user=request.user)
if (not form.is_valid()):
return render(request, 'docsitalia/import_error.html', {'error_list': form.errors})
project = form.save()
try:
metadata = get_metadata_for_document(project)
except InvalidMetadata as exception:
log.error('Failed to import document invalid metadata %s', exception)
msg = _('Invalid document_settings.yml found in the repository')
project.delete()
return render(request, 'docsitalia/import_error.html', {'error_msg': msg})
except Exception as e:
log.error('Failed to import document metadata: %s', e)
msg = _('Failed to download document_settings.yml from the repository')
project.delete()
return render(request, 'docsitalia/import_error.html', {'error_msg': msg})
extra_fields = ProjectExtraForm.Meta.fields
for (field, value) in request.POST.items():
if (field in extra_fields):
setattr(project, field, value)
project.save()
project.users.add(request.user)
try:
remote = RemoteRepository.objects.get(project=project)
except RemoteRepository.DoesNotExist:
log.error('Missing RemoteRepository for project %s', project)
else:
pub_projects = PublisherProject.objects.filter(metadata__documents__contains=[{'repo_url': remote.html_url}])
for pub_proj in pub_projects:
pub_proj.projects.add(project)
if (not pub_projects):
log.error('No PublisherProject found for repo %s', remote.html_url)
update_project_from_metadata(project, metadata)
project_import.send(sender=project, request=self.request)
self.trigger_initial_build(project, request.user)
return redirect('projects_detail', project_slug=project.slug) | Handler for Project import.
We import the Project only after validating the mandatory metadata.
We then connect a Project to its PublisherProject.
Finally we need to update the Project model with the data we have in the
document_settings.yml. We don't care much about what is in the model
and we consider the config file as source of truth. | readthedocs/docsitalia/views/core_views.py | post | italia/readthedocs.org | 19 | python | def post(self, request, *args, **kwargs):
"\n Handler for Project import.\n\n We import the Project only after validating the mandatory metadata.\n We then connect a Project to its PublisherProject.\n Finally we need to update the Project model with the data we have in the\n document_settings.yml. We don't care much about what it's in the model\n and we consider the config file as source of truth.\n "
form = ProjectBasicsForm(request.POST, user=request.user)
if (not form.is_valid()):
return render(request, 'docsitalia/import_error.html', {'error_list': form.errors})
project = form.save()
try:
metadata = get_metadata_for_document(project)
except InvalidMetadata as exception:
log.error('Failed to import document invalid metadata %s', exception)
msg = _('Invalid document_settings.yml found in the repository')
project.delete()
return render(request, 'docsitalia/import_error.html', {'error_msg': msg})
except Exception as e:
log.error('Failed to import document metadata: %s', e)
msg = _('Failed to download document_settings.yml from the repository')
project.delete()
return render(request, 'docsitalia/import_error.html', {'error_msg': msg})
extra_fields = ProjectExtraForm.Meta.fields
for (field, value) in request.POST.items():
if (field in extra_fields):
setattr(project, field, value)
project.save()
project.users.add(request.user)
try:
remote = RemoteRepository.objects.get(project=project)
except RemoteRepository.DoesNotExist:
log.error('Missing RemoteRepository for project %s', project)
else:
pub_projects = PublisherProject.objects.filter(metadata__documents__contains=[{'repo_url': remote.html_url}])
for pub_proj in pub_projects:
pub_proj.projects.add(project)
if (not pub_projects):
log.error('No PublisherProject found for repo %s', remote.html_url)
update_project_from_metadata(project, metadata)
project_import.send(sender=project, request=self.request)
self.trigger_initial_build(project, request.user)
return redirect('projects_detail', project_slug=project.slug) | def post(self, request, *args, **kwargs):
"\n Handler for Project import.\n\n We import the Project only after validating the mandatory metadata.\n We then connect a Project to its PublisherProject.\n Finally we need to update the Project model with the data we have in the\n document_settings.yml. We don't care much about what it's in the model\n and we consider the config file as source of truth.\n "
form = ProjectBasicsForm(request.POST, user=request.user)
if (not form.is_valid()):
return render(request, 'docsitalia/import_error.html', {'error_list': form.errors})
project = form.save()
try:
metadata = get_metadata_for_document(project)
except InvalidMetadata as exception:
log.error('Failed to import document invalid metadata %s', exception)
msg = _('Invalid document_settings.yml found in the repository')
project.delete()
return render(request, 'docsitalia/import_error.html', {'error_msg': msg})
except Exception as e:
log.error('Failed to import document metadata: %s', e)
msg = _('Failed to download document_settings.yml from the repository')
project.delete()
return render(request, 'docsitalia/import_error.html', {'error_msg': msg})
extra_fields = ProjectExtraForm.Meta.fields
for (field, value) in request.POST.items():
if (field in extra_fields):
setattr(project, field, value)
project.save()
project.users.add(request.user)
try:
remote = RemoteRepository.objects.get(project=project)
except RemoteRepository.DoesNotExist:
log.error('Missing RemoteRepository for project %s', project)
else:
pub_projects = PublisherProject.objects.filter(metadata__documents__contains=[{'repo_url': remote.html_url}])
for pub_proj in pub_projects:
pub_proj.projects.add(project)
if (not pub_projects):
log.error('No PublisherProject found for repo %s', remote.html_url)
update_project_from_metadata(project, metadata)
project_import.send(sender=project, request=self.request)
self.trigger_initial_build(project, request.user)
return redirect('projects_detail', project_slug=project.slug)<|docstring|>Handler for Project import.
We import the Project only after validating the mandatory metadata.
We then connect a Project to its PublisherProject.
Finally we need to update the Project model with the data we have in the
document_settings.yml. We don't care much about what it's in the model
and we consider the config file as source of truth.<|endoftext|> |
40946f53c03e4eaf11988f8faa92c9b1c490776b84c362690ee7a45f03daf0a9 | @click.command(short_help='Run PhISCS (CSP version).')
@click.argument('genotype_file', required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True))
@click.argument('alpha', required=True, type=float)
@click.argument('beta', required=True, type=float)
def phiscsb(genotype_file, alpha, beta):
'PhISCS-B.\n\n A combinatorial approach for subperfect\n tumor phylogeny reconstructionvia integrative use of\n single-cell and bulk sequencing data :cite:`PhISCS`.\n\n trisicell phiscsb input.SC 0.0001 0.1\n '
outfile = os.path.splitext(genotype_file)[0]
tsc.settings.verbosity = 'info'
tsc.settings.logfile = f'{outfile}.phiscsb.log'
df_in = tsc.io.read(genotype_file)
df_out = tsc.tl.phiscsb(df_in, alpha=alpha, beta=beta)
tsc.io.write(df_out, f'{outfile}.phiscsb.CFMatrix')
return None | PhISCS-B.
A combinatorial approach for subperfect
tumor phylogeny reconstruction via integrative use of
single-cell and bulk sequencing data :cite:`PhISCS`.
trisicell phiscsb input.SC 0.0001 0.1 | trisicell/commands/_phiscs.py | phiscsb | faridrashidi/trisicell | 2 | python | @click.command(short_help='Run PhISCS (CSP version).')
@click.argument('genotype_file', required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True))
@click.argument('alpha', required=True, type=float)
@click.argument('beta', required=True, type=float)
def phiscsb(genotype_file, alpha, beta):
'PhISCS-B.\n\n A combinatorial approach for subperfect\n tumor phylogeny reconstructionvia integrative use of\n single-cell and bulk sequencing data :cite:`PhISCS`.\n\n trisicell phiscsb input.SC 0.0001 0.1\n '
outfile = os.path.splitext(genotype_file)[0]
tsc.settings.verbosity = 'info'
tsc.settings.logfile = f'{outfile}.phiscsb.log'
df_in = tsc.io.read(genotype_file)
df_out = tsc.tl.phiscsb(df_in, alpha=alpha, beta=beta)
tsc.io.write(df_out, f'{outfile}.phiscsb.CFMatrix')
return None | @click.command(short_help='Run PhISCS (CSP version).')
@click.argument('genotype_file', required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True))
@click.argument('alpha', required=True, type=float)
@click.argument('beta', required=True, type=float)
def phiscsb(genotype_file, alpha, beta):
'PhISCS-B.\n\n A combinatorial approach for subperfect\n tumor phylogeny reconstructionvia integrative use of\n single-cell and bulk sequencing data :cite:`PhISCS`.\n\n trisicell phiscsb input.SC 0.0001 0.1\n '
outfile = os.path.splitext(genotype_file)[0]
tsc.settings.verbosity = 'info'
tsc.settings.logfile = f'{outfile}.phiscsb.log'
df_in = tsc.io.read(genotype_file)
df_out = tsc.tl.phiscsb(df_in, alpha=alpha, beta=beta)
tsc.io.write(df_out, f'{outfile}.phiscsb.CFMatrix')
return None<|docstring|>PhISCS-B.
A combinatorial approach for subperfect
tumor phylogeny reconstruction via integrative use of
single-cell and bulk sequencing data :cite:`PhISCS`.
trisicell phiscsb input.SC 0.0001 0.1<|endoftext|> |
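The command above is a thin wrapper over the trisicell Python API; the equivalent programmatic call, using the same functions the body invokes (the input file name is hypothetical):

import trisicell as tsc

df_in = tsc.io.read('input.SC')
df_out = tsc.tl.phiscsb(df_in, alpha=0.0001, beta=0.1)
tsc.io.write(df_out, 'input.phiscsb.CFMatrix')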
bb45dbaad79c43298f446c2ff2feda7b2312c646c9650e97efcf7fab1972fa13 | @click.command(short_help='Run PhISCS (ILP version).')
@click.argument('genotype_file', required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True))
@click.argument('alpha', required=True, type=float)
@click.argument('beta', required=True, type=float)
@click.option('--time_limit', '-t', default=86400, type=int, show_default=True, help='Time limit of the program (in second).')
@click.option('--n_threads', '-p', default=1, type=int, show_default=True, help='Number of threads.')
def phiscsi(genotype_file, alpha, beta, time_limit, n_threads):
'PhISCS-I.\n\n A combinatorial approach for subperfect\n tumor phylogeny reconstructionvia integrative use of\n single-cell and bulk sequencing data :cite:`PhISCS`.\n\n trisicell phiscsi input.SC 0.0001 0.1 -t 3600 -p 8\n '
outfile = os.path.splitext(genotype_file)[0]
tsc.settings.verbosity = 'info'
tsc.settings.logfile = f'{outfile}.phiscsi.log'
df_in = tsc.io.read(genotype_file)
df_out = tsc.tl.phiscsi(df_in, alpha=alpha, beta=beta, time_limit=time_limit, n_threads=n_threads)
tsc.io.write(df_out, f'{outfile}.phiscsi.CFMatrix')
return None | PhISCS-I.
A combinatorial approach for subperfect
tumor phylogeny reconstruction via integrative use of
single-cell and bulk sequencing data :cite:`PhISCS`.
trisicell phiscsi input.SC 0.0001 0.1 -t 3600 -p 8 | trisicell/commands/_phiscs.py | phiscsi | faridrashidi/trisicell | 2 | python | @click.command(short_help='Run PhISCS (ILP version).')
@click.argument('genotype_file', required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True))
@click.argument('alpha', required=True, type=float)
@click.argument('beta', required=True, type=float)
@click.option('--time_limit', '-t', default=86400, type=int, show_default=True, help='Time limit of the program (in second).')
@click.option('--n_threads', '-p', default=1, type=int, show_default=True, help='Number of threads.')
def phiscsi(genotype_file, alpha, beta, time_limit, n_threads):
'PhISCS-I.\n\n A combinatorial approach for subperfect\n tumor phylogeny reconstructionvia integrative use of\n single-cell and bulk sequencing data :cite:`PhISCS`.\n\n trisicell phiscsi input.SC 0.0001 0.1 -t 3600 -p 8\n '
outfile = os.path.splitext(genotype_file)[0]
tsc.settings.verbosity = 'info'
tsc.settings.logfile = f'{outfile}.phiscsi.log'
df_in = tsc.io.read(genotype_file)
df_out = tsc.tl.phiscsi(df_in, alpha=alpha, beta=beta, time_limit=time_limit, n_threads=n_threads)
tsc.io.write(df_out, f'{outfile}.phiscsi.CFMatrix')
return None | @click.command(short_help='Run PhISCS (ILP version).')
@click.argument('genotype_file', required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True))
@click.argument('alpha', required=True, type=float)
@click.argument('beta', required=True, type=float)
@click.option('--time_limit', '-t', default=86400, type=int, show_default=True, help='Time limit of the program (in second).')
@click.option('--n_threads', '-p', default=1, type=int, show_default=True, help='Number of threads.')
def phiscsi(genotype_file, alpha, beta, time_limit, n_threads):
'PhISCS-I.\n\n A combinatorial approach for subperfect\n tumor phylogeny reconstructionvia integrative use of\n single-cell and bulk sequencing data :cite:`PhISCS`.\n\n trisicell phiscsi input.SC 0.0001 0.1 -t 3600 -p 8\n '
outfile = os.path.splitext(genotype_file)[0]
tsc.settings.verbosity = 'info'
tsc.settings.logfile = f'{outfile}.phiscsi.log'
df_in = tsc.io.read(genotype_file)
df_out = tsc.tl.phiscsi(df_in, alpha=alpha, beta=beta, time_limit=time_limit, n_threads=n_threads)
tsc.io.write(df_out, f'{outfile}.phiscsi.CFMatrix')
return None<|docstring|>PhISCS-I.
A combinatorial approach for subperfect
tumor phylogeny reconstruction via integrative use of
single-cell and bulk sequencing data :cite:`PhISCS`.
trisicell phiscsi input.SC 0.0001 0.1 -t 3600 -p 8<|endoftext|> |
bbe1192c8fa47142f901e2a4e8589bb3c312b5b65960e18af35a9b0fde0b2b37 | def drop_out_matrices(layers_dims, m, keep_prob):
'\n Initializes the dropout matrices that will be used in both forward prop\n and back-prop on each layer. We\'ll use random numbers from uniform\n distribution.\n\n Arguments\n ---------\n layers_dims : list\n input size and size of each layer, length: number of layers + 1.\n m : int\n number of training examples.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n\n Returns\n -------\n D : dict\n dropout matrices for each layer l. Each dropout matrix on each layer\n would have the same dimension as post activation output matrix "A".\n For example: "D1" shape: number of units x number of examples.\n '
np.random.seed(1)
D = {}
L = len(layers_dims)
for l in range(L):
D[str(l)] = np.random.rand(layers_dims[l], m)
D[str(l)] = (D[str(l)] < keep_prob[l])
assert (D[str(l)].shape == (layers_dims[l], m))
return D | Initializes the dropout matrices that will be used in both forward prop
and back-prop on each layer. We'll use random numbers from uniform
distribution.
Arguments
---------
layers_dims : list
input size and size of each layer, length: number of layers + 1.
m : int
number of training examples.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
Returns
-------
D : dict
dropout matrices for each layer l. Each dropout matrix on each layer
would have the same dimension as post activation output matrix "A".
For example: "D1" shape: number of units x number of examples. | scripts/dropout.py | drop_out_matrices | johntiger1/blog-posts | 0 | python | def drop_out_matrices(layers_dims, m, keep_prob):
'\n Initializes the dropout matrices that will be used in both forward prop\n and back-prop on each layer. We\'ll use random numbers from uniform\n distribution.\n\n Arguments\n ---------\n layers_dims : list\n input size and size of each layer, length: number of layers + 1.\n m : int\n number of training examples.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n\n Returns\n -------\n D : dict\n dropout matrices for each layer l. Each dropout matrix on each layer\n would have the same dimension as post activation output matrix "A".\n For example: "D1" shape: number of units x number of examples.\n '
np.random.seed(1)
D = {}
L = len(layers_dims)
for l in range(L):
D[str(l)] = np.random.rand(layers_dims[l], m)
D[str(l)] = (D[str(l)] < keep_prob[l])
assert (D[str(l)].shape == (layers_dims[l], m))
return D | def drop_out_matrices(layers_dims, m, keep_prob):
'\n Initializes the dropout matrices that will be used in both forward prop\n and back-prop on each layer. We\'ll use random numbers from uniform\n distribution.\n\n Arguments\n ---------\n layers_dims : list\n input size and size of each layer, length: number of layers + 1.\n m : int\n number of training examples.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n\n Returns\n -------\n D : dict\n dropout matrices for each layer l. Each dropout matrix on each layer\n would have the same dimension as post activation output matrix "A".\n For example: "D1" shape: number of units x number of examples.\n '
np.random.seed(1)
D = {}
L = len(layers_dims)
for l in range(L):
D[str(l)] = np.random.rand(layers_dims[l], m)
D[str(l)] = (D[str(l)] < keep_prob[l])
assert (D[str(l)].shape == (layers_dims[l], m))
return D<|docstring|>Initializes the dropout matrices that will be used in both forward prop
and back-prop on each layer. We'll use random numbers from uniform
distribution.
Arguments
---------
layers_dims : list
input size and size of each layer, length: number of layers + 1.
m : int
number of training examples.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
Returns
-------
D : dict
dropout matrices for each layer l. Each dropout matrix on each layer
would have the same dimension as post activation output matrix "A".
For example: "D1" shape: number of units x number of examples.<|endoftext|> |
29c108619aace1516dbb6bf6113166c96bdc76ef304c87cd1130e4862c4aa24b | def L_model_forward(X, parameters, D, keep_prob, hidden_layers_activation_fn='relu'):
'\n Computes the output layer through looping over all units in topological\n order.\n\n X : 2d-array\n input matrix of shape input_size x training_examples.\n parameters : dict\n contains all the weight matrices and bias vectors for all layers.\n D : dict\n dropout matrices for each layer l.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n hidden_layers_activation_fn : str\n activation function to be used on hidden layers: "tanh","relu".\n\n\n Returns\n -------\n AL : 2d-array\n probability vector of shape 1 x training_examples.\n caches : list\n that contains L tuples where each layer has: A_prev, W, b, Z.\n '
A = X
A = np.multiply(A, D[str(0)])
A /= keep_prob[0]
caches = []
L = (len(parameters) // 2)
for l in range(1, L):
A_prev = A
(A, cache) = linear_activation_forward(A_prev, parameters[('W' + str(l))], parameters[('b' + str(l))], hidden_layers_activation_fn)
A = np.multiply(A, D[str(l)])
A /= keep_prob[l]
caches.append(cache)
(AL, cache) = linear_activation_forward(A, parameters[('W' + str(L))], parameters[('b' + str(L))], 'sigmoid')
AL = np.multiply(AL, D[str(L)])
AL /= keep_prob[L]
caches.append(cache)
assert (AL.shape == (1, X.shape[1]))
return (AL, caches) | Computes the output layer through looping over all units in topological
order.
X : 2d-array
input matrix of shape input_size x training_examples.
parameters : dict
contains all the weight matrices and bias vectors for all layers.
D : dict
dropout matrices for each layer l.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
hidden_layers_activation_fn : str
activation function to be used on hidden layers: "tanh","relu".
Returns
-------
AL : 2d-array
probability vector of shape 1 x training_examples.
caches : list
that contains L tuples where each layer has: A_prev, W, b, Z. | scripts/dropout.py | L_model_forward | johntiger1/blog-posts | 0 | python | def L_model_forward(X, parameters, D, keep_prob, hidden_layers_activation_fn='relu'):
'\n Computes the output layer through looping over all units in topological\n order.\n\n X : 2d-array\n input matrix of shape input_size x training_examples.\n parameters : dict\n contains all the weight matrices and bias vectors for all layers.\n D : dict\n dropout matrices for each layer l.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n hidden_layers_activation_fn : str\n activation function to be used on hidden layers: "tanh","relu".\n\n\n Returns\n -------\n AL : 2d-array\n probability vector of shape 1 x training_examples.\n caches : list\n that contains L tuples where each layer has: A_prev, W, b, Z.\n '
A = X
A = np.multiply(A, D[str(0)])
A /= keep_prob[0]
caches = []
L = (len(parameters) // 2)
for l in range(1, L):
A_prev = A
(A, cache) = linear_activation_forward(A_prev, parameters[('W' + str(l))], parameters[('b' + str(l))], hidden_layers_activation_fn)
A = np.multiply(A, D[str(l)])
A /= keep_prob[l]
caches.append(cache)
(AL, cache) = linear_activation_forward(A, parameters[('W' + str(L))], parameters[('b' + str(L))], 'sigmoid')
AL = np.multiply(AL, D[str(L)])
AL /= keep_prob[L]
caches.append(cache)
assert (AL.shape == (1, X.shape[1]))
return (AL, caches) | def L_model_forward(X, parameters, D, keep_prob, hidden_layers_activation_fn='relu'):
'\n Computes the output layer through looping over all units in topological\n order.\n\n X : 2d-array\n input matrix of shape input_size x training_examples.\n parameters : dict\n contains all the weight matrices and bias vectors for all layers.\n D : dict\n dropout matrices for each layer l.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n hidden_layers_activation_fn : str\n activation function to be used on hidden layers: "tanh","relu".\n\n\n Returns\n -------\n AL : 2d-array\n probability vector of shape 1 x training_examples.\n caches : list\n that contains L tuples where each layer has: A_prev, W, b, Z.\n '
A = X
A = np.multiply(A, D[str(0)])
A /= keep_prob[0]
caches = []
L = (len(parameters) // 2)
for l in range(1, L):
A_prev = A
(A, cache) = linear_activation_forward(A_prev, parameters[('W' + str(l))], parameters[('b' + str(l))], hidden_layers_activation_fn)
A = np.multiply(A, D[str(l)])
A /= keep_prob[l]
caches.append(cache)
(AL, cache) = linear_activation_forward(A, parameters[('W' + str(L))], parameters[('b' + str(L))], 'sigmoid')
AL = np.multiply(AL, D[str(L)])
AL /= keep_prob[L]
caches.append(cache)
assert (AL.shape == (1, X.shape[1]))
return (AL, caches)<|docstring|>Computes the output layer through looping over all units in topological
order.
X : 2d-array
input matrix of shape input_size x training_examples.
parameters : dict
contains all the weight matrices and bias vectors for all layers.
D : dict
dropout matrices for each layer l.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
hidden_layers_activation_fn : str
activation function to be used on hidden layers: "tanh","relu".
Returns
-------
AL : 2d-array
probability vector of shape 1 x training_examples.
caches : list
that contains L tuples where each layer has: A_prev, W, b, Z.<|endoftext|> |
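The forward pass above uses inverted dropout: multiply the activations by the layer's mask, then divide by keep_prob so the expected activation magnitude is unchanged. A one-layer numpy sketch of that single step (all values hypothetical):

import numpy as np

np.random.seed(0)
A = np.random.randn(5, 10)                  # activations: 5 units x 10 examples
keep_prob = 0.8

D = np.random.rand(*A.shape) < keep_prob    # dropout mask for this layer
A = (A * D) / keep_prob                     # zero out dropped units, rescale the rest

print("kept units:", D.mean(), "mean activation after dropout:", A.mean())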
c2ad044c9f0357651b0d06ce00870ca19938de126ff095ba9d6b0286f7ec7994 | def L_model_backward(AL, Y, caches, D, keep_prob, hidden_layers_activation_fn='relu'):
'\n Computes the gradient of output layer w.r.t weights, biases, etc. starting\n on the output layer in reverse topological order.\n\n Arguments\n ---------\n AL : 2d-array\n probability vector, output of the forward propagation\n (L_model_forward()).\n y : 2d-array\n true "label" vector (containing 0 if non-cat, 1 if cat).\n caches : list\n list of caches for all layers.\n D : dict\n dropout matrices for each layer l.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n hidden_layers_activation_fn :\n activation function used on hidden layers: "tanh", "relu".\n\n Returns\n -------\n grads : dict\n gradients.\n '
Y = Y.reshape(AL.shape)
L = len(caches)
grads = {}
dAL = np.divide((AL - Y), np.multiply(AL, (1 - AL)))
dAL = np.multiply(dAL, D[str(L)])
dAL /= keep_prob[L]
(grads[('dA' + str((L - 1)))], grads[('dW' + str(L))], grads[('db' + str(L))]) = linear_activation_backward(dAL, caches[(L - 1)], 'sigmoid')
grads[('dA' + str((L - 1)))] = np.multiply(grads[('dA' + str((L - 1)))], D[str((L - 1))])
grads[('dA' + str((L - 1)))] /= keep_prob[(L - 1)]
for l in range((L - 1), 0, (- 1)):
current_cache = caches[(l - 1)]
(grads[('dA' + str((l - 1)))], grads[('dW' + str(l))], grads[('db' + str(l))]) = linear_activation_backward(grads[('dA' + str(l))], current_cache, hidden_layers_activation_fn)
grads[('dA' + str((l - 1)))] = np.multiply(grads[('dA' + str((l - 1)))], D[str((l - 1))])
grads[('dA' + str((l - 1)))] /= keep_prob[(l - 1)]
return grads | Computes the gradient of output layer w.r.t weights, biases, etc. starting
on the output layer in reverse topological order.
Arguments
---------
AL : 2d-array
probability vector, output of the forward propagation
(L_model_forward()).
y : 2d-array
true "label" vector (containing 0 if non-cat, 1 if cat).
caches : list
list of caches for all layers.
D : dict
dropout matrices for each layer l.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
hidden_layers_activation_fn :
activation function used on hidden layers: "tanh", "relu".
Returns
-------
grads : dict
gradients. | scripts/dropout.py | L_model_backward | johntiger1/blog-posts | 0 | python | def L_model_backward(AL, Y, caches, D, keep_prob, hidden_layers_activation_fn='relu'):
'\n Computes the gradient of output layer w.r.t weights, biases, etc. starting\n on the output layer in reverse topological order.\n\n Arguments\n ---------\n AL : 2d-array\n probability vector, output of the forward propagation\n (L_model_forward()).\n y : 2d-array\n true "label" vector (containing 0 if non-cat, 1 if cat).\n caches : list\n list of caches for all layers.\n D : dict\n dropout matrices for each layer l.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n hidden_layers_activation_fn :\n activation function used on hidden layers: "tanh", "relu".\n\n Returns\n -------\n grads : dict\n gradients.\n '
Y = Y.reshape(AL.shape)
L = len(caches)
grads = {}
dAL = np.divide((AL - Y), np.multiply(AL, (1 - AL)))
dAL = np.multiply(dAL, D[str(L)])
dAL /= keep_prob[L]
(grads[('dA' + str((L - 1)))], grads[('dW' + str(L))], grads[('db' + str(L))]) = linear_activation_backward(dAL, caches[(L - 1)], 'sigmoid')
grads[('dA' + str((L - 1)))] = np.multiply(grads[('dA' + str((L - 1)))], D[str((L - 1))])
grads[('dA' + str((L - 1)))] /= keep_prob[(L - 1)]
for l in range((L - 1), 0, (- 1)):
current_cache = caches[(l - 1)]
(grads[('dA' + str((l - 1)))], grads[('dW' + str(l))], grads[('db' + str(l))]) = linear_activation_backward(grads[('dA' + str(l))], current_cache, hidden_layers_activation_fn)
grads[('dA' + str((l - 1)))] = np.multiply(grads[('dA' + str((l - 1)))], D[str((l - 1))])
grads[('dA' + str((l - 1)))] /= keep_prob[(l - 1)]
return grads | def L_model_backward(AL, Y, caches, D, keep_prob, hidden_layers_activation_fn='relu'):
'\n Computes the gradient of output layer w.r.t weights, biases, etc. starting\n on the output layer in reverse topological order.\n\n Arguments\n ---------\n AL : 2d-array\n probability vector, output of the forward propagation\n (L_model_forward()).\n y : 2d-array\n true "label" vector (containing 0 if non-cat, 1 if cat).\n caches : list\n list of caches for all layers.\n D : dict\n dropout matrices for each layer l.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n hidden_layers_activation_fn :\n activation function used on hidden layers: "tanh", "relu".\n\n Returns\n -------\n grads : dict\n gradients.\n '
Y = Y.reshape(AL.shape)
L = len(caches)
grads = {}
dAL = np.divide((AL - Y), np.multiply(AL, (1 - AL)))
dAL = np.multiply(dAL, D[str(L)])
dAL /= keep_prob[L]
(grads[('dA' + str((L - 1)))], grads[('dW' + str(L))], grads[('db' + str(L))]) = linear_activation_backward(dAL, caches[(L - 1)], 'sigmoid')
grads[('dA' + str((L - 1)))] = np.multiply(grads[('dA' + str((L - 1)))], D[str((L - 1))])
grads[('dA' + str((L - 1)))] /= keep_prob[(L - 1)]
for l in range((L - 1), 0, (- 1)):
current_cache = caches[(l - 1)]
(grads[('dA' + str((l - 1)))], grads[('dW' + str(l))], grads[('db' + str(l))]) = linear_activation_backward(grads[('dA' + str(l))], current_cache, hidden_layers_activation_fn)
grads[('dA' + str((l - 1)))] = np.multiply(grads[('dA' + str((l - 1)))], D[str((l - 1))])
grads[('dA' + str((l - 1)))] /= keep_prob[(l - 1)]
return grads<|docstring|>Computes the gradient of output layer w.r.t weights, biases, etc. starting
on the output layer in reverse topological order.
Arguments
---------
AL : 2d-array
probability vector, output of the forward propagation
(L_model_forward()).
y : 2d-array
true "label" vector (containing 0 if non-cat, 1 if cat).
caches : list
list of caches for all layers.
D : dict
dropout matrices for each layer l.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
hidden_layers_activation_fn :
activation function used on hidden layers: "tanh", "relu".
Returns
-------
grads : dict
gradients.<|endoftext|> |
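On the backward pass the same masks must be reused and the same 1/keep_prob rescaling applied to the gradients, which is what the record does for each dA. Isolated, with toy arrays (in real use D_layer is the mask stored during the forward pass):

import numpy as np

np.random.seed(0)
dA = np.random.randn(5, 10)                         # gradient w.r.t. this layer's activations
keep_prob_layer = 0.8
D_layer = np.random.rand(5, 10) < keep_prob_layer   # must be the forward-pass mask

dA = (dA * D_layer) / keep_prob_layer               # no gradient flows through dropped units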
5d4c2c422d518dd4fb4db68ade7926bd3b468ba36d64375ef121533085cb363f | def model_with_dropout(X, Y, layers_dims, keep_prob, learning_rate=0.01, num_iterations=3000, print_cost=True, hidden_layers_activation_fn='relu'):
'\n Implements multilayer neural network with dropout using gradient descent as the\n learning algorithm.\n\n Arguments\n ---------\n X : 2d-array\n data, shape: number of examples x num_px * num_px * 3.\n y : 2d-array\n true "label" vector, shape: 1 x number of examples.\n layers_dims : list\n input size and size of each layer, length: number of layers + 1.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n learning_rate : float\n learning rate of the gradient descent update rule.\n num_iterations : int\n number of iterations of the optimization loop.\n print_cost : bool\n if True, it prints the cost every 100 steps.\n hidden_layers_activation_fn : str\n activation function to be used on hidden layers: "tanh", "relu".\n\n Returns\n -------\n parameters : dict\n parameters learnt by the model. They can then be used to predict test\n examples.\n '
m = X.shape[1]
np.random.seed(1)
parameters = initialize_parameters(layers_dims)
cost_list = []
for i in range(num_iterations):
D = drop_out_matrices(layers_dims, m, keep_prob)
(AL, caches) = L_model_forward(X, parameters, D, keep_prob, hidden_layers_activation_fn)
cost = compute_cost(AL, Y)
grads = L_model_backward(AL, Y, caches, D, keep_prob, hidden_layers_activation_fn)
parameters = update_parameters(parameters, grads, learning_rate)
if ((((i + 1) % 100) == 0) and print_cost):
print('The cost after {} iterations: {}'.format((i + 1), cost))
if ((i % 100) == 0):
cost_list.append(cost)
plt.plot(cost_list)
plt.xlabel('Iteration (per hundreds)')
plt.ylabel('Cost')
plt.title('Cost curve for the learning rate = {}'.format(learning_rate))
return parameters | Implements multilayer neural network with dropout using gradient descent as the
learning algorithm.
Arguments
---------
X : 2d-array
data, shape: number of examples x num_px * num_px * 3.
y : 2d-array
true "label" vector, shape: 1 x number of examples.
layers_dims : list
input size and size of each layer, length: number of layers + 1.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
learning_rate : float
learning rate of the gradient descent update rule.
num_iterations : int
number of iterations of the optimization loop.
print_cost : bool
if True, it prints the cost every 100 steps.
hidden_layers_activation_fn : str
activation function to be used on hidden layers: "tanh", "relu".
Returns
-------
parameters : dict
parameters learnt by the model. They can then be used to predict test
examples. | scripts/dropout.py | model_with_dropout | johntiger1/blog-posts | 0 | python | def model_with_dropout(X, Y, layers_dims, keep_prob, learning_rate=0.01, num_iterations=3000, print_cost=True, hidden_layers_activation_fn='relu'):
'\n Implements multilayer neural network with dropout using gradient descent as the\n learning algorithm.\n\n Arguments\n ---------\n X : 2d-array\n data, shape: number of examples x num_px * num_px * 3.\n y : 2d-array\n true "label" vector, shape: 1 x number of examples.\n layers_dims : list\n input size and size of each layer, length: number of layers + 1.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n learning_rate : float\n learning rate of the gradient descent update rule.\n num_iterations : int\n number of iterations of the optimization loop.\n print_cost : bool\n if True, it prints the cost every 100 steps.\n hidden_layers_activation_fn : str\n activation function to be used on hidden layers: "tanh", "relu".\n\n Returns\n -------\n parameters : dict\n parameters learnt by the model. They can then be used to predict test\n examples.\n '
m = X.shape[1]
np.random.seed(1)
parameters = initialize_parameters(layers_dims)
cost_list = []
for i in range(num_iterations):
D = drop_out_matrices(layers_dims, m, keep_prob)
(AL, caches) = L_model_forward(X, parameters, D, keep_prob, hidden_layers_activation_fn)
cost = compute_cost(AL, Y)
grads = L_model_backward(AL, Y, caches, D, keep_prob, hidden_layers_activation_fn)
parameters = update_parameters(parameters, grads, learning_rate)
if ((((i + 1) % 100) == 0) and print_cost):
print('The cost after {} iterations: {}'.format((i + 1), cost))
if ((i % 100) == 0):
cost_list.append(cost)
plt.plot(cost_list)
plt.xlabel('Iteration (per hundreds)')
plt.ylabel('Cost')
plt.title('Cost curve for the learning rate = {}'.format(learning_rate))
return parameters | def model_with_dropout(X, Y, layers_dims, keep_prob, learning_rate=0.01, num_iterations=3000, print_cost=True, hidden_layers_activation_fn='relu'):
'\n Implements multilayer neural network with dropout using gradient descent as the\n learning algorithm.\n\n Arguments\n ---------\n X : 2d-array\n data, shape: number of examples x num_px * num_px * 3.\n y : 2d-array\n true "label" vector, shape: 1 x number of examples.\n layers_dims : list\n input size and size of each layer, length: number of layers + 1.\n keep_prob : list\n probabilities of keeping a neuron (unit) active for each layer on each\n iteration.\n learning_rate : float\n learning rate of the gradient descent update rule.\n num_iterations : int\n number of iterations of the optimization loop.\n print_cost : bool\n if True, it prints the cost every 100 steps.\n hidden_layers_activation_fn : str\n activation function to be used on hidden layers: "tanh", "relu".\n\n Returns\n -------\n parameters : dict\n parameters learnt by the model. They can then be used to predict test\n examples.\n '
m = X.shape[1]
np.random.seed(1)
parameters = initialize_parameters(layers_dims)
cost_list = []
for i in range(num_iterations):
D = drop_out_matrices(layers_dims, m, keep_prob)
(AL, caches) = L_model_forward(X, parameters, D, keep_prob, hidden_layers_activation_fn)
cost = compute_cost(AL, Y)
grads = L_model_backward(AL, Y, caches, D, keep_prob, hidden_layers_activation_fn)
parameters = update_parameters(parameters, grads, learning_rate)
if ((((i + 1) % 100) == 0) and print_cost):
print('The cost after {} iterations: {}'.format((i + 1), cost))
if ((i % 100) == 0):
cost_list.append(cost)
plt.plot(cost_list)
plt.xlabel('Iteration (per hundreds)')
plt.ylabel('Cost')
plt.title('Cost curve for the learning rate = {}'.format(learning_rate))
return parameters<|docstring|>Implements multilayer neural network with dropout using gradient descent as the
learning algorithm.
Arguments
---------
X : 2d-array
data, shape: number of examples x num_px * num_px * 3.
y : 2d-array
true "label" vector, shape: 1 x number of examples.
layers_dims : list
input size and size of each layer, length: number of layers + 1.
keep_prob : list
probabilities of keeping a neuron (unit) active for each layer on each
iteration.
learning_rate : float
learning rate of the gradient descent update rule.
num_iterations : int
number of iterations of the optimization loop.
print_cost : bool
if True, it prints the cost every 100 steps.
hidden_layers_activation_fn : str
activation function to be used on hidden layers: "tanh", "relu".
Returns
-------
parameters : dict
parameters learnt by the model. They can then be used to predict test
examples.<|endoftext|> |
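A hedged usage sketch for the trainer above, with made-up toy data; it assumes model_with_dropout and the helpers it calls (initialize_parameters, compute_cost, and so on) are importable from this module:

import numpy as np

np.random.seed(1)
X = np.random.randn(20, 300)                      # 20 features x 300 examples (toy data)
Y = (np.random.rand(1, 300) > 0.5).astype(int)    # binary labels

layers_dims = [20, 16, 8, 1]                      # input size + three layers
keep_prob = [1.0, 0.9, 0.9, 1.0]                  # no dropout on input and output layers

parameters = model_with_dropout(
    X, Y, layers_dims, keep_prob,
    learning_rate=0.01, num_iterations=500,
    print_cost=True, hidden_layers_activation_fn='relu')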
1b80b6191a9a41a609549603935669b23ee243daf877999ee924191f577a4c4f | def getType(self):
' Returns the type of an entity '
return self.type | Returns the type of an entity | tasksupervisor/entities/entity.py | getType | ramp-eu/Task_Supervisor | 0 | python | def getType(self):
' '
return self.type | def getType(self):
' '
return self.type<|docstring|>Returns the type of an entity<|endoftext|> |
c3e6b30fa4772b2bb409b6e1b4cb3d0329160f2a91954e53a20bfc537ddd4bad | def getId(self):
' Returns the unique ID of an entity '
return self.id | Returns the unique ID of an entity | tasksupervisor/entities/entity.py | getId | ramp-eu/Task_Supervisor | 0 | python | def getId(self):
' '
return self.id | def getId(self):
' '
return self.id<|docstring|>Returns the unique ID of an entity<|endoftext|> |
cdc9acc7509be446a50c0803f2008d93867a5ac8291564221a22f2cd7bb693ee | @abstractmethod
def forward(self, x_e: torch.FloatTensor, graph_ids: torch.LongTensor, entity_ids: Optional[torch.LongTensor]) -> FloatTensor:
'\n Obtain graph representations by aggregating node representations.\n\n :param x_e: shape: (num_nodes, dim)\n The node representations.\n :param graph_ids: shape: (num_nodes,)\n The graph ID for each node.\n :param entity_ids: shape: (num_nodes,)\n The global entity ID for each node.\n\n :return: shape: (num_graphs, dim)\n The graph representations.\n '
raise NotImplementedError | Obtain graph representations by aggregating node representations.
:param x_e: shape: (num_nodes, dim)
The node representations.
:param graph_ids: shape: (num_nodes,)
The graph ID for each node.
:param entity_ids: shape: (num_nodes,)
The global entity ID for each node.
:return: shape: (num_graphs, dim)
The graph representations. | src/mphrqe/layer/pooling.py | forward | DimitrisAlivas/StarQE | 11 | python | @abstractmethod
def forward(self, x_e: torch.FloatTensor, graph_ids: torch.LongTensor, entity_ids: Optional[torch.LongTensor]) -> FloatTensor:
'\n Obtain graph representations by aggregating node representations.\n\n :param x_e: shape: (num_nodes, dim)\n The node representations.\n :param graph_ids: shape: (num_nodes,)\n The graph ID for each node.\n :param entity_ids: shape: (num_nodes,)\n The global entity ID for each node.\n\n :return: shape: (num_graphs, dim)\n The graph representations.\n '
raise NotImplementedError | @abstractmethod
def forward(self, x_e: torch.FloatTensor, graph_ids: torch.LongTensor, entity_ids: Optional[torch.LongTensor]) -> FloatTensor:
'\n Obtain graph representations by aggregating node representations.\n\n :param x_e: shape: (num_nodes, dim)\n The node representations.\n :param graph_ids: shape: (num_nodes,)\n The graph ID for each node.\n :param entity_ids: shape: (num_nodes,)\n The global entity ID for each node.\n\n :return: shape: (num_graphs, dim)\n The graph representations.\n '
raise NotImplementedError<|docstring|>Obtain graph representations by aggregating node representations.
:param x_e: shape: (num_nodes, dim)
The node representations.
:param graph_ids: shape: (num_nodes,)
The graph ID for each node.
:param entity_ids: shape: (num_nodes,)
The global entity ID for each node.
:return: shape: (num_graphs, dim)
The graph representations.<|endoftext|> |
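One concrete way to satisfy the aggregation contract above is a scatter-style sum of node vectors grouped by graph_ids; a minimal torch sketch, not the project's actual pooling classes:

import torch

def sum_pool(x_e: torch.Tensor, graph_ids: torch.Tensor) -> torch.Tensor:
    # x_e: (num_nodes, dim); graph_ids: (num_nodes,) with values in [0, num_graphs)
    num_graphs = int(graph_ids.max()) + 1
    out = x_e.new_zeros(num_graphs, x_e.shape[1])
    out.index_add_(0, graph_ids, x_e)   # accumulate node vectors per graph
    return out

x_e = torch.randn(5, 3)                     # 5 nodes with dim-3 representations
graph_ids = torch.tensor([0, 0, 1, 1, 1])   # first two nodes belong to graph 0
print(sum_pool(x_e, graph_ids).shape)       # torch.Size([2, 3])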
63f8416d28eab338a50873b85983828d2f3f84fb712cf82eb951b49cdbe8e173 | def forward(self, x_e: torch.FloatTensor, graph_ids: torch.LongTensor, entity_ids: Optional[torch.LongTensor]=None) -> FloatTensor:
'\n graph_ids: binary mask\n '
assert (entity_ids is not None)
mask = (entity_ids == (get_entity_mapper().highest_entity_index + 1))
assert (mask.sum() == graph_ids.unique().shape[0]), 'There should be exactly one target node per graph.'
return x_e[mask] | graph_ids: binary mask | src/mphrqe/layer/pooling.py | forward | DimitrisAlivas/StarQE | 11 | python | def forward(self, x_e: torch.FloatTensor, graph_ids: torch.LongTensor, entity_ids: Optional[torch.LongTensor]=None) -> FloatTensor:
'\n \n '
assert (entity_ids is not None)
mask = (entity_ids == (get_entity_mapper().highest_entity_index + 1))
assert (mask.sum() == graph_ids.unique().shape[0]), 'There should be exactly one target node per graph.'
return x_e[mask] | def forward(self, x_e: torch.FloatTensor, graph_ids: torch.LongTensor, entity_ids: Optional[torch.LongTensor]=None) -> FloatTensor:
'\n \n '
assert (entity_ids is not None)
mask = (entity_ids == (get_entity_mapper().highest_entity_index + 1))
assert (mask.sum() == graph_ids.unique().shape[0]), 'There should be exactly one target node per graph.'
return x_e[mask]<|docstring|>graph_ids: binary mask<|endoftext|> |
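The target pooling above selects exactly one marked node per query graph via an entity-id sentinel; the same selection with toy tensors and a made-up sentinel value in place of get_entity_mapper():

import torch

TARGET_ID = 999                                       # hypothetical sentinel id for target nodes
x_e = torch.randn(5, 3)                               # node representations
graph_ids = torch.tensor([0, 0, 0, 1, 1])
entity_ids = torch.tensor([7, TARGET_ID, 3, TARGET_ID, 12])

mask = entity_ids == TARGET_ID
assert mask.sum() == graph_ids.unique().numel()       # one target node per graph
graph_repr = x_e[mask]                                # (num_graphs, dim)
print(graph_repr.shape)                               # torch.Size([2, 3])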
b944b2e6b172a132d61b061861cafbb95226c80c63f2d69f28e5a200ce61f9e4 | def init_application():
'Main entry point for initializing the Deckhand API service.\n\n Create routes for the v1.0 API and sets up logging.\n '
config_files = _get_config_files()
paste_file = config_files[(- 1)]
CONF([], project='deckhand', default_config_files=config_files)
setup_logging(CONF)
policy.Enforcer(CONF)
LOG.debug('Starting WSGI application using %s configuration file.', paste_file)
db_api.drop_db()
db_api.setup_db(CONF.database.connection)
app = deploy.loadapp(('config:%s' % paste_file), name='deckhand_api')
return app | Main entry point for initializing the Deckhand API service.
Create routes for the v1.0 API and sets up logging. | deckhand/control/api.py | init_application | att-comdev/test-submit | 0 | python | def init_application():
'Main entry point for initializing the Deckhand API service.\n\n Create routes for the v1.0 API and sets up logging.\n '
config_files = _get_config_files()
paste_file = config_files[(- 1)]
CONF([], project='deckhand', default_config_files=config_files)
setup_logging(CONF)
policy.Enforcer(CONF)
LOG.debug('Starting WSGI application using %s configuration file.', paste_file)
db_api.drop_db()
db_api.setup_db(CONF.database.connection)
app = deploy.loadapp(('config:%s' % paste_file), name='deckhand_api')
return app | def init_application():
'Main entry point for initializing the Deckhand API service.\n\n Create routes for the v1.0 API and sets up logging.\n '
config_files = _get_config_files()
paste_file = config_files[(- 1)]
CONF([], project='deckhand', default_config_files=config_files)
setup_logging(CONF)
policy.Enforcer(CONF)
LOG.debug('Starting WSGI application using %s configuration file.', paste_file)
db_api.drop_db()
db_api.setup_db(CONF.database.connection)
app = deploy.loadapp(('config:%s' % paste_file), name='deckhand_api')
return app<|docstring|>Main entry point for initializing the Deckhand API service.
Create routes for the v1.0 API and sets up logging.<|endoftext|> |
3f774de91682eb63d50f7ff9ae1fa53d7c7927c9bebf8528789d8ca00756a2d3 | def add_logging_level(levelName: str, levelNum: int, methodName: Optional[str]=None) -> None:
'Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.\n\n `levelName` becomes an attribute of the `logging` module with the value\n `levelNum`. `methodName` becomes a convenience method for both `logging`\n itself and the class returned by `logging.getLoggerClass()` (usually just\n `logging.Logger`).\n\n To avoid accidental clobbering of existing attributes, this method will\n raise an `AttributeError` if the level name is already an attribute of the\n `logging` module or if the method name is already present\n\n Credit: https://stackoverflow.com/a/35804945\n\n Args:\n levelName (str): The name of the new logging level (in all caps).\n levelNum (int): The priority value of the logging level, lower=more verbose.\n methodName (str): The name of the method used to log using this.\n If `methodName` is not specified, `levelName.lower()` is used.\n\n Example:\n ::\n >>> add_logging_level(\'TRACE\', logging.DEBUG - 5)\n >>> logging.getLogger(__name__).setLevel("TRACE")\n >>> logging.getLogger(__name__).trace(\'that worked\')\n >>> logging.trace(\'so did this\')\n >>> logging.TRACE\n 5\n\n '
if (not methodName):
methodName = levelName.lower()
if hasattr(logging, levelName):
raise AttributeError('{} already defined in logging module'.format(levelName))
if hasattr(logging, methodName):
raise AttributeError('{} already defined in logging module'.format(methodName))
if hasattr(logging.getLoggerClass(), methodName):
raise AttributeError('{} already defined in logger class'.format(methodName))
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(levelNum):
self._log(levelNum, message, args, **kwargs)
def logToRoot(message, *args, **kwargs):
logging.log(levelNum, message, *args, **kwargs)
logging.addLevelName(levelNum, levelName)
setattr(logging, levelName, levelNum)
setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot) | Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.
`levelName` becomes an attribute of the `logging` module with the value
`levelNum`. `methodName` becomes a convenience method for both `logging`
itself and the class returned by `logging.getLoggerClass()` (usually just
`logging.Logger`).
To avoid accidental clobbering of existing attributes, this method will
raise an `AttributeError` if the level name is already an attribute of the
`logging` module or if the method name is already present
Credit: https://stackoverflow.com/a/35804945
Args:
levelName (str): The name of the new logging level (in all caps).
levelNum (int): The priority value of the logging level, lower=more verbose.
methodName (str): The name of the method used to log using this.
If `methodName` is not specified, `levelName.lower()` is used.
Example:
::
>>> add_logging_level('TRACE', logging.DEBUG - 5)
>>> logging.getLogger(__name__).setLevel("TRACE")
>>> logging.getLogger(__name__).trace('that worked')
>>> logging.trace('so did this')
>>> logging.TRACE
5 | unifi_protect_backup/unifi_protect_backup.py | add_logging_level | roastlechon/unifi-protect-backup | 0 | python | def add_logging_level(levelName: str, levelNum: int, methodName: Optional[str]=None) -> None:
'Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.\n\n `levelName` becomes an attribute of the `logging` module with the value\n `levelNum`. `methodName` becomes a convenience method for both `logging`\n itself and the class returned by `logging.getLoggerClass()` (usually just\n `logging.Logger`).\n\n To avoid accidental clobbering of existing attributes, this method will\n raise an `AttributeError` if the level name is already an attribute of the\n `logging` module or if the method name is already present\n\n Credit: https://stackoverflow.com/a/35804945\n\n Args:\n levelName (str): The name of the new logging level (in all caps).\n levelNum (int): The priority value of the logging level, lower=more verbose.\n methodName (str): The name of the method used to log using this.\n If `methodName` is not specified, `levelName.lower()` is used.\n\n Example:\n ::\n >>> add_logging_level(\'TRACE\', logging.DEBUG - 5)\n >>> logging.getLogger(__name__).setLevel("TRACE")\n >>> logging.getLogger(__name__).trace(\'that worked\')\n >>> logging.trace(\'so did this\')\n >>> logging.TRACE\n 5\n\n '
if (not methodName):
methodName = levelName.lower()
if hasattr(logging, levelName):
raise AttributeError('{} already defined in logging module'.format(levelName))
if hasattr(logging, methodName):
raise AttributeError('{} already defined in logging module'.format(methodName))
if hasattr(logging.getLoggerClass(), methodName):
raise AttributeError('{} already defined in logger class'.format(methodName))
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(levelNum):
self._log(levelNum, message, args, **kwargs)
def logToRoot(message, *args, **kwargs):
logging.log(levelNum, message, *args, **kwargs)
logging.addLevelName(levelNum, levelName)
setattr(logging, levelName, levelNum)
setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot) | def add_logging_level(levelName: str, levelNum: int, methodName: Optional[str]=None) -> None:
'Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.\n\n `levelName` becomes an attribute of the `logging` module with the value\n `levelNum`. `methodName` becomes a convenience method for both `logging`\n itself and the class returned by `logging.getLoggerClass()` (usually just\n `logging.Logger`).\n\n To avoid accidental clobbering of existing attributes, this method will\n raise an `AttributeError` if the level name is already an attribute of the\n `logging` module or if the method name is already present\n\n Credit: https://stackoverflow.com/a/35804945\n\n Args:\n levelName (str): The name of the new logging level (in all caps).\n levelNum (int): The priority value of the logging level, lower=more verbose.\n methodName (str): The name of the method used to log using this.\n If `methodName` is not specified, `levelName.lower()` is used.\n\n Example:\n ::\n >>> add_logging_level(\'TRACE\', logging.DEBUG - 5)\n >>> logging.getLogger(__name__).setLevel("TRACE")\n >>> logging.getLogger(__name__).trace(\'that worked\')\n >>> logging.trace(\'so did this\')\n >>> logging.TRACE\n 5\n\n '
if (not methodName):
methodName = levelName.lower()
if hasattr(logging, levelName):
raise AttributeError('{} already defined in logging module'.format(levelName))
if hasattr(logging, methodName):
raise AttributeError('{} already defined in logging module'.format(methodName))
if hasattr(logging.getLoggerClass(), methodName):
raise AttributeError('{} already defined in logger class'.format(methodName))
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(levelNum):
self._log(levelNum, message, args, **kwargs)
def logToRoot(message, *args, **kwargs):
logging.log(levelNum, message, *args, **kwargs)
logging.addLevelName(levelNum, levelName)
setattr(logging, levelName, levelNum)
setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot)<|docstring|>Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.
`levelName` becomes an attribute of the `logging` module with the value
`levelNum`. `methodName` becomes a convenience method for both `logging`
itself and the class returned by `logging.getLoggerClass()` (usually just
`logging.Logger`).
To avoid accidental clobbering of existing attributes, this method will
raise an `AttributeError` if the level name is already an attribute of the
`logging` module or if the method name is already present
Credit: https://stackoverflow.com/a/35804945
Args:
levelName (str): The name of the new logging level (in all caps).
levelNum (int): The priority value of the logging level, lower=more verbose.
methodName (str): The name of the method used to log using this.
If `methodName` is not specified, `levelName.lower()` is used.
Example:
::
>>> add_logging_level('TRACE', logging.DEBUG - 5)
>>> logging.getLogger(__name__).setLevel("TRACE")
>>> logging.getLogger(__name__).trace('that worked')
>>> logging.trace('so did this')
>>> logging.TRACE
5<|endoftext|> |
2d439cfce5c6cf51114181c556fbab6efdbf9308c9b74fca150a7abad1088363 | def setup_logging(verbosity: int) -> None:
'Configures loggers to provided the desired level of verbosity.\n\n Verbosity 0: Only log info messages created by `unifi-protect-backup`, and all warnings\n verbosity 1: Only log info & debug messages created by `unifi-protect-backup`, and all warnings\n verbosity 2: Log info & debug messages created by `unifi-protect-backup`, command output, and\n all warnings\n Verbosity 3: Log debug messages created by `unifi-protect-backup`, command output, all info\n messages, and all warnings\n Verbosity 4: Log debug messages created by `unifi-protect-backup` command output, all info\n messages, all warnings, and websocket data\n Verbosity 5: Log websocket data, command output, all debug messages, all info messages and all\n warnings\n\n Args:\n verbosity (int): The desired level of verbosity\n\n '
add_logging_level('EXTRA_DEBUG', (logging.DEBUG - 1))
add_logging_level('WEBSOCKET_DATA', (logging.DEBUG - 2))
format = '{asctime} [{levelname}]:{name: <20}:\t{message}'
date_format = '%Y-%m-%d %H:%M:%S'
style = '{'
if (verbosity == 0):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.INFO)
elif (verbosity == 1):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.DEBUG)
elif (verbosity == 2):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 3):
logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 4):
logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.WEBSOCKET_DATA)
elif (verbosity == 5):
logging.basicConfig(level=logging.DEBUG, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.WEBSOCKET_DATA) | Configures loggers to provide the desired level of verbosity.
Verbosity 0: Only log info messages created by `unifi-protect-backup`, and all warnings
verbosity 1: Only log info & debug messages created by `unifi-protect-backup`, and all warnings
verbosity 2: Log info & debug messages created by `unifi-protect-backup`, command output, and
all warnings
Verbosity 3: Log debug messages created by `unifi-protect-backup`, command output, all info
messages, and all warnings
Verbosity 4: Log debug messages created by `unifi-protect-backup`, command output, all info
messages, all warnings, and websocket data
Verbosity 5: Log websocket data, command output, all debug messages, all info messages and all
warnings
Args:
verbosity (int): The desired level of verbosity | unifi_protect_backup/unifi_protect_backup.py | setup_logging | roastlechon/unifi-protect-backup | 0 | python | def setup_logging(verbosity: int) -> None:
'Configures loggers to provided the desired level of verbosity.\n\n Verbosity 0: Only log info messages created by `unifi-protect-backup`, and all warnings\n verbosity 1: Only log info & debug messages created by `unifi-protect-backup`, and all warnings\n verbosity 2: Log info & debug messages created by `unifi-protect-backup`, command output, and\n all warnings\n Verbosity 3: Log debug messages created by `unifi-protect-backup`, command output, all info\n messages, and all warnings\n Verbosity 4: Log debug messages created by `unifi-protect-backup` command output, all info\n messages, all warnings, and websocket data\n Verbosity 5: Log websocket data, command output, all debug messages, all info messages and all\n warnings\n\n Args:\n verbosity (int): The desired level of verbosity\n\n '
add_logging_level('EXTRA_DEBUG', (logging.DEBUG - 1))
add_logging_level('WEBSOCKET_DATA', (logging.DEBUG - 2))
format = '{asctime} [{levelname}]:{name: <20}:\t{message}'
date_format = '%Y-%m-%d %H:%M:%S'
style = '{'
if (verbosity == 0):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.INFO)
elif (verbosity == 1):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.DEBUG)
elif (verbosity == 2):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 3):
logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 4):
logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.WEBSOCKET_DATA)
elif (verbosity == 5):
logging.basicConfig(level=logging.DEBUG, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.WEBSOCKET_DATA) | def setup_logging(verbosity: int) -> None:
'Configures loggers to provided the desired level of verbosity.\n\n Verbosity 0: Only log info messages created by `unifi-protect-backup`, and all warnings\n verbosity 1: Only log info & debug messages created by `unifi-protect-backup`, and all warnings\n verbosity 2: Log info & debug messages created by `unifi-protect-backup`, command output, and\n all warnings\n Verbosity 3: Log debug messages created by `unifi-protect-backup`, command output, all info\n messages, and all warnings\n Verbosity 4: Log debug messages created by `unifi-protect-backup` command output, all info\n messages, all warnings, and websocket data\n Verbosity 5: Log websocket data, command output, all debug messages, all info messages and all\n warnings\n\n Args:\n verbosity (int): The desired level of verbosity\n\n '
add_logging_level('EXTRA_DEBUG', (logging.DEBUG - 1))
add_logging_level('WEBSOCKET_DATA', (logging.DEBUG - 2))
format = '{asctime} [{levelname}]:{name: <20}:\t{message}'
date_format = '%Y-%m-%d %H:%M:%S'
style = '{'
if (verbosity == 0):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.INFO)
elif (verbosity == 1):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.DEBUG)
elif (verbosity == 2):
logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 3):
logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 4):
logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.WEBSOCKET_DATA)
elif (verbosity == 5):
logging.basicConfig(level=logging.DEBUG, format=format, style=style, datefmt=date_format)
logger.setLevel(logging.WEBSOCKET_DATA)<|docstring|>Configures loggers to provide the desired level of verbosity.
Verbosity 0: Only log info messages created by `unifi-protect-backup`, and all warnings
verbosity 1: Only log info & debug messages created by `unifi-protect-backup`, and all warnings
verbosity 2: Log info & debug messages created by `unifi-protect-backup`, command output, and
all warnings
Verbosity 3: Log debug messages created by `unifi-protect-backup`, command output, all info
messages, and all warnings
Verbosity 4: Log debug messages created by `unifi-protect-backup`, command output, all info
messages, all warnings, and websocket data
Verbosity 5: Log websocket data, command output, all debug messages, all info messages and all
warnings
Args:
verbosity (int): The desired level of verbosity<|endoftext|> |
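As a worked example of the mapping above, verbosity 3 would leave the loggers roughly like this (sketch, assuming the module-level `logger` used throughout this file):
setup_logging(3)
# root logger     -> INFO        (logging.basicConfig(level=logging.INFO, ...))
# package logger  -> EXTRA_DEBUG (logging.DEBUG - 1)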
0d2d9eb04ff12b37c07ffdf2e3937a0f4e74dc312f5ccfc64d002d7d85b070d6 | def human_readable_size(num):
'Turns a number into a human readable number with ISO/IEC 80000 binary prefixes.\n\n Based on: https://stackoverflow.com/a/1094933\n\n Args:\n num (int): The number to be converted into human readable format\n '
for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']:
if (abs(num) < 1024.0):
return f'{num:3.1f}{unit}'
num /= 1024.0
raise ValueError('`num` too large, ran out of prefixes') | Turns a number into a human readable number with ISO/IEC 80000 binary prefixes.
Based on: https://stackoverflow.com/a/1094933
Args:
num (int): The number to be converted into human readable format | unifi_protect_backup/unifi_protect_backup.py | human_readable_size | roastlechon/unifi-protect-backup | 0 | python | def human_readable_size(num):
'Turns a number into a human readable number with ISO/IEC 80000 binary prefixes.\n\n Based on: https://stackoverflow.com/a/1094933\n\n Args:\n num (int): The number to be converted into human readable format\n '
for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']:
if (abs(num) < 1024.0):
return f'{num:3.1f}{unit}'
num /= 1024.0
raise ValueError('`num` too large, ran out of prefixes') | def human_readable_size(num):
'Turns a number into a human readable number with ISO/IEC 80000 binary prefixes.\n\n Based on: https://stackoverflow.com/a/1094933\n\n Args:\n num (int): The number to be converted into human readable format\n '
for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']:
if (abs(num) < 1024.0):
return f'{num:3.1f}{unit}'
num /= 1024.0
raise ValueError('`num` too large, ran out of prefixes')<|docstring|>Turns a number into a human readable number with ISO/IEC 80000 binary prefixes.
Based on: https://stackoverflow.com/a/1094933
Args:
num (int): The number to be converted into human readable format<|endoftext|> |
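A short usage sketch; the outputs follow directly from the repeated division by 1024:
print(human_readable_size(512))      # 512.0B
print(human_readable_size(1536))     # 1.5KiB
print(human_readable_size(10 ** 9))  # 953.7MiB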
d044ea67851c969777f0b36cf23547890c4af69895cd01ab5603c9a1882f8cfb | def __init__(self, stdout, stderr, returncode):
'Exception class for when rclone does not exit with `0`.\n\n Args:\n stdout (str): What rclone output to stdout\n stderr (str): What rclone output to stderr\n returncode (str): The return code of the rclone process\n '
super().__init__()
self.stdout: str = stdout
self.stderr: str = stderr
self.returncode: int = returncode | Exception class for when rclone does not exit with `0`.
Args:
stdout (str): What rclone output to stdout
stderr (str): What rclone output to stderr
returncode (str): The return code of the rclone process | unifi_protect_backup/unifi_protect_backup.py | __init__ | roastlechon/unifi-protect-backup | 0 | python | def __init__(self, stdout, stderr, returncode):
'Exception class for when rclone does not exit with `0`.\n\n Args:\n stdout (str): What rclone output to stdout\n stderr (str): What rclone output to stderr\n returncode (str): The return code of the rclone process\n '
super().__init__()
self.stdout: str = stdout
self.stderr: str = stderr
self.returncode: int = returncode | def __init__(self, stdout, stderr, returncode):
'Exception class for when rclone does not exit with `0`.\n\n Args:\n stdout (str): What rclone output to stdout\n stderr (str): What rclone output to stderr\n returncode (str): The return code of the rclone process\n '
super().__init__()
self.stdout: str = stdout
self.stderr: str = stderr
self.returncode: int = returncode<|docstring|>Exception class for when rclone does not exit with `0`.
Args:
stdout (str): What rclone output to stdout
stderr (str): What rclone output to stderr
returncode (str): The return code of the rclone process<|endoftext|> |
8cc882511f3630b3ba4b04177f02c5d6ec9f19b1047efb757db92ebb8088329b | def __str__(self):
'Turns exception into a human readable form.'
return f'''Return Code: {self.returncode}
Stdout:
{self.stdout}
Stderr:
{self.stderr}''' | Turns exception into a human readable form. | unifi_protect_backup/unifi_protect_backup.py | __str__ | roastlechon/unifi-protect-backup | 0 | python | def __str__(self):
return f'Return Code: {self.returncode}
Stdout:
{self.stdout}
Stderr:
{self.stderr}' | def __str__(self):
return f'Return Code: {self.returncode}
Stdout:
{self.stdout}
Stderr:
{self.stderr}'<|docstring|>Turns exception into a human readable form.<|endoftext|>
78c1d81c4fd8de4f0556ae6dd96ecc7e44bf0b1afc5149b737f15750a93746c6 | def __init__(self, address: str, username: str, password: str, verify_ssl: bool, rclone_destination: str, retention: str, rclone_args: str, ignore_cameras: List[str], verbose: int, port: int=443):
'Will configure logging settings and the Unifi Protect API (but not actually connect).\n\n Args:\n address (str): Base address of the Unifi Protect instance\n port (int): Post of the Unifi Protect instance, usually 443\n username (str): Username to log into Unifi Protect instance\n password (str): Password for Unifi Protect user\n verify_ssl (bool): Flag for if SSL certificates should be validated\n rclone_destination (str): `rclone` destination path in the format\n {rclone remote}:{path on remote}. E.g.\n `gdrive:/backups/unifi_protect`\n retention (str): How long should event clips be backed up for. Format as per the\n `--max-age` argument of `rclone`\n (https://rclone.org/filtering/#max-age-don-t-transfer-any-file-older-than-this)\n rclone_args (str): A bandwidth limit which is passed to the `--bwlimit` argument of\n `rclone` (https://rclone.org/docs/#bwlimit-bandwidth-spec)\n ignore_cameras (List[str]): List of camera IDs for which to not backup events\n verbose (int): How verbose to setup logging, see :func:`setup_logging` for details.\n '
setup_logging(verbose)
logger.debug('Config:')
logger.debug(f' address={address!r}')
logger.debug(f' port={port!r}')
logger.debug(f' username={username!r}')
if (verbose < 5):
logger.debug(' password=REDACTED')
else:
logger.debug(f' password={password!r}')
logger.debug(f' verify_ssl={verify_ssl!r}')
logger.debug(f' rclone_destination={rclone_destination!r}')
logger.debug(f' retention={retention!r}')
logger.debug(f' rclone_args={rclone_args!r}')
logger.debug(f' ignore_cameras={ignore_cameras!r}')
logger.debug(f' verbose={verbose!r}')
self.rclone_destination = rclone_destination
self.retention = retention
self.rclone_args = rclone_args
self.address = address
self.port = port
self.username = username
self.password = password
self.verify_ssl = verify_ssl
self._protect = ProtectApiClient(self.address, self.port, self.username, self.password, verify_ssl=self.verify_ssl, subscribed_models={ModelType.EVENT})
self.ignore_cameras = ignore_cameras
self._download_queue: asyncio.Queue = asyncio.Queue()
self._unsub: Callable[([], None)]
self._has_ffprobe = False | Will configure logging settings and the Unifi Protect API (but not actually connect).
Args:
address (str): Base address of the Unifi Protect instance
port (int): Port of the Unifi Protect instance, usually 443
username (str): Username to log into Unifi Protect instance
password (str): Password for Unifi Protect user
verify_ssl (bool): Flag for if SSL certificates should be validated
rclone_destination (str): `rclone` destination path in the format
{rclone remote}:{path on remote}. E.g.
`gdrive:/backups/unifi_protect`
retention (str): How long should event clips be backed up for. Format as per the
`--max-age` argument of `rclone`
(https://rclone.org/filtering/#max-age-don-t-transfer-any-file-older-than-this)
rclone_args (str): A bandwidth limit which is passed to the `--bwlimit` argument of
`rclone` (https://rclone.org/docs/#bwlimit-bandwidth-spec)
ignore_cameras (List[str]): List of camera IDs for which to not backup events
verbose (int): How verbose to setup logging, see :func:`setup_logging` for details. | unifi_protect_backup/unifi_protect_backup.py | __init__ | roastlechon/unifi-protect-backup | 0 | python | def __init__(self, address: str, username: str, password: str, verify_ssl: bool, rclone_destination: str, retention: str, rclone_args: str, ignore_cameras: List[str], verbose: int, port: int=443):
'Will configure logging settings and the Unifi Protect API (but not actually connect).\n\n Args:\n address (str): Base address of the Unifi Protect instance\n port (int): Post of the Unifi Protect instance, usually 443\n username (str): Username to log into Unifi Protect instance\n password (str): Password for Unifi Protect user\n verify_ssl (bool): Flag for if SSL certificates should be validated\n rclone_destination (str): `rclone` destination path in the format\n {rclone remote}:{path on remote}. E.g.\n `gdrive:/backups/unifi_protect`\n retention (str): How long should event clips be backed up for. Format as per the\n `--max-age` argument of `rclone`\n (https://rclone.org/filtering/#max-age-don-t-transfer-any-file-older-than-this)\n rclone_args (str): A bandwidth limit which is passed to the `--bwlimit` argument of\n `rclone` (https://rclone.org/docs/#bwlimit-bandwidth-spec)\n ignore_cameras (List[str]): List of camera IDs for which to not backup events\n verbose (int): How verbose to setup logging, see :func:`setup_logging` for details.\n '
setup_logging(verbose)
logger.debug('Config:')
logger.debug(f' address={address!r}')
logger.debug(f' port={port!r}')
logger.debug(f' username={username!r}')
if (verbose < 5):
logger.debug(' password=REDACTED')
else:
logger.debug(f' password={password!r}')
logger.debug(f' verify_ssl={verify_ssl!r}')
logger.debug(f' rclone_destination={rclone_destination!r}')
logger.debug(f' retention={retention!r}')
logger.debug(f' rclone_args={rclone_args!r}')
logger.debug(f' ignore_cameras={ignore_cameras!r}')
logger.debug(f' verbose={verbose!r}')
self.rclone_destination = rclone_destination
self.retention = retention
self.rclone_args = rclone_args
self.address = address
self.port = port
self.username = username
self.password = password
self.verify_ssl = verify_ssl
self._protect = ProtectApiClient(self.address, self.port, self.username, self.password, verify_ssl=self.verify_ssl, subscribed_models={ModelType.EVENT})
self.ignore_cameras = ignore_cameras
self._download_queue: asyncio.Queue = asyncio.Queue()
self._unsub: Callable[([], None)]
self._has_ffprobe = False | def __init__(self, address: str, username: str, password: str, verify_ssl: bool, rclone_destination: str, retention: str, rclone_args: str, ignore_cameras: List[str], verbose: int, port: int=443):
'Will configure logging settings and the Unifi Protect API (but not actually connect).\n\n Args:\n address (str): Base address of the Unifi Protect instance\n port (int): Post of the Unifi Protect instance, usually 443\n username (str): Username to log into Unifi Protect instance\n password (str): Password for Unifi Protect user\n verify_ssl (bool): Flag for if SSL certificates should be validated\n rclone_destination (str): `rclone` destination path in the format\n {rclone remote}:{path on remote}. E.g.\n `gdrive:/backups/unifi_protect`\n retention (str): How long should event clips be backed up for. Format as per the\n `--max-age` argument of `rclone`\n (https://rclone.org/filtering/#max-age-don-t-transfer-any-file-older-than-this)\n rclone_args (str): A bandwidth limit which is passed to the `--bwlimit` argument of\n `rclone` (https://rclone.org/docs/#bwlimit-bandwidth-spec)\n ignore_cameras (List[str]): List of camera IDs for which to not backup events\n verbose (int): How verbose to setup logging, see :func:`setup_logging` for details.\n '
setup_logging(verbose)
logger.debug('Config:')
logger.debug(f' address={address!r}')
logger.debug(f' port={port!r}')
logger.debug(f' username={username!r}')
if (verbose < 5):
logger.debug(' password=REDACTED')
else:
logger.debug(f' password={password!r}')
logger.debug(f' verify_ssl={verify_ssl!r}')
logger.debug(f' rclone_destination={rclone_destination!r}')
logger.debug(f' retention={retention!r}')
logger.debug(f' rclone_args={rclone_args!r}')
logger.debug(f' ignore_cameras={ignore_cameras!r}')
logger.debug(f' verbose={verbose!r}')
self.rclone_destination = rclone_destination
self.retention = retention
self.rclone_args = rclone_args
self.address = address
self.port = port
self.username = username
self.password = password
self.verify_ssl = verify_ssl
self._protect = ProtectApiClient(self.address, self.port, self.username, self.password, verify_ssl=self.verify_ssl, subscribed_models={ModelType.EVENT})
self.ignore_cameras = ignore_cameras
self._download_queue: asyncio.Queue = asyncio.Queue()
self._unsub: Callable[([], None)]
self._has_ffprobe = False<|docstring|>Will configure logging settings and the Unifi Protect API (but not actually connect).
Args:
address (str): Base address of the Unifi Protect instance
port (int): Port of the Unifi Protect instance, usually 443
username (str): Username to log into Unifi Protect instance
password (str): Password for Unifi Protect user
verify_ssl (bool): Flag for if SSL certificates should be validated
rclone_destination (str): `rclone` destination path in the format
{rclone remote}:{path on remote}. E.g.
`gdrive:/backups/unifi_protect`
retention (str): How long should event clips be backed up for. Format as per the
`--max-age` argument of `rclone`
(https://rclone.org/filtering/#max-age-don-t-transfer-any-file-older-than-this)
rclone_args (str): A bandwidth limit which is passed to the `--bwlimit` argument of
`rclone` (https://rclone.org/docs/#bwlimit-bandwidth-spec)
ignore_cameras (List[str]): List of camera IDs for which to not backup events
verbose (int): How verbose to setup logging, see :func:`setup_logging` for details.<|endoftext|> |
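A hedged construction sketch; every argument value below is a placeholder, not taken from the original project:
backup = UnifiProtectBackup(
    address='192.168.1.1',            # placeholder NVR address
    username='backup_user',           # placeholder credentials
    password='secret',
    verify_ssl=False,
    rclone_destination='gdrive:/backups/unifi_protect',
    retention='7d',
    rclone_args='--bwlimit 10M',
    ignore_cameras=[],
    verbose=1,
)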
2dfc50cb2f99104f5746b7ef1848d8db46593968fa467e9d753919655eb5ae44 | async def start(self):
'Bootstrap the backup process and kick off the main loop.\n\n You should run this to start the realtime backup of Unifi Protect clips as they are created\n\n '
logger.info('Starting...')
logger.info('Checking rclone configuration...')
(await self._check_rclone())
ffprobe = shutil.which('ffprobe')
if (ffprobe is not None):
logger.debug(f'ffprobe found: {ffprobe}')
self._has_ffprobe = True
logger.info('Connecting to Unifi Protect...')
(await self._protect.update())
logger.info('Found cameras:')
for camera in self._protect.bootstrap.cameras.values():
logger.info(f' - {camera.id}: {camera.name}')
self._unsub = self._protect.subscribe_websocket(self._websocket_callback)
logger.info('Setting up purge task...')
@aiocron.crontab('0 0 * * *')
async def rclone_purge_old():
logger.info('Deleting old files...')
cmd = f"rclone delete -vv --min-age {self.retention} '{self.rclone_destination}'"
cmd += f" && rclone rmdirs -vv --leave-root '{self.rclone_destination}'"
proc = (await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate())
if (proc.returncode == 0):
logger.extra_debug(f'''stdout:
{stdout.decode()}''')
logger.extra_debug(f'''stderr:
{stderr.decode()}''')
logger.info('Successfully deleted old files')
else:
logger.warn('Failed to purge old files')
logger.warn(f'''stdout:
{stdout.decode()}''')
logger.warn(f'''stderr:
{stderr.decode()}''')
@aiocron.crontab('* * * * *')
async def check_websocket_and_reconnect():
logger.extra_debug('Checking the status of the websocket...')
if self._protect.check_ws():
logger.extra_debug('Websocket is connected.')
else:
logger.warn('Lost connection to Unifi Protect.')
self._unsub()
(await self._protect.close_session())
while True:
logger.warn('Attempting reconnect...')
try:
replacement_protect = ProtectApiClient(self.address, self.port, self.username, self.password, verify_ssl=self.verify_ssl, subscribed_models={ModelType.EVENT})
(await replacement_protect.update())
if replacement_protect.check_ws():
self._protect = replacement_protect
self._unsub = self._protect.subscribe_websocket(self._websocket_callback)
break
else:
logger.warn('Unable to establish connection to Unifi Protect')
except Exception as e:
logger.warn('Unexpected exception occurred while trying to reconnect:')
logger.exception(e)
finally:
(await replacement_protect.close_session())
(await asyncio.sleep(10))
logger.info('Re-established connection to Unifi Protect and to the websocket.')
logger.info('Listening for events...')
(await self._backup_events())
logger.info('Stopping...')
self._unsub() | Bootstrap the backup process and kick off the main loop.
You should run this to start the realtime backup of Unifi Protect clips as they are created | unifi_protect_backup/unifi_protect_backup.py | start | roastlechon/unifi-protect-backup | 0 | python | async def start(self):
'Bootstrap the backup process and kick off the main loop.\n\n You should run this to start the realtime backup of Unifi Protect clips as they are created\n\n '
logger.info('Starting...')
logger.info('Checking rclone configuration...')
(await self._check_rclone())
ffprobe = shutil.which('ffprobe')
if (ffprobe is not None):
logger.debug(f'ffprobe found: {ffprobe}')
self._has_ffprobe = True
logger.info('Connecting to Unifi Protect...')
(await self._protect.update())
logger.info('Found cameras:')
for camera in self._protect.bootstrap.cameras.values():
logger.info(f' - {camera.id}: {camera.name}')
self._unsub = self._protect.subscribe_websocket(self._websocket_callback)
logger.info('Setting up purge task...')
@aiocron.crontab('0 0 * * *')
async def rclone_purge_old():
logger.info('Deleting old files...')
cmd = f"rclone delete -vv --min-age {self.retention} '{self.rclone_destination}'"
cmd += f" && rclone rmdirs -vv --leave-root '{self.rclone_destination}'"
proc = (await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate())
if (proc.returncode == 0):
logger.extra_debug(f'stdout:
{stdout.decode()}')
logger.extra_debug(f'stderr:
{stderr.decode()}')
logger.info('Successfully deleted old files')
else:
logger.warn('Failed to purge old files')
logger.warn(f'stdout:
{stdout.decode()}')
logger.warn(f'stderr:
{stderr.decode()}')
@aiocron.crontab('* * * * *')
async def check_websocket_and_reconnect():
logger.extra_debug('Checking the status of the websocket...')
if self._protect.check_ws():
logger.extra_debug('Websocket is connected.')
else:
logger.warn('Lost connection to Unifi Protect.')
self._unsub()
(await self._protect.close_session())
while True:
logger.warn('Attempting reconnect...')
try:
replacement_protect = ProtectApiClient(self.address, self.port, self.username, self.password, verify_ssl=self.verify_ssl, subscribed_models={ModelType.EVENT})
(await replacement_protect.update())
if replacement_protect.check_ws():
self._protect = replacement_protect
self._unsub = self._protect.subscribe_websocket(self._websocket_callback)
break
else:
logger.warn('Unable to establish connection to Unifi Protect')
except Exception as e:
logger.warn('Unexpected exception occurred while trying to reconnect:')
logger.exception(e)
finally:
(await replacement_protect.close_session())
(await asyncio.sleep(10))
logger.info('Re-established connection to Unifi Protect and to the websocket.')
logger.info('Listening for events...')
(await self._backup_events())
logger.info('Stopping...')
self._unsub() | async def start(self):
'Bootstrap the backup process and kick off the main loop.\n\n You should run this to start the realtime backup of Unifi Protect clips as they are created\n\n '
logger.info('Starting...')
logger.info('Checking rclone configuration...')
(await self._check_rclone())
ffprobe = shutil.which('ffprobe')
if (ffprobe is not None):
logger.debug(f'ffprobe found: {ffprobe}')
self._has_ffprobe = True
logger.info('Connecting to Unifi Protect...')
(await self._protect.update())
logger.info('Found cameras:')
for camera in self._protect.bootstrap.cameras.values():
logger.info(f' - {camera.id}: {camera.name}')
self._unsub = self._protect.subscribe_websocket(self._websocket_callback)
logger.info('Setting up purge task...')
@aiocron.crontab('0 0 * * *')
async def rclone_purge_old():
logger.info('Deleting old files...')
cmd = f"rclone delete -vv --min-age {self.retention} '{self.rclone_destination}'"
cmd += f" && rclone rmdirs -vv --leave-root '{self.rclone_destination}'"
proc = (await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate())
if (proc.returncode == 0):
logger.extra_debug(f'stdout:
{stdout.decode()}')
logger.extra_debug(f'stderr:
{stderr.decode()}')
logger.info('Successfully deleted old files')
else:
logger.warn('Failed to purge old files')
logger.warn(f'stdout:
{stdout.decode()}')
logger.warn(f'stderr:
{stderr.decode()}')
@aiocron.crontab('* * * * *')
async def check_websocket_and_reconnect():
logger.extra_debug('Checking the status of the websocket...')
if self._protect.check_ws():
logger.extra_debug('Websocket is connected.')
else:
logger.warn('Lost connection to Unifi Protect.')
self._unsub()
(await self._protect.close_session())
while True:
logger.warn('Attempting reconnect...')
try:
replacement_protect = ProtectApiClient(self.address, self.port, self.username, self.password, verify_ssl=self.verify_ssl, subscribed_models={ModelType.EVENT})
(await replacement_protect.update())
if replacement_protect.check_ws():
self._protect = replacement_protect
self._unsub = self._protect.subscribe_websocket(self._websocket_callback)
break
else:
logger.warn('Unable to establish connection to Unifi Protect')
except Exception as e:
logger.warn('Unexpected exception occurred while trying to reconnect:')
logger.exception(e)
finally:
(await replacement_protect.close_session())
(await asyncio.sleep(10))
logger.info('Re-established connection to Unifi Protect and to the websocket.')
logger.info('Listening for events...')
(await self._backup_events())
logger.info('Stopping...')
self._unsub()<|docstring|>Bootstrap the backup process and kick off the main loop.
You should run this to start the realtime backup of Unifi Protect clips as they are created<|endoftext|> |
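Because start() is a coroutine that only returns when the event loop is stopped, a typical entry point would look roughly like this (sketch, assuming an instance named `backup` as constructed earlier):
import asyncio
asyncio.run(backup.start())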
4e581efdf88b0e357b2b4d0732c6e5ebfb742416975ab2a5da6bb436e9c12b47 | async def _check_rclone(self) -> None:
'Check if rclone is installed and the specified remote is configured.\n\n Raises:\n SubprocessException: If rclone is not installed or it failed to list remotes\n ValueError: The given rclone destination is for a remote that is not configured\n\n '
rclone = shutil.which('rclone')
if (not rclone):
raise RuntimeError('`rclone` is not installed on this system')
logger.debug(f'rclone found: {rclone}')
cmd = 'rclone listremotes -vv'
proc = (await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate())
logger.extra_debug(f'''stdout:
{stdout.decode()}''')
logger.extra_debug(f'''stderr:
{stderr.decode()}''')
if (proc.returncode != 0):
raise SubprocessException(stdout.decode(), stderr.decode(), proc.returncode)
for line in stdout.splitlines():
if self.rclone_destination.startswith(line.decode()):
break
else:
remote = self.rclone_destination.split(':')[0]
raise ValueError(f'rclone does not have a remote called `{remote}`') | Check if rclone is installed and the specified remote is configured.
Raises:
SubprocessException: If rclone is not installed or it failed to list remotes
ValueError: The given rclone destination is for a remote that is not configured | unifi_protect_backup/unifi_protect_backup.py | _check_rclone | roastlechon/unifi-protect-backup | 0 | python | async def _check_rclone(self) -> None:
'Check if rclone is installed and the specified remote is configured.\n\n Raises:\n SubprocessException: If rclone is not installed or it failed to list remotes\n ValueError: The given rclone destination is for a remote that is not configured\n\n '
rclone = shutil.which('rclone')
if (not rclone):
raise RuntimeError('`rclone` is not installed on this system')
logger.debug(f'rclone found: {rclone}')
cmd = 'rclone listremotes -vv'
proc = (await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate())
logger.extra_debug(f'stdout:
{stdout.decode()}')
logger.extra_debug(f'stderr:
{stderr.decode()}')
if (proc.returncode != 0):
raise SubprocessException(stdout.decode(), stderr.decode(), proc.returncode)
for line in stdout.splitlines():
if self.rclone_destination.startswith(line.decode()):
break
else:
remote = self.rclone_destination.split(':')[0]
raise ValueError(f'rclone does not have a remote called `{remote}`') | async def _check_rclone(self) -> None:
'Check if rclone is installed and the specified remote is configured.\n\n Raises:\n SubprocessException: If rclone is not installed or it failed to list remotes\n ValueError: The given rclone destination is for a remote that is not configured\n\n '
rclone = shutil.which('rclone')
if (not rclone):
raise RuntimeError('`rclone` is not installed on this system')
logger.debug(f'rclone found: {rclone}')
cmd = 'rclone listremotes -vv'
proc = (await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate())
logger.extra_debug(f'stdout:
{stdout.decode()}')
logger.extra_debug(f'stderr:
{stderr.decode()}')
if (proc.returncode != 0):
raise SubprocessException(stdout.decode(), stderr.decode(), proc.returncode)
for line in stdout.splitlines():
if self.rclone_destination.startswith(line.decode()):
break
else:
remote = self.rclone_destination.split(':')[0]
raise ValueError(f'rclone does not have a remote called `{remote}`')<|docstring|>Check if rclone is installed and the specified remote is configured.
Raises:
SubprocessException: If rclone is not installed or it failed to list remotes
ValueError: The given rclone destination is for a remote that is not configured<|endoftext|> |
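The remote check above reduces to a prefix match against `rclone listremotes` output; a standalone sketch of that logic:
def remote_is_configured(destination: str, listremotes_output: bytes) -> bool:
    # `rclone listremotes` prints one remote per line, e.g. b'gdrive:\nlocal:'
    return any(destination.startswith(line.decode())
               for line in listremotes_output.splitlines())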
885ae6a498b94fcd3bc87f9c43d3fb38192da9610cde71587d7b57392949601f | def _websocket_callback(self, msg: WSSubscriptionMessage) -> None:
'Callback for "EVENT" websocket messages.\n\n Filters the incoming events, and puts completed events onto the download queue\n\n Args:\n msg (Event): Incoming event data\n '
logger.websocket_data(msg)
assert isinstance(msg.new_obj, Event)
if (msg.action != WSAction.UPDATE):
return
if (msg.new_obj.camera_id in self.ignore_cameras):
return
if (msg.new_obj.end is None):
return
if (msg.new_obj.type not in {EventType.MOTION, EventType.SMART_DETECT}):
return
self._download_queue.put_nowait(msg.new_obj)
logger.debug(f'Adding event {msg.new_obj.id} to queue (Current queue={self._download_queue.qsize()})') | Callback for "EVENT" websocket messages.
Filters the incoming events, and puts completed events onto the download queue
Args:
msg (Event): Incoming event data | unifi_protect_backup/unifi_protect_backup.py | _websocket_callback | roastlechon/unifi-protect-backup | 0 | python | def _websocket_callback(self, msg: WSSubscriptionMessage) -> None:
'Callback for "EVENT" websocket messages.\n\n Filters the incoming events, and puts completed events onto the download queue\n\n Args:\n msg (Event): Incoming event data\n '
logger.websocket_data(msg)
assert isinstance(msg.new_obj, Event)
if (msg.action != WSAction.UPDATE):
return
if (msg.new_obj.camera_id in self.ignore_cameras):
return
if (msg.new_obj.end is None):
return
if (msg.new_obj.type not in {EventType.MOTION, EventType.SMART_DETECT}):
return
self._download_queue.put_nowait(msg.new_obj)
logger.debug(f'Adding event {msg.new_obj.id} to queue (Current queue={self._download_queue.qsize()})') | def _websocket_callback(self, msg: WSSubscriptionMessage) -> None:
'Callback for "EVENT" websocket messages.\n\n Filters the incoming events, and puts completed events onto the download queue\n\n Args:\n msg (Event): Incoming event data\n '
logger.websocket_data(msg)
assert isinstance(msg.new_obj, Event)
if (msg.action != WSAction.UPDATE):
return
if (msg.new_obj.camera_id in self.ignore_cameras):
return
if (msg.new_obj.end is None):
return
if (msg.new_obj.type not in {EventType.MOTION, EventType.SMART_DETECT}):
return
self._download_queue.put_nowait(msg.new_obj)
logger.debug(f'Adding event {msg.new_obj.id} to queue (Current queue={self._download_queue.qsize()})')<|docstring|>Callback for "EVENT" websocket messages.
Filters the incoming events, and puts completed events onto the download queue
Args:
msg (Event): Incoming event data<|endoftext|> |
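The filtering above can be read as a single predicate; a sketch using the same enums and fields the method relies on (ignore_cameras stands in for self.ignore_cameras):
def should_backup(msg, ignore_cameras) -> bool:
    return (msg.action == WSAction.UPDATE
            and msg.new_obj.camera_id not in ignore_cameras
            and msg.new_obj.end is not None                 # the event has finished
            and msg.new_obj.type in {EventType.MOTION, EventType.SMART_DETECT})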
52775b06d7a72d1d75df7ab4ff2ed962a13271ffe12cb4703d08fac3615093d4 | async def _backup_events(self) -> None:
'Main loop for backing up events.\n\n Waits for an event in the queue, then downloads the corresponding clip and uploads it using rclone.\n If errors occur it will simply log the errors and wait for the next event. In a future release,\n retries will be added.\n\n '
while True:
try:
event = (await self._download_queue.get())
logger.info(f'Backing up event: {event.id}')
logger.debug(f'Remaining Queue: {self._download_queue.qsize()}')
logger.debug(f' Camera: {(await self._get_camera_name(event.camera_id))}')
logger.debug(f' Type: {event.type}')
logger.debug(f" Start: {event.start.strftime('%Y-%m-%dT%H-%M-%S')} ({event.start.timestamp()})")
logger.debug(f" End: {event.end.strftime('%Y-%m-%dT%H-%M-%S')} ({event.end.timestamp()})")
duration = (event.end - event.start).total_seconds()
logger.debug(f' Duration: {duration}')
time_since_event_ended = (datetime.utcnow().replace(tzinfo=timezone.utc) - event.end)
sleep_time = (timedelta(seconds=(5 * 1.5)) - time_since_event_ended).total_seconds()
if (sleep_time > 0):
logger.debug(f' Sleeping ({sleep_time}s) to ensure clip is ready to download...')
(await asyncio.sleep(sleep_time))
logger.debug(' Downloading video...')
for x in range(5):
try:
video = (await self._protect.get_camera_video(event.camera_id, event.start, event.end))
assert isinstance(video, bytes)
break
except (AssertionError, ClientPayloadError, TimeoutError) as e:
logger.warn(f' Failed download attempt {(x + 1)}, retrying in 1s')
logger.exception(e)
(await asyncio.sleep(1))
else:
logger.warn(f'Download failed after 5 attempts, abandoning event {event.id}:')
continue
destination = (await self.generate_file_path(event))
if self._has_ffprobe:
try:
downloaded_duration = (await self._get_video_length(video))
msg = f' Downloaded video length: {downloaded_duration:.3f}s({(downloaded_duration - duration):+.3f}s)'
if (downloaded_duration < duration):
logger.warning(msg)
else:
logger.debug(msg)
except SubprocessException as e:
logger.warn(' `ffprobe` failed')
logger.exception(e)
logger.debug(' Uploading video via rclone...')
logger.debug(f' To: {destination}')
logger.debug(f' Size: {human_readable_size(len(video))}')
for x in range(5):
try:
(await self._upload_video(video, destination, self.rclone_args))
break
except SubprocessException as e:
logger.warn(f' Failed upload attempt {(x + 1)}, retrying in 1s')
logger.exception(e)
(await asyncio.sleep(1))
else:
logger.warn(f'Upload failed after 5 attempts, abandoning event {event.id}:')
continue
logger.info('Backed up successfully!')
except Exception as e:
logger.warn(f'Unexpected exception occurred, abandoning event {event.id}:')
logger.exception(e) | Main loop for backing up events.
Waits for an event in the queue, then downloads the corresponding clip and uploads it using rclone.
If errors occur it will simply log the errors and wait for the next event. In a future release,
retries will be added. | unifi_protect_backup/unifi_protect_backup.py | _backup_events | roastlechon/unifi-protect-backup | 0 | python | async def _backup_events(self) -> None:
'Main loop for backing up events.\n\n Waits for an event in the queue, then downloads the corresponding clip and uploads it using rclone.\n If errors occur it will simply log the errors and wait for the next event. In a future release,\n retries will be added.\n\n '
while True:
try:
event = (await self._download_queue.get())
logger.info(f'Backing up event: {event.id}')
logger.debug(f'Remaining Queue: {self._download_queue.qsize()}')
logger.debug(f' Camera: {(await self._get_camera_name(event.camera_id))}')
logger.debug(f' Type: {event.type}')
logger.debug(f" Start: {event.start.strftime('%Y-%m-%dT%H-%M-%S')} ({event.start.timestamp()})")
logger.debug(f" End: {event.end.strftime('%Y-%m-%dT%H-%M-%S')} ({event.end.timestamp()})")
duration = (event.end - event.start).total_seconds()
logger.debug(f' Duration: {duration}')
time_since_event_ended = (datetime.utcnow().replace(tzinfo=timezone.utc) - event.end)
sleep_time = (timedelta(seconds=(5 * 1.5)) - time_since_event_ended).total_seconds()
if (sleep_time > 0):
logger.debug(f' Sleeping ({sleep_time}s) to ensure clip is ready to download...')
(await asyncio.sleep(sleep_time))
logger.debug(' Downloading video...')
for x in range(5):
try:
video = (await self._protect.get_camera_video(event.camera_id, event.start, event.end))
assert isinstance(video, bytes)
break
except (AssertionError, ClientPayloadError, TimeoutError) as e:
logger.warn(f' Failed download attempt {(x + 1)}, retrying in 1s')
logger.exception(e)
(await asyncio.sleep(1))
else:
logger.warn(f'Download failed after 5 attempts, abandoning event {event.id}:')
continue
destination = (await self.generate_file_path(event))
if self._has_ffprobe:
try:
downloaded_duration = (await self._get_video_length(video))
msg = f' Downloaded video length: {downloaded_duration:.3f}s({(downloaded_duration - duration):+.3f}s)'
if (downloaded_duration < duration):
logger.warning(msg)
else:
logger.debug(msg)
except SubprocessException as e:
logger.warn(' `ffprobe` failed')
logger.exception(e)
logger.debug(' Uploading video via rclone...')
logger.debug(f' To: {destination}')
logger.debug(f' Size: {human_readable_size(len(video))}')
for x in range(5):
try:
(await self._upload_video(video, destination, self.rclone_args))
break
except SubprocessException as e:
logger.warn(f' Failed upload attempt {(x + 1)}, retrying in 1s')
logger.exception(e)
(await asyncio.sleep(1))
else:
logger.warn(f'Upload failed after 5 attempts, abandoning event {event.id}:')
continue
logger.info('Backed up successfully!')
except Exception as e:
logger.warn(f'Unexpected exception occurred, abandoning event {event.id}:')
logger.exception(e) | async def _backup_events(self) -> None:
'Main loop for backing up events.\n\n Waits for an event in the queue, then downloads the corresponding clip and uploads it using rclone.\n If errors occur it will simply log the errors and wait for the next event. In a future release,\n retries will be added.\n\n '
while True:
try:
event = (await self._download_queue.get())
logger.info(f'Backing up event: {event.id}')
logger.debug(f'Remaining Queue: {self._download_queue.qsize()}')
logger.debug(f' Camera: {(await self._get_camera_name(event.camera_id))}')
logger.debug(f' Type: {event.type}')
logger.debug(f" Start: {event.start.strftime('%Y-%m-%dT%H-%M-%S')} ({event.start.timestamp()})")
logger.debug(f" End: {event.end.strftime('%Y-%m-%dT%H-%M-%S')} ({event.end.timestamp()})")
duration = (event.end - event.start).total_seconds()
logger.debug(f' Duration: {duration}')
time_since_event_ended = (datetime.utcnow().replace(tzinfo=timezone.utc) - event.end)
sleep_time = (timedelta(seconds=(5 * 1.5)) - time_since_event_ended).total_seconds()
if (sleep_time > 0):
logger.debug(f' Sleeping ({sleep_time}s) to ensure clip is ready to download...')
(await asyncio.sleep(sleep_time))
logger.debug(' Downloading video...')
for x in range(5):
try:
video = (await self._protect.get_camera_video(event.camera_id, event.start, event.end))
assert isinstance(video, bytes)
break
except (AssertionError, ClientPayloadError, TimeoutError) as e:
logger.warn(f' Failed download attempt {(x + 1)}, retrying in 1s')
logger.exception(e)
(await asyncio.sleep(1))
else:
logger.warn(f'Download failed after 5 attempts, abandoning event {event.id}:')
continue
destination = (await self.generate_file_path(event))
if self._has_ffprobe:
try:
downloaded_duration = (await self._get_video_length(video))
msg = f' Downloaded video length: {downloaded_duration:.3f}s({(downloaded_duration - duration):+.3f}s)'
if (downloaded_duration < duration):
logger.warning(msg)
else:
logger.debug(msg)
except SubprocessException as e:
logger.warn(' `ffprobe` failed')
logger.exception(e)
logger.debug(' Uploading video via rclone...')
logger.debug(f' To: {destination}')
logger.debug(f' Size: {human_readable_size(len(video))}')
for x in range(5):
try:
(await self._upload_video(video, destination, self.rclone_args))
break
except SubprocessException as e:
logger.warn(f' Failed upload attempt {(x + 1)}, retrying in 1s')
logger.exception(e)
(await asyncio.sleep(1))
else:
logger.warn(f'Upload failed after 5 attempts, abandoning event {event.id}:')
continue
logger.info('Backed up successfully!')
except Exception as e:
logger.warn(f'Unexpected exception occurred, abandoning event {event.id}:')
logger.exception(e)<|docstring|>Main loop for backing up events.
Waits for an event in the queue, then downloads the corresponding clip and uploads it using rclone.
If errors occur it will simply log the errors and wait for the next event. In a future release,
retries will be added.<|endoftext|> |
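Both the download and upload steps rely on Python's for/else retry idiom; a minimal standalone sketch of that pattern (do_work, TransientError and give_up are placeholders):
import time
for attempt in range(5):
    try:
        do_work()       # placeholder for the download/upload call
        break           # success: the else block is skipped
    except TransientError:
        time.sleep(1)
else:
    give_up()           # runs only if all five attempts failed (no break)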
2eeb84df8b4dfbf502034ac9440aaba5ef5896c30000b0f6fa347eecba6fb807 | async def _upload_video(self, video: bytes, destination: pathlib.Path, rclone_args: str):
'Upload video using rclone.\n\n In order to avoid writing to disk, the video file data is piped directly\n to the rclone process and uploaded using the `rcat` function of rclone.\n\n Args:\n video (bytes): The data to be written to the file\n destination (pathlib.Path): Where rclone should write the file\n rclone_args (str): Optional extra arguments to pass to `rclone`\n\n Raises:\n RuntimeError: If rclone returns a non-zero exit code\n '
cmd = f"rclone rcat -vv {rclone_args} '{destination}'"
proc = (await asyncio.create_subprocess_shell(cmd, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate(video))
if (proc.returncode == 0):
logger.extra_debug(f'''stdout:
{stdout.decode()}''')
logger.extra_debug(f'''stderr:
{stderr.decode()}''')
else:
raise SubprocessException(stdout.decode(), stderr.decode(), proc.returncode) | Upload video using rclone.
In order to avoid writing to disk, the video file data is piped directly
to the rclone process and uploaded using the `rcat` function of rclone.
Args:
video (bytes): The data to be written to the file
destination (pathlib.Path): Where rclone should write the file
rclone_args (str): Optional extra arguments to pass to `rclone`
Raises:
RuntimeError: If rclone returns a non-zero exit code | unifi_protect_backup/unifi_protect_backup.py | _upload_video | roastlechon/unifi-protect-backup | 0 | python | async def _upload_video(self, video: bytes, destination: pathlib.Path, rclone_args: str):
'Upload video using rclone.\n\n In order to avoid writing to disk, the video file data is piped directly\n to the rclone process and uploaded using the `rcat` function of rclone.\n\n Args:\n video (bytes): The data to be written to the file\n destination (pathlib.Path): Where rclone should write the file\n rclone_args (str): Optional extra arguments to pass to `rclone`\n\n Raises:\n RuntimeError: If rclone returns a non-zero exit code\n '
cmd = f"rclone rcat -vv {rclone_args} '{destination}'"
proc = (await asyncio.create_subprocess_shell(cmd, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate(video))
if (proc.returncode == 0):
logger.extra_debug(f'stdout:
{stdout.decode()}')
logger.extra_debug(f'stderr:
{stderr.decode()}')
else:
raise SubprocessException(stdout.decode(), stderr.decode(), proc.returncode) | async def _upload_video(self, video: bytes, destination: pathlib.Path, rclone_args: str):
'Upload video using rclone.\n\n In order to avoid writing to disk, the video file data is piped directly\n to the rclone process and uploaded using the `rcat` function of rclone.\n\n Args:\n video (bytes): The data to be written to the file\n destination (pathlib.Path): Where rclone should write the file\n rclone_args (str): Optional extra arguments to pass to `rclone`\n\n Raises:\n RuntimeError: If rclone returns a non-zero exit code\n '
cmd = f"rclone rcat -vv {rclone_args} '{destination}'"
proc = (await asyncio.create_subprocess_shell(cmd, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE))
(stdout, stderr) = (await proc.communicate(video))
if (proc.returncode == 0):
logger.extra_debug(f'stdout:
{stdout.decode()}')
logger.extra_debug(f'stderr:
{stderr.decode()}')
else:
raise SubprocessException(stdout.decode(), stderr.decode(), proc.returncode)<|docstring|>Upload video using rclone.
In order to avoid writing to disk, the video file data is piped directly
to the rclone process and uploaded using the `rcat` function of rclone.
Args:
video (bytes): The data to be written to the file
destination (pathlib.Path): Where rclone should write the file
rclone_args (str): Optional extra arguments to pass to `rclone`
Raises:
RuntimeError: If rclone returns a non-zero exit code<|endoftext|> |
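Outside asyncio, the same no-temporary-file upload can be sketched with subprocess.run, piping the clip bytes to rclone's stdin:
import subprocess
def rclone_rcat(video: bytes, destination: str) -> None:
    subprocess.run(['rclone', 'rcat', destination],
                   input=video, check=True)  # raises CalledProcessError on a non-zero exit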
fe280a857498bb6055d85d772d26084049a4fbf0210cce3f9f83053034dbec46 | async def generate_file_path(self, event: Event) -> pathlib.Path:
'Generates the rclone destination path for the provided event.\n\n Generates paths in the following structure:\n ::\n rclone_destination\n |- Camera Name\n |- {Date}\n |- {start timestamp} {event type} ({detections}).mp4\n\n Args:\n event: The event for which to create an output path\n\n Returns:\n pathlib.Path: The rclone path the event should be backed up to\n\n '
path = pathlib.Path(self.rclone_destination)
assert isinstance(event.camera_id, str)
path /= (await self._get_camera_name(event.camera_id))
path /= event.start.strftime('%Y-%m-%d')
file_name = f"{event.start.strftime('%Y-%m-%dT%H-%M-%S')} {event.type}"
if event.smart_detect_types:
detections = ' '.join(event.smart_detect_types)
file_name += f' ({detections})'
file_name += '.mp4'
path /= file_name
return path | Generates the rclone destination path for the provided event.
Generates paths in the following structure:
::
rclone_destination
|- Camera Name
|- {Date}
|- {start timestamp} {event type} ({detections}).mp4
Args:
event: The event for which to create an output path
Returns:
pathlib.Path: The rclone path the event should be backed up to | unifi_protect_backup/unifi_protect_backup.py | generate_file_path | roastlechon/unifi-protect-backup | 0 | python | async def generate_file_path(self, event: Event) -> pathlib.Path:
'Generates the rclone destination path for the provided event.\n\n Generates paths in the following structure:\n ::\n rclone_destination\n |- Camera Name\n |- {Date}\n |- {start timestamp} {event type} ({detections}).mp4\n\n Args:\n event: The event for which to create an output path\n\n Returns:\n pathlib.Path: The rclone path the event should be backed up to\n\n '
path = pathlib.Path(self.rclone_destination)
assert isinstance(event.camera_id, str)
path /= (await self._get_camera_name(event.camera_id))
path /= event.start.strftime('%Y-%m-%d')
file_name = f"{event.start.strftime('%Y-%m-%dT%H-%M-%S')} {event.type}"
if event.smart_detect_types:
detections = ' '.join(event.smart_detect_types)
file_name += f' ({detections})'
file_name += '.mp4'
path /= file_name
return path | async def generate_file_path(self, event: Event) -> pathlib.Path:
'Generates the rclone destination path for the provided event.\n\n Generates paths in the following structure:\n ::\n rclone_destination\n |- Camera Name\n |- {Date}\n |- {start timestamp} {event type} ({detections}).mp4\n\n Args:\n event: The event for which to create an output path\n\n Returns:\n pathlib.Path: The rclone path the event should be backed up to\n\n '
path = pathlib.Path(self.rclone_destination)
assert isinstance(event.camera_id, str)
path /= (await self._get_camera_name(event.camera_id))
path /= event.start.strftime('%Y-%m-%d')
file_name = f"{event.start.strftime('%Y-%m-%dT%H-%M-%S')} {event.type}"
if event.smart_detect_types:
detections = ' '.join(event.smart_detect_types)
file_name += f' ({detections})'
file_name += '.mp4'
path /= file_name
return path<|docstring|>Generates the rclone destination path for the provided event.
Generates paths in the following structure:
::
rclone_destination
|- Camera Name
|- {Date}
|- {start timestamp} {event type} ({detections}).mp4
Args:
event: The event for which to create an output path
Returns:
pathlib.Path: The rclone path the event should be backed up to<|endoftext|> |
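For a hypothetical event, the structure documented above yields something like (values invented; the event type is shown here simply as 'motion'):
# rclone_destination = 'gdrive:/backups/unifi_protect', camera 'Front Door',
# start 2022-03-01 12:34:56 UTC, no smart detections:
#   gdrive:/backups/unifi_protect/Front Door/2022-03-01/2022-03-01T12-34-56 motion.mp4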
08d134c5817bbf67da4b6d18c4bacb5b6262741adf2679b064f4327d40a0de49 | def setup_filepaths():
'Setup full file paths for functional net and BIOGRID'
if (organism == 'cerevisiae'):
biogridpath = os.path.join('..', 'data', 'BIOGRID-3.4.130-yeast-post2006.txt')
fnetpath = os.path.join('..', 'data', 'YeastNetDataFrame.pkl')
elif (organism == 'sapiens'):
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID', 'BIOGRID-ORGANISM-3.4.130.tab2', 'BIOGRID-ORGANISM-Homo_sapiens-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'HumanNetDataFrame.pkl')
elif (organism == 'melanogaster'):
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID', 'BIOGRID-ORGANISM-3.4.130.tab2', 'BIOGRID-ORGANISM-Drosophila_melanogaster-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'FlyNetDataFrame.pkl')
else:
print('ORGANISM NOT FOUND! Exiting...')
sys.exit()
return (biogridpath, fnetpath) | Setup full file paths for functional net and BIOGRID | src/explorenet.py | setup_filepaths | jon-young/genetic_interact | 0 | python | def setup_filepaths():
if (organism == 'cerevisiae'):
biogridpath = os.path.join('..', 'data', 'BIOGRID-3.4.130-yeast-post2006.txt')
fnetpath = os.path.join('..', 'data', 'YeastNetDataFrame.pkl')
elif (organism == 'sapiens'):
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID', 'BIOGRID-ORGANISM-3.4.130.tab2', 'BIOGRID-ORGANISM-Homo_sapiens-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'HumanNetDataFrame.pkl')
elif (organism == 'melanogaster'):
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID', 'BIOGRID-ORGANISM-3.4.130.tab2', 'BIOGRID-ORGANISM-Drosophila_melanogaster-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'FlyNetDataFrame.pkl')
else:
print('ORGANISM NOT FOUND! Exiting...')
sys.exit()
return (biogridpath, fnetpath) | def setup_filepaths():
if (organism == 'cerevisiae'):
biogridpath = os.path.join('..', 'data', 'BIOGRID-3.4.130-yeast-post2006.txt')
fnetpath = os.path.join('..', 'data', 'YeastNetDataFrame.pkl')
elif (organism == 'sapiens'):
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID', 'BIOGRID-ORGANISM-3.4.130.tab2', 'BIOGRID-ORGANISM-Homo_sapiens-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'HumanNetDataFrame.pkl')
elif (organism == 'melanogaster'):
biogridpath = os.path.join('..', '..', 'DataDownload', 'BIOGRID', 'BIOGRID-ORGANISM-3.4.130.tab2', 'BIOGRID-ORGANISM-Drosophila_melanogaster-3.4.130.tab2.txt')
fnetpath = os.path.join('..', 'data', 'FlyNetDataFrame.pkl')
else:
print('ORGANISM NOT FOUND! Exiting...')
sys.exit()
return (biogridpath, fnetpath)<|docstring|>Setup full file paths for functional net and BIOGRID<|endoftext|> |
a9a6bd6ab1445866bb77781be3a52d9d8f19b9fd59b6a153e5ca2605bc894923 | def determine_col():
'Determine which gene column in the BIOGRID file to read'
entrezRegEx = re.compile('\\d+')
if (organism == 'cerevisiae'):
sysNameRegEx = re.compile('Y[A-Z][A-Z]\\d+')
ofcSymRegEx = re.compile('[A-Z]+')
elif (organism == 'sapiens'):
sysNameRegEx = re.compile('\\w+')
ofcSymRegEx = re.compile('[A-Za-z]+.')
else:
sysNameRegEx = re.compile('Dmel.')
ofcSymRegEx = re.compile('\\w+')
if (entrezRegEx.match(geneExample) is not None):
colName = 'Entrez Gene Interactor A'
elif (sysNameRegEx.match(geneExample) is not None):
colName = 'Systematic Name Interactor A'
elif (ofcSymRegEx.match(geneExample) is not None):
colName = 'Official Symbol Interactor A'
else:
print('ERROR: Unable to match ID type! Exiting...')
sys.exit()
return colName | Determine which gene column in the BIOGRID file to read | src/explorenet.py | determine_col | jon-young/genetic_interact | 0 | python | def determine_col():
entrezRegEx = re.compile('\\d+')
if (organism == 'cerevisiae'):
sysNameRegEx = re.compile('Y[A-Z][A-Z]\\d+')
ofcSymRegEx = re.compile('[A-Z]+')
elif (organism == 'sapiens'):
sysNameRegEx = re.compile('\\w+')
ofcSymRegEx = re.compile('[A-Za-z]+.')
else:
sysNameRegEx = re.compile('Dmel.')
ofcSymRegEx = re.compile('\\w+')
if (entrezRegEx.match(geneExample) is not None):
colName = 'Entrez Gene Interactor A'
elif (sysNameRegEx.match(geneExample) is not None):
colName = 'Systematic Name Interactor A'
elif (ofcSymRegEx.match(geneExample) is not None):
colName = 'Official Symbol Interactor A'
else:
print('ERROR: Unable to match ID type! Exiting...')
sys.exit()
return colName | def determine_col():
entrezRegEx = re.compile('\\d+')
if (organism == 'cerevisiae'):
sysNameRegEx = re.compile('Y[A-Z][A-Z]\\d+')
ofcSymRegEx = re.compile('[A-Z]+')
elif (organism == 'sapiens'):
sysNameRegEx = re.compile('\\w+')
ofcSymRegEx = re.compile('[A-Za-z]+.')
else:
sysNameRegEx = re.compile('Dmel.')
ofcSymRegEx = re.compile('\\w+')
if (entrezRegEx.match(geneExample) is not None):
colName = 'Entrez Gene Interactor A'
elif (sysNameRegEx.match(geneExample) is not None):
colName = 'Systematic Name Interactor A'
elif (ofcSymRegEx.match(geneExample) is not None):
colName = 'Official Symbol Interactor A'
else:
print('ERROR: Unable to match ID type! Exiting...')
sys.exit()
return colName<|docstring|>Determine which gene column in the BIOGRID file to read<|endoftext|> |
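For instance, with the yeast regexes above, a purely numeric ID selects the Entrez column while a systematic name selects the systematic-name column (illustrative IDs):
# geneExample = '850302'   ->  'Entrez Gene Interactor A'
# geneExample = 'YAL001C'  ->  'Systematic Name Interactor A'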
83a617657c8e37796e99b46861221c4294aa4577b5008479ddec6dafef8b9a36 | def get_path(self, path, *, relative_to, package=None):
"Return *path* relative to *relative_to* location.\n\n :param pathlike path:\n A path relative to bundle source root.\n\n :param str relative_to:\n Location name. Can be one of:\n - ``'sourceroot'``: bundle source root\n - ``'pkgsource'``: package source directory\n - ``'pkgbuild'``: package build directory\n - ``None``: filesystem root (makes path absolute)\n\n :return:\n Path relative to the specified location.\n "
if (relative_to == 'sourceroot'):
return pathlib.Path(path)
elif (relative_to == 'buildroot'):
return (pathlib.Path('..') / path)
elif (relative_to == 'pkgsource'):
if ((package is not None) and (package.name == self.root_package.name_slot)):
return pathlib.Path(path)
else:
return ((pathlib.Path('..') / '..') / path)
elif (relative_to == 'pkgbuild'):
return (((pathlib.Path('..') / '..') / '..') / path)
elif (relative_to is None):
return (self.get_source_abspath() / path).resolve()
else:
raise ValueError(f'invalid relative_to argument: {relative_to}') | Return *path* relative to *relative_to* location.
:param pathlike path:
A path relative to bundle source root.
:param str relative_to:
Location name. Can be one of:
- ``'sourceroot'``: bundle source root
- ``'pkgsource'``: package source directory
- ``'pkgbuild'``: package build directory
- ``None``: filesystem root (makes path absolute)
:return:
Path relative to the specified location. | metapkg/targets/generic/build.py | get_path | fantix/metapkg | 0 | python | def get_path(self, path, *, relative_to, package=None):
"Return *path* relative to *relative_to* location.\n\n :param pathlike path:\n A path relative to bundle source root.\n\n :param str relative_to:\n Location name. Can be one of:\n - ``'sourceroot'``: bundle source root\n - ``'pkgsource'``: package source directory\n - ``'pkgbuild'``: package build directory\n - ``None``: filesystem root (makes path absolute)\n\n :return:\n Path relative to the specified location.\n "
if (relative_to == 'sourceroot'):
return pathlib.Path(path)
elif (relative_to == 'buildroot'):
return (pathlib.Path('..') / path)
elif (relative_to == 'pkgsource'):
if ((package is not None) and (package.name == self.root_package.name_slot)):
return pathlib.Path(path)
else:
return ((pathlib.Path('..') / '..') / path)
elif (relative_to == 'pkgbuild'):
return (((pathlib.Path('..') / '..') / '..') / path)
elif (relative_to is None):
return (self.get_source_abspath() / path).resolve()
else:
raise ValueError(f'invalid relative_to argument: {relative_to}') | def get_path(self, path, *, relative_to, package=None):
"Return *path* relative to *relative_to* location.\n\n :param pathlike path:\n A path relative to bundle source root.\n\n :param str relative_to:\n Location name. Can be one of:\n - ``'sourceroot'``: bundle source root\n - ``'pkgsource'``: package source directory\n - ``'pkgbuild'``: package build directory\n - ``None``: filesystem root (makes path absolute)\n\n :return:\n Path relative to the specified location.\n "
if (relative_to == 'sourceroot'):
return pathlib.Path(path)
elif (relative_to == 'buildroot'):
return (pathlib.Path('..') / path)
elif (relative_to == 'pkgsource'):
if ((package is not None) and (package.name == self.root_package.name_slot)):
return pathlib.Path(path)
else:
return ((pathlib.Path('..') / '..') / path)
elif (relative_to == 'pkgbuild'):
return (((pathlib.Path('..') / '..') / '..') / path)
elif (relative_to is None):
return (self.get_source_abspath() / path).resolve()
else:
raise ValueError(f'invalid relative_to argument: {relative_to}')<|docstring|>Return *path* relative to *relative_to* location.
:param pathlike path:
A path relative to bundle source root.
:param str relative_to:
Location name. Can be one of:
- ``'sourceroot'``: bundle source root
- ``'pkgsource'``: package source directory
- ``'pkgbuild'``: package build directory
- ``None``: filesystem root (makes path absolute)
:return:
Path relative to the specified location.<|endoftext|> |
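A quick sketch of the path arithmetic that the get_path docstring describes, with the build-class machinery stripped away; the example bundle-relative path is an assumption used only for illustration.

import pathlib

path = pathlib.Path('share/postgresql/postgres.conf')   # hypothetical bundle-relative path

print(path)                                     # 'sourceroot': returned unchanged
print(pathlib.Path('..') / path)                # 'buildroot': one level up
print(pathlib.Path('..') / '..' / path)         # 'pkgsource' for a non-root package: two levels up
print(pathlib.Path('..') / '..' / '..' / path)  # 'pkgbuild': three levels up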
7f2bff1f697e8cd9eda30a2c0b79b0238ced2c97f30ba8a0b75c3874aca70529 | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['POST'])
def accept_ride(request):
'\n Creating trip object and setting passenger is_searching to false\n :param request:\n :return:\n '
data = request.data
driver_obj = request.user.driver
passenger_obj = Passenger.objects.get(pk=data['passenger_id'])
trip_obj = Trip.objects.create(passenger=passenger_obj, driver=driver_obj, start_location=passenger_obj.current_location, end_location=passenger_obj.destination_location, status='IS_ACTIVE')
passenger_obj.is_searching = False
passenger_obj.save()
response = {'response': {'success': True, 'msg': 'Trip started'}}
return Response(response, status=HTTP_200_OK) | Creating trip object and setting passenger is_searching to false
:param request:
:return: | bookingapp/views.py | accept_ride | bhargava-kush/dj_uber | 0 | python | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['POST'])
def accept_ride(request):
'\n Creating trip object and setting passenger is_searching to false\n :param request:\n :return:\n '
data = request.data
driver_obj = request.user.driver
passenger_obj = Passenger.objects.get(pk=data['passenger_id'])
trip_obj = Trip.objects.create(passenger=passenger_obj, driver=driver_obj, start_location=passenger_obj.current_location, end_location=passenger_obj.destination_location, status='IS_ACTIVE')
passenger_obj.is_searching = False
passenger_obj.save()
response = {'response': {'success': True, 'msg': 'Trip started'}}
return Response(response, status=HTTP_200_OK) | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['POST'])
def accept_ride(request):
'\n Creating trip object and setting passenger is_searching to false\n :param request:\n :return:\n '
data = request.data
driver_obj = request.user.driver
passenger_obj = Passenger.objects.get(pk=data['passenger_id'])
trip_obj = Trip.objects.create(passenger=passenger_obj, driver=driver_obj, start_location=passenger_obj.current_location, end_location=passenger_obj.destination_location, status='IS_ACTIVE')
passenger_obj.is_searching = False
passenger_obj.save()
response = {'response': {'success': True, 'msg': 'Trip started'}}
return Response(response, status=HTTP_200_OK)<|docstring|>Creating trip object and setting passenger is_searching to false
:param request:
:return:<|endoftext|> |
f49a9cc62b5dc983b3343d7314710c37dc8feae5516ee7c90a32b6f037e7f674 | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['GET'])
def request_ride(request):
'\n Passenger requesting for ride by setting is_searching to true\n :param request:\n :return:\n '
passenger_obj = request.user.passenger
last_trip = Trip.objects.filter(passenger=passenger_obj).last()
if last_trip:
if (last_trip.status == 'IS_ACTIVE'):
response = {'response': {'success': False, 'msg': 'Currently ride in process'}}
return Response(response, status=HTTP_400_BAD_REQUEST)
passenger_obj.is_searching = True
passenger_obj.save()
response = {'response': {'success': True, 'msg': 'Requesting ride'}}
return Response(response, status=HTTP_200_OK) | Passenger requesting for ride by setting is_searching to true
:param request:
:return: | bookingapp/views.py | request_ride | bhargava-kush/dj_uber | 0 | python | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['GET'])
def request_ride(request):
'\n Passenger requesting for ride by setting is_searching to true\n :param request:\n :return:\n '
passenger_obj = request.user.passenger
last_trip = Trip.objects.filter(passenger=passenger_obj).last()
if last_trip:
if (last_trip.status == 'IS_ACTIVE'):
response = {'response': {'success': False, 'msg': 'Currently ride in process'}}
return Response(response, status=HTTP_400_BAD_REQUEST)
passenger_obj.is_searching = True
passenger_obj.save()
response = {'response': {'success': True, 'msg': 'Requesting ride'}}
return Response(response, status=HTTP_200_OK) | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['GET'])
def request_ride(request):
'\n Passenger requesting for ride by setting is_searching to true\n :param request:\n :return:\n '
passenger_obj = request.user.passenger
last_trip = Trip.objects.filter(passenger=passenger_obj).last()
if last_trip:
if (last_trip.status == 'IS_ACTIVE'):
response = {'response': {'success': False, 'msg': 'Currently ride in process'}}
return Response(response, status=HTTP_400_BAD_REQUEST)
passenger_obj.is_searching = True
passenger_obj.save()
response = {'response': {'success': True, 'msg': 'Requesting ride'}}
return Response(response, status=HTTP_200_OK)<|docstring|>Passenger requesting for ride by setting is_searching to true
:param request:
:return:<|endoftext|> |
32d4d3d42dd734188384359f522ba09c1b21adc6f78bec1bbd5f4eedd38cd60d | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['GET'])
def is_ride_accepted(request):
'\n Checking if ride is accepted or not\n :param request:\n :return:\n '
passenger_obj = request.user.passenger
last_trip = Trip.objects.filter(passenger=passenger_obj).last()
if (last_trip and (last_trip.status == 'IS_ACTIVE')):
response = {'response': {'success': True, 'msg': 'Ride is in progress'}}
return Response(response, status=HTTP_200_OK)
elif passenger_obj.is_searching:
response = {'response': {'success': True, 'msg': 'Searching'}}
return Response(response, status=HTTP_200_OK)
else:
response = {'response': {'success': True, 'msg': 'No ongoing trip nor searching.'}}
return Response(response, status=HTTP_200_OK) | Checking if ride is accepted or not
:param request:
:return: | bookingapp/views.py | is_ride_accepted | bhargava-kush/dj_uber | 0 | python | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['GET'])
def is_ride_accepted(request):
'\n Checking if ride is accepted or not\n :param request:\n :return:\n '
passenger_obj = request.user.passenger
last_trip = Trip.objects.filter(passenger=passenger_obj).last()
if (last_trip and (last_trip.status == 'IS_ACTIVE')):
response = {'response': {'success': True, 'msg': 'Ride is in progress'}}
return Response(response, status=HTTP_200_OK)
elif passenger_obj.is_searching:
response = {'response': {'success': True, 'msg': 'Searching'}}
return Response(response, status=HTTP_200_OK)
else:
response = {'response': {'success': True, 'msg': 'No ongoing trip nor searching.'}}
return Response(response, status=HTTP_200_OK) | @authentication_classes([IsAuthenticated])
@permission_classes([IsAuthenticated])
@api_view(['GET'])
def is_ride_accepted(request):
'\n Checking if ride is accepted or not\n :param request:\n :return:\n '
passenger_obj = request.user.passenger
last_trip = Trip.objects.filter(passenger=passenger_obj).last()
if (last_trip and (last_trip.status == 'IS_ACTIVE')):
response = {'response': {'success': True, 'msg': 'Ride is in progress'}}
return Response(response, status=HTTP_200_OK)
elif passenger_obj.is_searching:
response = {'response': {'success': True, 'msg': 'Searching'}}
return Response(response, status=HTTP_200_OK)
else:
response = {'response': {'success': True, 'msg': 'No ongoing trip nor searching.'}}
return Response(response, status=HTTP_200_OK)<|docstring|>Checking if ride is accepted or not
:param request:
:return:<|endoftext|> |
220aa3d6ab6e3762bd40d59a41ae1566b2bbeb70ce8eda34513c7aded125d4d2 | def __call__(self, input, target, mask=None):
' Args:\n input [batch_num, class_num]:\n The direct prediction of classification fc layer.\n target [batch_num, class_num]:\n Binary target (0 or 1) for each sample each class. The value is -1\n when the sample is ignored.\n\n return: a scalar loss\n '
edges = self.edges
mmt = self.momentum
weights = tf.zeros_like(input, dtype=tf.float32)
if (mask is None):
mask = tf.ones_like(input, dtype=tf.float32)
def func1(weights, n):
if (mmt > 0):
tf.assign(self.acc_sum[i], ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin)))
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / self.acc_sum[i])))
else:
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / num_in_bin)))
n += 1
return (weights, n)
g = tf.abs((tf.nn.sigmoid(input) - target))
valid = (mask > 0)
tot = tf.maximum(tf.reduce_sum(tf.cast(valid, tf.float32)), 1.0)
n = tf.Variable(0, trainable=False, dtype=tf.float32)
for i in range(self.bins):
inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
num_in_bin = tf.reduce_sum(tf.cast(inds, tf.float32))
(weights, n) = tf.cond((num_in_bin > 0), (lambda : func1(weights, n)), (lambda : (weights, n)))
weights = tf.cond((n > 0), (lambda : (weights / n)), (lambda : weights))
loss = (tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=(input * weights), labels=target)) / tot)
return loss | Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
return: a scalar loss | MRC/Hybrid/loss.py | __call__ | xiaolinpeter/Question_Answering_Models | 159 | python | def __call__(self, input, target, mask=None):
' Args:\n input [batch_num, class_num]:\n The direct prediction of classification fc layer.\n target [batch_num, class_num]:\n Binary target (0 or 1) for each sample each class. The value is -1\n when the sample is ignored.\n\n return: a scalar loss\n '
edges = self.edges
mmt = self.momentum
weights = tf.zeros_like(input, dtype=tf.float32)
if (mask is None):
mask = tf.ones_like(input, dtype=tf.float32)
def func1(weights, n):
if (mmt > 0):
tf.assign(self.acc_sum[i], ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin)))
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / self.acc_sum[i])))
else:
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / num_in_bin)))
n += 1
return (weights, n)
g = tf.abs((tf.nn.sigmoid(input) - target))
valid = (mask > 0)
tot = tf.maximum(tf.reduce_sum(tf.cast(valid, tf.float32)), 1.0)
n = tf.Variable(0, trainable=False, dtype=tf.float32)
for i in range(self.bins):
inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
num_in_bin = tf.reduce_sum(tf.cast(inds, tf.float32))
(weights, n) = tf.cond((num_in_bin > 0), (lambda : func1(weights, n)), (lambda : (weights, n)))
weights = tf.cond((n > 0), (lambda : (weights / n)), (lambda : weights))
loss = (tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=(input * weights), labels=target)) / tot)
return loss | def __call__(self, input, target, mask=None):
' Args:\n input [batch_num, class_num]:\n The direct prediction of classification fc layer.\n target [batch_num, class_num]:\n Binary target (0 or 1) for each sample each class. The value is -1\n when the sample is ignored.\n\n return: a scalar loss\n '
edges = self.edges
mmt = self.momentum
weights = tf.zeros_like(input, dtype=tf.float32)
if (mask is None):
mask = tf.ones_like(input, dtype=tf.float32)
def func1(weights, n):
if (mmt > 0):
tf.assign(self.acc_sum[i], ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin)))
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / self.acc_sum[i])))
else:
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / num_in_bin)))
n += 1
return (weights, n)
g = tf.abs((tf.nn.sigmoid(input) - target))
valid = (mask > 0)
tot = tf.maximum(tf.reduce_sum(tf.cast(valid, tf.float32)), 1.0)
n = tf.Variable(0, trainable=False, dtype=tf.float32)
for i in range(self.bins):
inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
num_in_bin = tf.reduce_sum(tf.cast(inds, tf.float32))
(weights, n) = tf.cond((num_in_bin > 0), (lambda : func1(weights, n)), (lambda : (weights, n)))
weights = tf.cond((n > 0), (lambda : (weights / n)), (lambda : weights))
loss = (tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=(input * weights), labels=target)) / tot)
return loss<|docstring|>Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
return: a scalar loss<|endoftext|> |
116f5faba01852c220843391b9157356b6598e720b5d562fe9541e052804bf99 | def __call__(self, input, target, mask=None):
' Args:\n input [batch_num, 4 (* class_num)]:\n The prediction of box regression layer. Channel number can be 4 or\n (4 * class_num) depending on whether it is class-agnostic.\n target [batch_num, 4 (* class_num)]:\n The target regression values with the same size of input.\n '
mu = self.mu
edges = self.edges
mmt = self.momentum
diff = (input - target)
loss = (tf.sqrt(((diff * diff) + (mu * mu))) - mu)
g = tf.abs((diff / tf.sqrt(((mu * mu) + (diff * diff)))))
weights = tf.zeros_like(g, dtype=tf.float32)
if (mask is None):
mask = tf.ones_like(input, dtype=tf.float32)
def func1(weights, n):
if (mmt > 0):
tf.assign(self.acc_sum[i], ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin)))
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / self.acc_sum[i])))
else:
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / num_in_bin)))
n += 1
return (weights, n)
valid = (mask > 0)
tot = tf.maximum(tf.reduce_sum(tf.cast(valid, tf.float32)), 1.0)
n = tf.Variable(0, trainable=False, dtype=tf.float32)
for i in range(self.bins):
inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
num_in_bin = tf.reduce_sum(tf.cast(inds, tf.float32))
(weights, n) = tf.cond((num_in_bin > 0), (lambda : func1(weights, n)), (lambda : (weights, n)))
weights = tf.cond((n > 0), (lambda : (weights / n)), (lambda : weights))
loss = (loss * weights)
loss = (tf.reduce_sum(loss) / tot)
return loss | Args:
input [batch_num, 4 (* class_num)]:
The prediction of box regression layer. Channel number can be 4 or
(4 * class_num) depending on whether it is class-agnostic.
target [batch_num, 4 (* class_num)]:
The target regression values with the same size of input. | MRC/Hybrid/loss.py | __call__ | xiaolinpeter/Question_Answering_Models | 159 | python | def __call__(self, input, target, mask=None):
' Args:\n input [batch_num, 4 (* class_num)]:\n The prediction of box regression layer. Channel number can be 4 or\n (4 * class_num) depending on whether it is class-agnostic.\n target [batch_num, 4 (* class_num)]:\n The target regression values with the same size of input.\n '
mu = self.mu
edges = self.edges
mmt = self.momentum
diff = (input - target)
loss = (tf.sqrt(((diff * diff) + (mu * mu))) - mu)
g = tf.abs((diff / tf.sqrt(((mu * mu) + (diff * diff)))))
weights = tf.zeros_like(g, dtype=tf.float32)
if (mask is None):
mask = tf.ones_like(input, dtype=tf.float32)
def func1(weights, n):
if (mmt > 0):
tf.assign(self.acc_sum[i], ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin)))
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / self.acc_sum[i])))
else:
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / num_in_bin)))
n += 1
return (weights, n)
valid = (mask > 0)
tot = tf.maximum(tf.reduce_sum(tf.cast(valid, tf.float32)), 1.0)
n = tf.Variable(0, trainable=False, dtype=tf.float32)
for i in range(self.bins):
inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
num_in_bin = tf.reduce_sum(tf.cast(inds, tf.float32))
(weights, n) = tf.cond((num_in_bin > 0), (lambda : func1(weights, n)), (lambda : (weights, n)))
weights = tf.cond((n > 0), (lambda : (weights / n)), (lambda : weights))
loss = (loss * weights)
loss = (tf.reduce_sum(loss) / tot)
return loss | def __call__(self, input, target, mask=None):
' Args:\n input [batch_num, 4 (* class_num)]:\n The prediction of box regression layer. Channel number can be 4 or\n (4 * class_num) depending on whether it is class-agnostic.\n target [batch_num, 4 (* class_num)]:\n The target regression values with the same size of input.\n '
mu = self.mu
edges = self.edges
mmt = self.momentum
diff = (input - target)
loss = (tf.sqrt(((diff * diff) + (mu * mu))) - mu)
g = tf.abs((diff / tf.sqrt(((mu * mu) + (diff * diff)))))
weights = tf.zeros_like(g, dtype=tf.float32)
if (mask is None):
mask = tf.ones_like(input, dtype=tf.float32)
def func1(weights, n):
if (mmt > 0):
tf.assign(self.acc_sum[i], ((mmt * self.acc_sum[i]) + ((1 - mmt) * num_in_bin)))
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / self.acc_sum[i])))
else:
weights = (weights + ((weights * tf.cast(inds, tf.float32)) * (tot / num_in_bin)))
n += 1
return (weights, n)
valid = (mask > 0)
tot = tf.maximum(tf.reduce_sum(tf.cast(valid, tf.float32)), 1.0)
n = tf.Variable(0, trainable=False, dtype=tf.float32)
for i in range(self.bins):
inds = (((g >= edges[i]) & (g < edges[(i + 1)])) & valid)
num_in_bin = tf.reduce_sum(tf.cast(inds, tf.float32))
(weights, n) = tf.cond((num_in_bin > 0), (lambda : func1(weights, n)), (lambda : (weights, n)))
weights = tf.cond((n > 0), (lambda : (weights / n)), (lambda : weights))
loss = (loss * weights)
loss = (tf.reduce_sum(loss) / tot)
return loss<|docstring|>Args:
input [batch_num, 4 (* class_num)]:
The prediction of box regression layer. Channel number can be 4 or
(4 * class_num) depending on whether it is class-agnostic.
target [batch_num, 4 (* class_num)]:
The target regression values with the same size of input.<|endoftext|> |
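The g used in the regression loss above is the derivative of the authentic smooth-L1 term sqrt(d^2 + mu^2) - mu with respect to the residual d; a short sympy check of that identity (sympy is not used anywhere in the project, it is only a convenient way to verify the formula):

import sympy as sp

d, mu = sp.symbols('d mu', positive=True)
loss = sp.sqrt(d**2 + mu**2) - mu
print(sp.simplify(sp.diff(loss, d)))   # d/sqrt(d**2 + mu**2), matching the g above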
83cd3759b40012c15b8ebfd0cb96558682af4a20845909280821d8ef4f1bc035 | def calc(self, input, target, mask=None, is_mask=False):
' Args:\n input [batch_num, class_num]:\n The direct prediction of classification fc layer.\n target [batch_num, class_num]:\n Binary target (0 or 1) for each sample each class. The value is -1\n when the sample is ignored.\n mask [batch_num, class_num]\n '
(edges_left, edges_right) = (self.edges_left, self.edges_right)
mmt = self.momentum
self.g = tf.abs((tf.sigmoid(input) - target))
g = tf.expand_dims(self.g, axis=0)
g_greater_equal_edges_left = tf.greater_equal(g, edges_left)
g_less_edges_right = tf.less(g, edges_right)
zero_matrix = tf.cast(tf.zeros_like(g_greater_equal_edges_left), dtype=tf.float32)
if is_mask:
mask_greater_zero = tf.greater(mask, 0)
inds = tf.cast(tf.logical_and(tf.logical_and(g_greater_equal_edges_left, g_less_edges_right), mask_greater_zero), dtype=tf.float32)
tot = tf.maximum(tf.reduce_sum(tf.cast(mask_greater_zero, dtype=tf.float32)), 1.0)
else:
inds = tf.cast(tf.logical_and(g_greater_equal_edges_left, g_less_edges_right), dtype=tf.float32)
input_shape = tf.shape(input)
tot = tf.maximum(tf.cast((input_shape[0] * input_shape[1]), dtype=tf.float32), 1.0)
num_in_bin = tf.reduce_sum(inds, axis=[1, 2])
num_in_bin_greater_zero = tf.greater(num_in_bin, 0)
num_valid_bin = tf.reduce_sum(tf.cast(num_in_bin_greater_zero, dtype=tf.float32))
if (mmt > 0):
update = tf.assign(self.acc_sum, tf.where(num_in_bin_greater_zero, ((mmt * self.acc_sum) + ((1 - mmt) * num_in_bin)), self.acc_sum))
with tf.control_dependencies([update]):
self.acc_sum_tmp = tf.identity(self.acc_sum, name='updated_accsum')
acc_sum = tf.expand_dims(self.acc_sum_tmp, (- 1))
acc_sum = tf.expand_dims(acc_sum, (- 1))
acc_sum = (acc_sum + zero_matrix)
weights = tf.where(tf.equal(inds, 1), (tot / acc_sum), zero_matrix)
weights = tf.reduce_sum(weights, axis=0)
else:
num_in_bin = tf.expand_dims(num_in_bin, (- 1))
num_in_bin = tf.expand_dims(num_in_bin, (- 1))
num_in_bin = (num_in_bin + zero_matrix)
weights = tf.where(tf.equal(inds, 1), (tot / num_in_bin), zero_matrix)
weights = tf.reduce_sum(weights, axis=0)
weights = (weights / num_valid_bin)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=input)
loss = (tf.reduce_sum((loss * weights)) / tot)
return loss | Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
mask [batch_num, class_num] | MRC/Hybrid/loss.py | calc | xiaolinpeter/Question_Answering_Models | 159 | python | def calc(self, input, target, mask=None, is_mask=False):
' Args:\n input [batch_num, class_num]:\n The direct prediction of classification fc layer.\n target [batch_num, class_num]:\n Binary target (0 or 1) for each sample each class. The value is -1\n when the sample is ignored.\n mask [batch_num, class_num]\n '
(edges_left, edges_right) = (self.edges_left, self.edges_right)
mmt = self.momentum
self.g = tf.abs((tf.sigmoid(input) - target))
g = tf.expand_dims(self.g, axis=0)
g_greater_equal_edges_left = tf.greater_equal(g, edges_left)
g_less_edges_right = tf.less(g, edges_right)
zero_matrix = tf.cast(tf.zeros_like(g_greater_equal_edges_left), dtype=tf.float32)
if is_mask:
mask_greater_zero = tf.greater(mask, 0)
inds = tf.cast(tf.logical_and(tf.logical_and(g_greater_equal_edges_left, g_less_edges_right), mask_greater_zero), dtype=tf.float32)
tot = tf.maximum(tf.reduce_sum(tf.cast(mask_greater_zero, dtype=tf.float32)), 1.0)
else:
inds = tf.cast(tf.logical_and(g_greater_equal_edges_left, g_less_edges_right), dtype=tf.float32)
input_shape = tf.shape(input)
tot = tf.maximum(tf.cast((input_shape[0] * input_shape[1]), dtype=tf.float32), 1.0)
num_in_bin = tf.reduce_sum(inds, axis=[1, 2])
num_in_bin_greater_zero = tf.greater(num_in_bin, 0)
num_valid_bin = tf.reduce_sum(tf.cast(num_in_bin_greater_zero, dtype=tf.float32))
if (mmt > 0):
update = tf.assign(self.acc_sum, tf.where(num_in_bin_greater_zero, ((mmt * self.acc_sum) + ((1 - mmt) * num_in_bin)), self.acc_sum))
with tf.control_dependencies([update]):
self.acc_sum_tmp = tf.identity(self.acc_sum, name='updated_accsum')
acc_sum = tf.expand_dims(self.acc_sum_tmp, (- 1))
acc_sum = tf.expand_dims(acc_sum, (- 1))
acc_sum = (acc_sum + zero_matrix)
weights = tf.where(tf.equal(inds, 1), (tot / acc_sum), zero_matrix)
weights = tf.reduce_sum(weights, axis=0)
else:
num_in_bin = tf.expand_dims(num_in_bin, (- 1))
num_in_bin = tf.expand_dims(num_in_bin, (- 1))
num_in_bin = (num_in_bin + zero_matrix)
weights = tf.where(tf.equal(inds, 1), (tot / num_in_bin), zero_matrix)
weights = tf.reduce_sum(weights, axis=0)
weights = (weights / num_valid_bin)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=input)
loss = (tf.reduce_sum((loss * weights)) / tot)
return loss | def calc(self, input, target, mask=None, is_mask=False):
' Args:\n input [batch_num, class_num]:\n The direct prediction of classification fc layer.\n target [batch_num, class_num]:\n Binary target (0 or 1) for each sample each class. The value is -1\n when the sample is ignored.\n mask [batch_num, class_num]\n '
(edges_left, edges_right) = (self.edges_left, self.edges_right)
mmt = self.momentum
self.g = tf.abs((tf.sigmoid(input) - target))
g = tf.expand_dims(self.g, axis=0)
g_greater_equal_edges_left = tf.greater_equal(g, edges_left)
g_less_edges_right = tf.less(g, edges_right)
zero_matrix = tf.cast(tf.zeros_like(g_greater_equal_edges_left), dtype=tf.float32)
if is_mask:
mask_greater_zero = tf.greater(mask, 0)
inds = tf.cast(tf.logical_and(tf.logical_and(g_greater_equal_edges_left, g_less_edges_right), mask_greater_zero), dtype=tf.float32)
tot = tf.maximum(tf.reduce_sum(tf.cast(mask_greater_zero, dtype=tf.float32)), 1.0)
else:
inds = tf.cast(tf.logical_and(g_greater_equal_edges_left, g_less_edges_right), dtype=tf.float32)
input_shape = tf.shape(input)
tot = tf.maximum(tf.cast((input_shape[0] * input_shape[1]), dtype=tf.float32), 1.0)
num_in_bin = tf.reduce_sum(inds, axis=[1, 2])
num_in_bin_greater_zero = tf.greater(num_in_bin, 0)
num_valid_bin = tf.reduce_sum(tf.cast(num_in_bin_greater_zero, dtype=tf.float32))
if (mmt > 0):
update = tf.assign(self.acc_sum, tf.where(num_in_bin_greater_zero, ((mmt * self.acc_sum) + ((1 - mmt) * num_in_bin)), self.acc_sum))
with tf.control_dependencies([update]):
self.acc_sum_tmp = tf.identity(self.acc_sum, name='updated_accsum')
acc_sum = tf.expand_dims(self.acc_sum_tmp, (- 1))
acc_sum = tf.expand_dims(acc_sum, (- 1))
acc_sum = (acc_sum + zero_matrix)
weights = tf.where(tf.equal(inds, 1), (tot / acc_sum), zero_matrix)
weights = tf.reduce_sum(weights, axis=0)
else:
num_in_bin = tf.expand_dims(num_in_bin, (- 1))
num_in_bin = tf.expand_dims(num_in_bin, (- 1))
num_in_bin = (num_in_bin + zero_matrix)
weights = tf.where(tf.equal(inds, 1), (tot / num_in_bin), zero_matrix)
weights = tf.reduce_sum(weights, axis=0)
weights = (weights / num_valid_bin)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=input)
loss = (tf.reduce_sum((loss * weights)) / tot)
return loss<|docstring|>Args:
input [batch_num, class_num]:
The direct prediction of classification fc layer.
target [batch_num, class_num]:
Binary target (0 or 1) for each sample each class. The value is -1
when the sample is ignored.
mask [batch_num, class_num]<|endoftext|> |
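All three loss classes above implement the same GHM idea: bin examples by their gradient norm g and weight each example by tot/num_in_bin so that crowded bins are down-weighted. A minimal NumPy sketch of that weighting, independent of TensorFlow (the bin count and sample values are made up; here the bin weight is written directly into the selected entries):

import numpy as np

np.random.seed(0)
logits = np.random.randn(8)
targets = np.random.randint(0, 2, size=8).astype(float)

g = np.abs(1.0 / (1.0 + np.exp(-logits)) - targets)   # gradient norm, as in the classes above

bins = 10
edges = np.linspace(0.0, 1.0 + 1e-6, bins + 1)
weights = np.zeros_like(g)
tot = g.size
valid_bins = 0

for i in range(bins):
    inds = (g >= edges[i]) & (g < edges[i + 1])
    if inds.any():
        weights[inds] = tot / inds.sum()   # examples in crowded bins get smaller weights
        valid_bins += 1

weights /= valid_bins                      # same normalization as the momentum-free branch
print(weights.round(2))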
a51b8e20db10555647b2be8b6d937e6b2ef65d29e4bc274b465d07cffa391127 | def enlist(lines_iter):
'\n arrange lines in a recursive list of tuples (item, [sub-items-tuples])\n '
result = list()
list_stack = [result, None]
indent = 0
for line in lines_iter:
l = []
t = (line, l)
line_indent = _get_indent(line)
if (line_indent > indent):
list_stack.append(l)
list_stack[(- 2)].append(t)
elif (line_indent == indent):
list_stack[(- 1)] = l
list_stack[(- 2)].append(t)
else:
list_stack = list_stack[:((line_indent - indent) - 1)]
list_stack.append(l)
list_stack[(- 2)].append(t)
indent = line_indent
return result | arrange lines in a recursive list of tuples (item, [sub-items-tuples]) | nu.py | enlist | fbtd/notes_utilities | 0 | python | def enlist(lines_iter):
'\n \n '
result = list()
list_stack = [result, None]
indent = 0
for line in lines_iter:
l = []
t = (line, l)
line_indent = _get_indent(line)
if (line_indent > indent):
list_stack.append(l)
list_stack[(- 2)].append(t)
elif (line_indent == indent):
list_stack[(- 1)] = l
list_stack[(- 2)].append(t)
else:
list_stack = list_stack[:((line_indent - indent) - 1)]
list_stack.append(l)
list_stack[(- 2)].append(t)
indent = line_indent
return result | def enlist(lines_iter):
'\n \n '
result = list()
list_stack = [result, None]
indent = 0
for line in lines_iter:
l = []
t = (line, l)
line_indent = _get_indent(line)
if (line_indent > indent):
list_stack.append(l)
list_stack[(- 2)].append(t)
elif (line_indent == indent):
list_stack[(- 1)] = l
list_stack[(- 2)].append(t)
else:
list_stack = list_stack[:((line_indent - indent) - 1)]
list_stack.append(l)
list_stack[(- 2)].append(t)
indent = line_indent
return result<|docstring|>arrange lines in a recursive list of tuples (item, [sub-items-tuples])<|endoftext|> |
8a8988db754bd79a19b6153200e0ab4a0b1c20cc1aa9b1bbed6d606bcc711ede | def deepsort(l):
'\n Recursively sort in place each list\n '
l.sort(key=(lambda e: e[0]))
for elem in l:
deepsort(elem[1]) | Recursively sort in place each list | nu.py | deepsort | fbtd/notes_utilities | 0 | python | def deepsort(l):
'\n \n '
l.sort(key=(lambda e: e[0]))
for elem in l:
deepsort(elem[1]) | def deepsort(l):
'\n \n '
l.sort(key=(lambda e: e[0]))
for elem in l:
deepsort(elem[1])<|docstring|>Recursively sort in place each list<|endoftext|> |
cc60d63408191a67d80a1d4ff7022f13f8b2791bae368e492b6b59e183c080ca | def delist(l, result=None):
'\n returns a tuple of lines from the recursive list of tuples (item, [sub-items-tuples])\n '
if (not result):
result = []
for (line, sub) in l:
result.append(line)
delist(sub, result=result)
return tuple(result) | returns a tuple of lines from the recursive list of tuples (item, [sub-items-tuples]) | nu.py | delist | fbtd/notes_utilities | 0 | python | def delist(l, result=None):
'\n \n '
if (not result):
result = []
for (line, sub) in l:
result.append(line)
delist(sub, result=result)
return tuple(result) | def delist(l, result=None):
'\n \n '
if (not result):
result = []
for (line, sub) in l:
result.append(line)
delist(sub, result=result)
return tuple(result)<|docstring|>returns a tuple of lines from the recursive list of tuples (item, [sub-items-tuples])<|endoftext|> |
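Assuming nu.py is importable, deepsort and delist can be exercised on a hand-built tree in the (line, [children]) shape that enlist produces; the sample outline below is made up, and building the tree by hand sidesteps the _get_indent helper that is not shown here.

import nu

tree = [
    ('b item', [('b sub 2', []), ('b sub 1', [])]),
    ('a item', []),
]
nu.deepsort(tree)                  # sorts every nesting level in place
print('\n'.join(nu.delist(tree)))  # a item, b item, b sub 1, b sub 2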
43312aece65aa86954aeab0ee6cb0145773e689edb47b7f5e6535f134e0d3f7f | @pytest.fixture(name='mock_setup')
def mock_setups():
'Prevent setup.'
with patch('homeassistant.components.flipr.async_setup_entry', return_value=True):
(yield) | Prevent setup. | tests/components/flipr/test_config_flow.py | mock_setups | GrandMoff100/homeassistant-core | 30,023 | python | @pytest.fixture(name='mock_setup')
def mock_setups():
with patch('homeassistant.components.flipr.async_setup_entry', return_value=True):
(yield) | @pytest.fixture(name='mock_setup')
def mock_setups():
with patch('homeassistant.components.flipr.async_setup_entry', return_value=True):
(yield)<|docstring|>Prevent setup.<|endoftext|> |
288fd7ac4e5fffda6a0b6c557be14405fb3f11a46d0198b0ad049820f9cf53bb | async def test_show_form(hass):
'Test we get the form.'
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == config_entries.SOURCE_USER) | Test we get the form. | tests/components/flipr/test_config_flow.py | test_show_form | GrandMoff100/homeassistant-core | 30,023 | python | async def test_show_form(hass):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == config_entries.SOURCE_USER) | async def test_show_form(hass):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == config_entries.SOURCE_USER)<|docstring|>Test we get the form.<|endoftext|> |
6f892a17508ebdfd0efa9a5f1c75c7c93d727d91d84c18b290e9641d136a45a6 | async def test_invalid_credential(hass, mock_setup):
'Test invalid credential.'
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=HTTPError()):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'bad_login', CONF_PASSWORD: 'bad_pass', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'invalid_auth'}) | Test invalid credential. | tests/components/flipr/test_config_flow.py | test_invalid_credential | GrandMoff100/homeassistant-core | 30,023 | python | async def test_invalid_credential(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=HTTPError()):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'bad_login', CONF_PASSWORD: 'bad_pass', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'invalid_auth'}) | async def test_invalid_credential(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=HTTPError()):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'bad_login', CONF_PASSWORD: 'bad_pass', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'invalid_auth'})<|docstring|>Test invalid credential.<|endoftext|> |
866b94646de93b1d751f07744149836622fa8f94ccb9d32d5d8870b5ed309726 | async def test_nominal_case(hass, mock_setup):
'Test valid login form.'
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=['flipid']) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'flipid'}))
(await hass.async_block_till_done())
assert (len(mock_flipr_client.mock_calls) == 1)
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'flipid')
assert (result['data'] == {CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'flipid'}) | Test valid login form. | tests/components/flipr/test_config_flow.py | test_nominal_case | GrandMoff100/homeassistant-core | 30,023 | python | async def test_nominal_case(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=['flipid']) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'flipid'}))
(await hass.async_block_till_done())
assert (len(mock_flipr_client.mock_calls) == 1)
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'flipid')
assert (result['data'] == {CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'flipid'}) | async def test_nominal_case(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=['flipid']) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'flipid'}))
(await hass.async_block_till_done())
assert (len(mock_flipr_client.mock_calls) == 1)
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'flipid')
assert (result['data'] == {CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'flipid'})<|docstring|>Test valid login form.<|endoftext|> |
d727571876a2f8a3c0a3b6dbfea0b1a992a5fc897501d2564aef07c5d590ca7c | async def test_multiple_flip_id(hass, mock_setup):
'Test multiple flipr id adding a config step.'
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=['FLIP1', 'FLIP2']) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'flipr_id')
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_FLIPR_ID: 'FLIP2'}))
assert (len(mock_flipr_client.mock_calls) == 1)
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'FLIP2')
assert (result['data'] == {CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'FLIP2'}) | Test multiple flipr id adding a config step. | tests/components/flipr/test_config_flow.py | test_multiple_flip_id | GrandMoff100/homeassistant-core | 30,023 | python | async def test_multiple_flip_id(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=['FLIP1', 'FLIP2']) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'flipr_id')
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_FLIPR_ID: 'FLIP2'}))
assert (len(mock_flipr_client.mock_calls) == 1)
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'FLIP2')
assert (result['data'] == {CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'FLIP2'}) | async def test_multiple_flip_id(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=['FLIP1', 'FLIP2']) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'flipr_id')
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_FLIPR_ID: 'FLIP2'}))
assert (len(mock_flipr_client.mock_calls) == 1)
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'FLIP2')
assert (result['data'] == {CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass', CONF_FLIPR_ID: 'FLIP2'})<|docstring|>Test multiple flipr id adding a config step.<|endoftext|> |
2453b085f2e72dc499263b92b3dae24b69debf89fdde92a559caa85841acbcb0 | async def test_no_flip_id(hass, mock_setup):
'Test no flipr id found.'
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=[]) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass'}))
assert (result['step_id'] == 'user')
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'no_flipr_id_found'})
assert (len(mock_flipr_client.mock_calls) == 1) | Test no flipr id found. | tests/components/flipr/test_config_flow.py | test_no_flip_id | GrandMoff100/homeassistant-core | 30,023 | python | async def test_no_flip_id(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=[]) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass'}))
assert (result['step_id'] == 'user')
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'no_flipr_id_found'})
assert (len(mock_flipr_client.mock_calls) == 1) | async def test_no_flip_id(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', return_value=[]) as mock_flipr_client:
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'dummylogin', CONF_PASSWORD: 'dummypass'}))
assert (result['step_id'] == 'user')
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'no_flipr_id_found'})
assert (len(mock_flipr_client.mock_calls) == 1)<|docstring|>Test no flipr id found.<|endoftext|> |
3d33c2ef43e36701d154ba88f8baa04969032a927b34451b2c6b69fa0ad75a2a | async def test_http_errors(hass, mock_setup):
'Test HTTP Errors.'
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=Timeout()):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'nada', CONF_PASSWORD: 'nada', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'cannot_connect'})
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=Exception('Bad request Boy :) --')):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'nada', CONF_PASSWORD: 'nada', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'unknown'}) | Test HTTP Errors. | tests/components/flipr/test_config_flow.py | test_http_errors | GrandMoff100/homeassistant-core | 30,023 | python | async def test_http_errors(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=Timeout()):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'nada', CONF_PASSWORD: 'nada', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'cannot_connect'})
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=Exception('Bad request Boy :) --')):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'nada', CONF_PASSWORD: 'nada', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'unknown'}) | async def test_http_errors(hass, mock_setup):
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=Timeout()):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'nada', CONF_PASSWORD: 'nada', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'cannot_connect'})
with patch('flipr_api.FliprAPIRestClient.search_flipr_ids', side_effect=Exception('Bad request Boy :) --')):
result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER}, data={CONF_EMAIL: 'nada', CONF_PASSWORD: 'nada', CONF_FLIPR_ID: ''}))
assert (result['type'] == 'form')
assert (result['errors'] == {'base': 'unknown'})<|docstring|>Test HTTP Errors.<|endoftext|> |
0bdb7e271177f64661475fd600a922e29cbbe3a57b36505f8bd37be6d4af965a | def __init__(self, hidden_size, kernels=[2, 3, 4]):
'1DCNN layer with max pooling\n\n Args:\n hidden_size (int): embedding dimension\n kernels (list, optional): kernel sizes for convolution. Defaults to [2, 3, 4].\n '
super().__init__()
self.pool = nn.AdaptiveMaxPool1d(1)
self.convs = nn.ModuleList()
for k in kernels:
cv = nn.Conv1d(hidden_size, hidden_size, kernel_size=k, bias=False)
self.convs.append(cv) | 1DCNN layer with max pooling
Args:
hidden_size (int): embedding dimension
kernels (list, optional): kernel sizes for convolution. Defaults to [2, 3, 4]. | src/byte_search/cnn.py | __init__ | urchade/urchade-byte_search | 0 | python | def __init__(self, hidden_size, kernels=[2, 3, 4]):
'1DCNN layer with max pooling\n\n Args:\n hidden_size (int): embedding dimension\n kernels (list, optional): kernel sizes for convolution. Defaults to [2, 3, 4].\n '
super().__init__()
self.pool = nn.AdaptiveMaxPool1d(1)
self.convs = nn.ModuleList()
for k in kernels:
cv = nn.Conv1d(hidden_size, hidden_size, kernel_size=k, bias=False)
self.convs.append(cv) | def __init__(self, hidden_size, kernels=[2, 3, 4]):
'1DCNN layer with max pooling\n\n Args:\n hidden_size (int): embedding dimension\n kernels (list, optional): kernel sizes for convolution. Defaults to [2, 3, 4].\n '
super().__init__()
self.pool = nn.AdaptiveMaxPool1d(1)
self.convs = nn.ModuleList()
for k in kernels:
cv = nn.Conv1d(hidden_size, hidden_size, kernel_size=k, bias=False)
self.convs.append(cv)<|docstring|>1DCNN layer with max pooling
Args:
hidden_size (int): embedding dimension
kernels (list, optional): kernel sizes for convolution. Defaults to [2, 3, 4].<|endoftext|> |
4b5a0a89375849020c3a6517b06a4db435a40ea1909fa79909c875776e9ae853 | def forward(self, x):
'Forward function\n\n Args:\n x (torch.Tensor): [batch_size, length, hidden_size]\n\n Returns:\n torch.Tensor: [batch_size, hidden_size]\n '
x = x.transpose(1, 2)
convs = []
for conv in self.convs:
convolved = conv(x)
convolved = self.pool(convolved).squeeze((- 1))
convs.append(convolved)
convs = torch.stack(convs, dim=0)
return convs.max(0).values | Forward function
Args:
x (torch.Tensor): [batch_size, length, hidden_size]
Returns:
torch.Tensor: [batch_size, hidden_size] | src/byte_search/cnn.py | forward | urchade/urchade-byte_search | 0 | python | def forward(self, x):
'Forward function\n\n Args:\n x (torch.Tensor): [batch_size, length, hidden_size]\n\n Returns:\n torch.Tensor: [batch_size, hidden_size]\n '
x = x.transpose(1, 2)
convs = []
for conv in self.convs:
convolved = conv(x)
convolved = self.pool(convolved).squeeze((- 1))
convs.append(convolved)
convs = torch.stack(convs, dim=0)
return convs.max(0).values | def forward(self, x):
'Forward function\n\n Args:\n x (torch.Tensor): [batch_size, length, hidden_size]\n\n Returns:\n torch.Tensor: [batch_size, hidden_size]\n '
x = x.transpose(1, 2)
convs = []
for conv in self.convs:
convolved = conv(x)
convolved = self.pool(convolved).squeeze((- 1))
convs.append(convolved)
convs = torch.stack(convs, dim=0)
return convs.max(0).values<|docstring|>Forward function
Args:
x (torch.Tensor): [batch_size, length, hidden_size]
Returns:
torch.Tensor: [batch_size, hidden_size]<|endoftext|> |
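Putting the __init__ and forward shown above into one module to check shapes; the class name TextCNN and the example sizes are assumptions (the real class name in src/byte_search/cnn.py is not visible here).

import torch
from torch import nn

class TextCNN(nn.Module):
    # same layout as the __init__/forward pair above
    def __init__(self, hidden_size, kernels=(2, 3, 4)):
        super().__init__()
        self.pool = nn.AdaptiveMaxPool1d(1)
        self.convs = nn.ModuleList(
            nn.Conv1d(hidden_size, hidden_size, kernel_size=k, bias=False) for k in kernels
        )

    def forward(self, x):                  # x: [batch, length, hidden]
        x = x.transpose(1, 2)              # Conv1d expects [batch, hidden, length]
        pooled = [self.pool(conv(x)).squeeze(-1) for conv in self.convs]
        return torch.stack(pooled, dim=0).max(0).values

model = TextCNN(hidden_size=64)
print(model(torch.randn(8, 20, 64)).shape)   # torch.Size([8, 64])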
4f55876b4f7564d12385e3a79098cfcefce2229473cf9f35c42fdc9b89059635 | def smooth_mesh(mesh, n_iter=4, lam=0.6307, mu=(- 0.6347), weights=None, bconstr=True, volume_corr=False):
'\n FE mesh smoothing.\n\n Based on:\n\n [1] Steven K. Boyd, Ralph Muller, Smooth surface meshing for automated\n finite element model generation from 3D image data, Journal of\n Biomechanics, Volume 39, Issue 7, 2006, Pages 1287-1295,\n ISSN 0021-9290, 10.1016/j.jbiomech.2005.03.006.\n (http://www.sciencedirect.com/science/article/pii/S0021929005001442)\n\n Parameters\n ----------\n mesh : mesh\n FE mesh.\n n_iter : integer, optional\n Number of iteration steps.\n lam : float, optional\n Smoothing factor, see [1].\n mu : float, optional\n Unshrinking factor, see [1].\n weights : array, optional\n Edge weights, see [1].\n bconstr: logical, optional\n Boundary constraints, if True only surface smoothing performed.\n volume_corr: logical, optional\n Correct volume after smoothing process.\n\n Returns\n -------\n coors : array\n Coordinates of mesh nodes.\n '
def laplacian(coors, weights):
n_nod = coors.shape[0]
displ = ((weights - sps.identity(n_nod)) * coors)
return displ
def taubin(coors0, weights, lam, mu, n_iter):
coors = coors0.copy()
for ii in range(n_iter):
displ = laplacian(coors, weights)
if (nm.mod(ii, 2) == 0):
coors += (lam * displ)
else:
coors += (mu * displ)
return coors
def get_volume(el, nd):
dim = nd.shape[1]
nnd = el.shape[1]
etype = ('%d_%d' % (dim, nnd))
if (etype in ('2_4', '3_8')):  # quads/hexahedra are split into simplices first
el = elems_q2t(el)
nel = el.shape[0]
mul = (1.0 / factorial(dim))
if (dim == 3):
mul *= (- 1.0)
mtx = nm.ones((nel, (dim + 1), (dim + 1)), dtype=nm.double)
mtx[(:, :, :(- 1))] = nd[(el, :)]
vols = (mul * nm.linalg.det(mtx))
vol = vols.sum()
bc = nm.dot(vols, (mtx.sum(1)[(:, :(- 1))] / nnd))
bc /= vol
return (vol, bc)
n_nod = mesh.points.shape[0]
cells = mesh.cells[0]
if (weights is None):
node_group = (nm.ones((n_nod,), dtype=nm.int8) * 2)
(sndi, edges) = get_snodes_uedges(cells.data, my_types[cells.type])
if bconstr:
node_group[sndi] = 4
end1 = edges[(:, 0)]
end2 = edges[(:, 1)]
idxs = nm.where((node_group[end2] >= node_group[end1]))
rows1 = end1[idxs]
cols1 = end2[idxs]
idxs = nm.where((node_group[end1] >= node_group[end2]))
rows2 = end2[idxs]
cols2 = end1[idxs]
crows = nm.concatenate((rows1, rows2))
ccols = nm.concatenate((cols1, cols2))
costs = sps.coo_matrix((nm.ones_like(crows), (crows, ccols)), shape=(n_nod, n_nod), dtype=nm.double)
idxs = range(n_nod)
aux = sps.coo_matrix(((1.0 / nm.asarray(costs.sum(1)).squeeze()), (idxs, idxs)), shape=(n_nod, n_nod), dtype=nm.double)
weights = (aux.tocsc() * costs.tocsc()).tocsr()
coors = taubin(mesh.points, weights, lam, mu, n_iter)
if volume_corr:
(volume0, bc) = get_volume(cells.data, mesh.points)
(volume, _) = get_volume(cells.data, coors)
scale = (volume0 / volume)
coors = (((coors - bc) * scale) + bc)
return coors | FE mesh smoothing.
Based on:
[1] Steven K. Boyd, Ralph Muller, Smooth surface meshing for automated
finite element model generation from 3D image data, Journal of
Biomechanics, Volume 39, Issue 7, 2006, Pages 1287-1295,
ISSN 0021-9290, 10.1016/j.jbiomech.2005.03.006.
(http://www.sciencedirect.com/science/article/pii/S0021929005001442)
Parameters
----------
mesh : mesh
FE mesh.
n_iter : integer, optional
Number of iteration steps.
lam : float, optional
Smoothing factor, see [1].
mu : float, optional
Unshrinking factor, see [1].
weights : array, optional
Edge weights, see [1].
bconstr: logical, optional
Boundary constraints, if True only surface smoothing performed.
volume_corr: logical, optional
Correct volume after smoothing process.
Returns
-------
coors : array
Coordinates of mesh nodes. | dicom2fem/seg2fem.py | smooth_mesh | vlukes/dicom2fem | 8 | python | def smooth_mesh(mesh, n_iter=4, lam=0.6307, mu=(- 0.6347), weights=None, bconstr=True, volume_corr=False):
'\n FE mesh smoothing.\n\n Based on:\n\n [1] Steven K. Boyd, Ralph Muller, Smooth surface meshing for automated\n finite element model generation from 3D image data, Journal of\n Biomechanics, Volume 39, Issue 7, 2006, Pages 1287-1295,\n ISSN 0021-9290, 10.1016/j.jbiomech.2005.03.006.\n (http://www.sciencedirect.com/science/article/pii/S0021929005001442)\n\n Parameters\n ----------\n mesh : mesh\n FE mesh.\n n_iter : integer, optional\n Number of iteration steps.\n lam : float, optional\n Smoothing factor, see [1].\n mu : float, optional\n Unshrinking factor, see [1].\n weights : array, optional\n Edge weights, see [1].\n bconstr: logical, optional\n Boundary constraints, if True only surface smoothing performed.\n volume_corr: logical, optional\n Correct volume after smoothing process.\n\n Returns\n -------\n coors : array\n Coordinates of mesh nodes.\n '
def laplacian(coors, weights):
n_nod = coors.shape[0]
displ = ((weights - sps.identity(n_nod)) * coors)
return displ
def taubin(coors0, weights, lam, mu, n_iter):
coors = coors0.copy()
for ii in range(n_iter):
displ = laplacian(coors, weights)
if (nm.mod(ii, 2) == 0):
coors += (lam * displ)
else:
coors += (mu * displ)
return coors
def get_volume(el, nd):
dim = nd.shape[1]
nnd = el.shape[1]
etype = ('%d_%d' % (dim, nnd))
if (etype in ('2_4', '3_8')):  # quads/hexahedra are split into simplices first
el = elems_q2t(el)
nel = el.shape[0]
mul = (1.0 / factorial(dim))
if (dim == 3):
mul *= (- 1.0)
mtx = nm.ones((nel, (dim + 1), (dim + 1)), dtype=nm.double)
mtx[(:, :, :(- 1))] = nd[(el, :)]
vols = (mul * nm.linalg.det(mtx))
vol = vols.sum()
bc = nm.dot(vols, (mtx.sum(1)[(:, :(- 1))] / nnd))
bc /= vol
return (vol, bc)
n_nod = mesh.points.shape[0]
cells = mesh.cells[0]
if (weights is None):
node_group = (nm.ones((n_nod,), dtype=nm.int8) * 2)
(sndi, edges) = get_snodes_uedges(cells.data, my_types[cells.type])
if bconstr:
node_group[sndi] = 4
end1 = edges[(:, 0)]
end2 = edges[(:, 1)]
idxs = nm.where((node_group[end2] >= node_group[end1]))
rows1 = end1[idxs]
cols1 = end2[idxs]
idxs = nm.where((node_group[end1] >= node_group[end2]))
rows2 = end2[idxs]
cols2 = end1[idxs]
crows = nm.concatenate((rows1, rows2))
ccols = nm.concatenate((cols1, cols2))
costs = sps.coo_matrix((nm.ones_like(crows), (crows, ccols)), shape=(n_nod, n_nod), dtype=nm.double)
idxs = range(n_nod)
aux = sps.coo_matrix(((1.0 / nm.asarray(costs.sum(1)).squeeze()), (idxs, idxs)), shape=(n_nod, n_nod), dtype=nm.double)
weights = (aux.tocsc() * costs.tocsc()).tocsr()
coors = taubin(mesh.points, weights, lam, mu, n_iter)
if volume_corr:
(volume0, bc) = get_volume(cells.data, mesh.points)
(volume, _) = get_volume(cells.data, coors)
scale = (volume0 / volume)
coors = (((coors - bc) * scale) + bc)
return coors | def smooth_mesh(mesh, n_iter=4, lam=0.6307, mu=(- 0.6347), weights=None, bconstr=True, volume_corr=False):
'\n FE mesh smoothing.\n\n Based on:\n\n [1] Steven K. Boyd, Ralph Muller, Smooth surface meshing for automated\n finite element model generation from 3D image data, Journal of\n Biomechanics, Volume 39, Issue 7, 2006, Pages 1287-1295,\n ISSN 0021-9290, 10.1016/j.jbiomech.2005.03.006.\n (http://www.sciencedirect.com/science/article/pii/S0021929005001442)\n\n Parameters\n ----------\n mesh : mesh\n FE mesh.\n n_iter : integer, optional\n Number of iteration steps.\n lam : float, optional\n Smoothing factor, see [1].\n mu : float, optional\n Unshrinking factor, see [1].\n weights : array, optional\n Edge weights, see [1].\n bconstr: logical, optional\n Boundary constraints, if True only surface smoothing performed.\n volume_corr: logical, optional\n Correct volume after smoothing process.\n\n Returns\n -------\n coors : array\n Coordinates of mesh nodes.\n '
def laplacian(coors, weights):
n_nod = coors.shape[0]
displ = ((weights - sps.identity(n_nod)) * coors)
return displ
def taubin(coors0, weights, lam, mu, n_iter):
coors = coors0.copy()
for ii in range(n_iter):
displ = laplacian(coors, weights)
if (nm.mod(ii, 2) == 0):
coors += (lam * displ)
else:
coors += (mu * displ)
return coors
def get_volume(el, nd):
dim = nd.shape[1]
nnd = el.shape[1]
etype = ('%d_%d' % (dim, nnd))
if (etype in ('2_4', '3_8')):  # quads/hexahedra are split into simplices first
el = elems_q2t(el)
nel = el.shape[0]
mul = (1.0 / factorial(dim))
if (dim == 3):
mul *= (- 1.0)
mtx = nm.ones((nel, (dim + 1), (dim + 1)), dtype=nm.double)
mtx[(:, :, :(- 1))] = nd[(el, :)]
vols = (mul * nm.linalg.det(mtx))
vol = vols.sum()
bc = nm.dot(vols, (mtx.sum(1)[(:, :(- 1))] / nnd))
bc /= vol
return (vol, bc)
n_nod = mesh.points.shape[0]
cells = mesh.cells[0]
if (weights is None):
node_group = (nm.ones((n_nod,), dtype=nm.int8) * 2)
(sndi, edges) = get_snodes_uedges(cells.data, my_types[cells.type])
if bconstr:
node_group[sndi] = 4
end1 = edges[(:, 0)]
end2 = edges[(:, 1)]
idxs = nm.where((node_group[end2] >= node_group[end1]))
rows1 = end1[idxs]
cols1 = end2[idxs]
idxs = nm.where((node_group[end1] >= node_group[end2]))
rows2 = end2[idxs]
cols2 = end1[idxs]
crows = nm.concatenate((rows1, rows2))
ccols = nm.concatenate((cols1, cols2))
costs = sps.coo_matrix((nm.ones_like(crows), (crows, ccols)), shape=(n_nod, n_nod), dtype=nm.double)
idxs = range(n_nod)
aux = sps.coo_matrix(((1.0 / nm.asarray(costs.sum(1)).squeeze()), (idxs, idxs)), shape=(n_nod, n_nod), dtype=nm.double)
weights = (aux.tocsc() * costs.tocsc()).tocsr()
coors = taubin(mesh.points, weights, lam, mu, n_iter)
if volume_corr:
(volume0, bc) = get_volume(cells.data, mesh.points)
(volume, _) = get_volume(cells.data, coors)
scale = (volume0 / volume)
coors = (((coors - bc) * scale) + bc)
return coors<|docstring|>FE mesh smoothing.
Based on:
[1] Steven K. Boyd, Ralph Muller, Smooth surface meshing for automated
finite element model generation from 3D image data, Journal of
Biomechanics, Volume 39, Issue 7, 2006, Pages 1287-1295,
ISSN 0021-9290, 10.1016/j.jbiomech.2005.03.006.
(http://www.sciencedirect.com/science/article/pii/S0021929005001442)
Parameters
----------
mesh : mesh
FE mesh.
n_iter : integer, optional
Number of iteration steps.
lam : float, optional
Smoothing factor, see [1].
mu : float, optional
Unshrinking factor, see [1].
weights : array, optional
Edge weights, see [1].
bconstr: logical, optional
Boundary constraints, if True only surface smoothing performed.
volume_corr: logical, optional
Correct volume after smoothing process.
Returns
-------
coors : array
Coordinates of mesh nodes.<|endoftext|> |
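A minimal standalone sketch of the Taubin update at the core of smooth_mesh, applied to a toy polyline with row-normalized uniform edge weights; the coordinates, edge list and iteration count below are illustrative assumptions, not values produced by the function above.

import numpy as nm
import scipy.sparse as sps

coors = nm.array([[0.0, 0.0], [1.0, 0.1], [2.0, -0.1], [3.0, 0.0]])  # noisy polyline nodes
edges = [(0, 1), (1, 2), (2, 3)]                                     # assumed connectivity

n_nod = coors.shape[0]
rows = [i for i, j in edges] + [j for i, j in edges]
cols = [j for i, j in edges] + [i for i, j in edges]
costs = sps.coo_matrix((nm.ones(len(rows)), (rows, cols)), shape=(n_nod, n_nod))
inv_deg = sps.diags(1.0 / nm.asarray(costs.sum(1)).squeeze())
weights = (inv_deg @ costs).tocsr()   # row-normalized adjacency, as built inside smooth_mesh

lam, mu, n_iter = 0.6307, -0.6347, 10
for ii in range(n_iter):
    displ = weights @ coors - coors                       # Laplacian displacement
    coors = coors + (lam if ii % 2 == 0 else mu) * displ  # shrink step, then un-shrink step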
85cf4cc76a7ff9422dd966614015be52f4f77967019eb36650cbaa74ca82c9f0 | def gen_mesh_from_voxels(voxels, dims, etype='q', mtype='v'):
"\n Generate FE mesh from voxels (volumetric data).\n\n Parameters\n ----------\n voxels : array\n Voxel matrix, 1=material.\n dims : array\n Size of one voxel.\n etype : integer, optional\n 'q' - quadrilateral or hexahedral elements\n 't' - triangular or tetrahedral elements\n mtype : integer, optional\n 'v' - volumetric mesh\n 's' - surface mesh\n\n Returns\n -------\n mesh : Mesh instance\n Finite element mesh.\n "
dims = dims.squeeze()
dim = len(dims)
nddims = (nm.array(voxels.shape) + 2)
nodemtx = nm.zeros(nddims, dtype=nm.int8)
vxidxs = nm.where(voxels)
set_nodemtx(nodemtx, vxidxs, etype)
ndidx = nm.where(nodemtx)
del nodemtx
coors = (nm.array(ndidx).transpose() * dims)
nnod = coors.shape[0]
nodeid = (- nm.ones(nddims, dtype=nm.int32))
nodeid[ndidx] = nm.arange(nnod)
if (mtype == 's'):
felems = []
nn = nm.zeros(nddims, dtype=nm.int8)
if (dim == 2):
(ix, iy) = vxidxs
if (mtype == 'v'):
elems = nm.array([nodeid[(ix, iy)], nodeid[((ix + 1), iy)], nodeid[((ix + 1), (iy + 1))], nodeid[(ix, (iy + 1))]]).transpose()
edim = 2
else:
fc = nm.zeros((nddims + (2,)), dtype=nm.int32)
fc[(ix, iy, :)] = nm.array([nodeid[(ix, (iy + 1))], nodeid[(ix, iy)]]).transpose()
fc[((ix + 1), iy, :)] = nm.array([nodeid[((ix + 1), iy)], nodeid[((ix + 1), (iy + 1))]]).transpose()
nn[(ix, iy)] = 1
nn[((ix + 1), iy)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, :)] = nm.array([nodeid[(ix, iy)], nodeid[((ix + 1), iy)]]).transpose()
fc[(ix, (iy + 1), :)] = nm.array([nodeid[((ix + 1), (iy + 1))], nodeid[(ix, (iy + 1))]]).transpose()
nn[(ix, iy)] = 1
nn[(ix, (iy + 1))] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
elems = nm.concatenate(felems)
edim = 1
elif (dim == 3):
(ix, iy, iz) = vxidxs
if (mtype == 'v'):
elems = nm.array([nodeid[(ix, iy, iz)], nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[(ix, (iy + 1), iz)], nodeid[(ix, iy, (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))]]).transpose()
edim = 3
else:
fc = nm.zeros((tuple(nddims) + (4,)), dtype=nm.int32)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[(ix, iy, (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), iz)]]).transpose()
fc[((ix + 1), iy, iz, :)] = nm.array([nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))]]).transpose()
nn[(ix, iy, iz)] = 1
nn[((ix + 1), iy, iz)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), iy, (iz + 1))], nodeid[(ix, iy, (iz + 1))]]).transpose()
fc[(ix, (iy + 1), iz, :)] = nm.array([nodeid[(ix, (iy + 1), iz)], nodeid[(ix, (iy + 1), (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[((ix + 1), (iy + 1), iz)]]).transpose()
nn[(ix, iy, iz)] = 1
nn[(ix, (iy + 1), iz)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[(ix, (iy + 1), iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[((ix + 1), iy, iz)]]).transpose()
fc[(ix, iy, (iz + 1), :)] = nm.array([nodeid[(ix, iy, (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))]]).transpose()
nn[(ix, iy, iz)] = 1
nn[(ix, iy, (iz + 1))] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
elems = nm.concatenate(felems)
edim = 2
if (mtype == 's'):
aux = nm.zeros((nnod,), dtype=nm.int32)
for ii in elems.T:
aux[ii] = 1
idx = nm.where(aux)
aux.fill(0)
nnod = idx[0].shape[0]
aux[idx] = range(nnod)
coors = coors[idx]
for ii in range(elems.shape[1]):
elems[(:, ii)] = aux[elems[(:, ii)]]
if (etype == 't'):
elems = elems_q2t(elems)
nelnd = elems.shape[1]
mesh = meshio.Mesh(coors, [(meshio_types[('%d_%d' % (edim, nelnd))], nm.ascontiguousarray(elems))])
return mesh | Generate FE mesh from voxels (volumetric data).
Parameters
----------
voxels : array
Voxel matrix, 1=material.
dims : array
Size of one voxel.
etype : integer, optional
'q' - quadrilateral or hexahedral elements
't' - triangular or tetrahedral elements
mtype : integer, optional
'v' - volumetric mesh
's' - surface mesh
Returns
-------
mesh : Mesh instance
Finite element mesh. | dicom2fem/seg2fem.py | gen_mesh_from_voxels | vlukes/dicom2fem | 8 | python | def gen_mesh_from_voxels(voxels, dims, etype='q', mtype='v'):
"\n Generate FE mesh from voxels (volumetric data).\n\n Parameters\n ----------\n voxels : array\n Voxel matrix, 1=material.\n dims : array\n Size of one voxel.\n etype : integer, optional\n 'q' - quadrilateral or hexahedral elements\n 't' - triangular or tetrahedral elements\n mtype : integer, optional\n 'v' - volumetric mesh\n 's' - surface mesh\n\n Returns\n -------\n mesh : Mesh instance\n Finite element mesh.\n "
dims = dims.squeeze()
dim = len(dims)
nddims = (nm.array(voxels.shape) + 2)
nodemtx = nm.zeros(nddims, dtype=nm.int8)
vxidxs = nm.where(voxels)
set_nodemtx(nodemtx, vxidxs, etype)
ndidx = nm.where(nodemtx)
del nodemtx
coors = (nm.array(ndidx).transpose() * dims)
nnod = coors.shape[0]
nodeid = (- nm.ones(nddims, dtype=nm.int32))
nodeid[ndidx] = nm.arange(nnod)
if (mtype == 's'):
felems = []
nn = nm.zeros(nddims, dtype=nm.int8)
if (dim == 2):
(ix, iy) = vxidxs
if (mtype == 'v'):
elems = nm.array([nodeid[(ix, iy)], nodeid[((ix + 1), iy)], nodeid[((ix + 1), (iy + 1))], nodeid[(ix, (iy + 1))]]).transpose()
edim = 2
else:
fc = nm.zeros((nddims + (2,)), dtype=nm.int32)
fc[(ix, iy, :)] = nm.array([nodeid[(ix, (iy + 1))], nodeid[(ix, iy)]]).transpose()
fc[((ix + 1), iy, :)] = nm.array([nodeid[((ix + 1), iy)], nodeid[((ix + 1), (iy + 1))]]).transpose()
nn[(ix, iy)] = 1
nn[((ix + 1), iy)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, :)] = nm.array([nodeid[(ix, iy)], nodeid[((ix + 1), iy)]]).transpose()
fc[(ix, (iy + 1), :)] = nm.array([nodeid[((ix + 1), (iy + 1))], nodeid[(ix, (iy + 1))]]).transpose()
nn[(ix, iy)] = 1
nn[(ix, (iy + 1))] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
elems = nm.concatenate(felems)
edim = 1
elif (dim == 3):
(ix, iy, iz) = vxidxs
if (mtype == 'v'):
elems = nm.array([nodeid[(ix, iy, iz)], nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[(ix, (iy + 1), iz)], nodeid[(ix, iy, (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))]]).transpose()
edim = 3
else:
fc = nm.zeros((tuple(nddims) + (4,)), dtype=nm.int32)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[(ix, iy, (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), iz)]]).transpose()
fc[((ix + 1), iy, iz, :)] = nm.array([nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))]]).transpose()
nn[(ix, iy, iz)] = 1
nn[((ix + 1), iy, iz)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), iy, (iz + 1))], nodeid[(ix, iy, (iz + 1))]]).transpose()
fc[(ix, (iy + 1), iz, :)] = nm.array([nodeid[(ix, (iy + 1), iz)], nodeid[(ix, (iy + 1), (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[((ix + 1), (iy + 1), iz)]]).transpose()
nn[(ix, iy, iz)] = 1
nn[(ix, (iy + 1), iz)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[(ix, (iy + 1), iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[((ix + 1), iy, iz)]]).transpose()
fc[(ix, iy, (iz + 1), :)] = nm.array([nodeid[(ix, iy, (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))]]).transpose()
nn[(ix, iy, iz)] = 1
nn[(ix, iy, (iz + 1))] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
elems = nm.concatenate(felems)
edim = 2
if (mtype == 's'):
aux = nm.zeros((nnod,), dtype=nm.int32)
for ii in elems.T:
aux[ii] = 1
idx = nm.where(aux)
aux.fill(0)
nnod = idx[0].shape[0]
aux[idx] = range(nnod)
coors = coors[idx]
for ii in range(elems.shape[1]):
elems[(:, ii)] = aux[elems[(:, ii)]]
if (etype == 't'):
elems = elems_q2t(elems)
nelnd = elems.shape[1]
mesh = meshio.Mesh(coors, [(meshio_types[('%d_%d' % (edim, nelnd))], nm.ascontiguousarray(elems))])
return mesh | def gen_mesh_from_voxels(voxels, dims, etype='q', mtype='v'):
"\n Generate FE mesh from voxels (volumetric data).\n\n Parameters\n ----------\n voxels : array\n Voxel matrix, 1=material.\n dims : array\n Size of one voxel.\n etype : integer, optional\n 'q' - quadrilateral or hexahedral elements\n 't' - triangular or tetrahedral elements\n mtype : integer, optional\n 'v' - volumetric mesh\n 's' - surface mesh\n\n Returns\n -------\n mesh : Mesh instance\n Finite element mesh.\n "
dims = dims.squeeze()
dim = len(dims)
nddims = (nm.array(voxels.shape) + 2)
nodemtx = nm.zeros(nddims, dtype=nm.int8)
vxidxs = nm.where(voxels)
set_nodemtx(nodemtx, vxidxs, etype)
ndidx = nm.where(nodemtx)
del nodemtx
coors = (nm.array(ndidx).transpose() * dims)
nnod = coors.shape[0]
nodeid = (- nm.ones(nddims, dtype=nm.int32))
nodeid[ndidx] = nm.arange(nnod)
if (mtype == 's'):
felems = []
nn = nm.zeros(nddims, dtype=nm.int8)
if (dim == 2):
(ix, iy) = vxidxs
if (mtype == 'v'):
elems = nm.array([nodeid[(ix, iy)], nodeid[((ix + 1), iy)], nodeid[((ix + 1), (iy + 1))], nodeid[(ix, (iy + 1))]]).transpose()
edim = 2
else:
fc = nm.zeros((nddims + (2,)), dtype=nm.int32)
fc[(ix, iy, :)] = nm.array([nodeid[(ix, (iy + 1))], nodeid[(ix, iy)]]).transpose()
fc[((ix + 1), iy, :)] = nm.array([nodeid[((ix + 1), iy)], nodeid[((ix + 1), (iy + 1))]]).transpose()
nn[(ix, iy)] = 1
nn[((ix + 1), iy)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, :)] = nm.array([nodeid[(ix, iy)], nodeid[((ix + 1), iy)]]).transpose()
fc[(ix, (iy + 1), :)] = nm.array([nodeid[((ix + 1), (iy + 1))], nodeid[(ix, (iy + 1))]]).transpose()
nn[(ix, iy)] = 1
nn[(ix, (iy + 1))] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
elems = nm.concatenate(felems)
edim = 1
elif (dim == 3):
(ix, iy, iz) = vxidxs
if (mtype == 'v'):
elems = nm.array([nodeid[(ix, iy, iz)], nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[(ix, (iy + 1), iz)], nodeid[(ix, iy, (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))]]).transpose()
edim = 3
else:
fc = nm.zeros((tuple(nddims) + (4,)), dtype=nm.int32)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[(ix, iy, (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), iz)]]).transpose()
fc[((ix + 1), iy, iz, :)] = nm.array([nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))]]).transpose()
nn[(ix, iy, iz)] = 1
nn[((ix + 1), iy, iz)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[((ix + 1), iy, iz)], nodeid[((ix + 1), iy, (iz + 1))], nodeid[(ix, iy, (iz + 1))]]).transpose()
fc[(ix, (iy + 1), iz, :)] = nm.array([nodeid[(ix, (iy + 1), iz)], nodeid[(ix, (iy + 1), (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[((ix + 1), (iy + 1), iz)]]).transpose()
nn[(ix, iy, iz)] = 1
nn[(ix, (iy + 1), iz)] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
fc.fill(0)
nn.fill(0)
fc[(ix, iy, iz, :)] = nm.array([nodeid[(ix, iy, iz)], nodeid[(ix, (iy + 1), iz)], nodeid[((ix + 1), (iy + 1), iz)], nodeid[((ix + 1), iy, iz)]]).transpose()
fc[(ix, iy, (iz + 1), :)] = nm.array([nodeid[(ix, iy, (iz + 1))], nodeid[((ix + 1), iy, (iz + 1))], nodeid[((ix + 1), (iy + 1), (iz + 1))], nodeid[(ix, (iy + 1), (iz + 1))]]).transpose()
nn[(ix, iy, iz)] = 1
nn[(ix, iy, (iz + 1))] += 1
idx = nm.where((nn == 1))
felems.append(fc[idx])
elems = nm.concatenate(felems)
edim = 2
if (mtype == 's'):
aux = nm.zeros((nnod,), dtype=nm.int32)
for ii in elems.T:
aux[ii] = 1
idx = nm.where(aux)
aux.fill(0)
nnod = idx[0].shape[0]
aux[idx] = range(nnod)
coors = coors[idx]
for ii in range(elems.shape[1]):
elems[(:, ii)] = aux[elems[(:, ii)]]
if (etype == 't'):
elems = elems_q2t(elems)
nelnd = elems.shape[1]
mesh = meshio.Mesh(coors, [(meshio_types[('%d_%d' % (edim, nelnd))], nm.ascontiguousarray(elems))])
return mesh<|docstring|>Generate FE mesh from voxels (volumetric data).
Parameters
----------
voxels : array
Voxel matrix, 1=material.
dims : array
Size of one voxel.
etype : integer, optional
'q' - quadrilateral or hexahedral elements
't' - triangular or tetrahedral elements
mtype : integer, optional
'v' - volumetric mesh
's' - surface mesh
Returns
-------
mesh : Mesh instance
Finite element mesh.<|endoftext|> |
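A hedged usage sketch for gen_mesh_from_voxels: a binary voxel block meshed into hexahedra and written to disk. The import path and filenames are assumptions, and the function additionally relies on module-level helpers (set_nodemtx, elems_q2t, meshio_types) defined elsewhere in seg2fem.py.

import numpy as nm
from dicom2fem.seg2fem import gen_mesh_from_voxels   # import path is an assumption

voxels = nm.zeros((10, 10, 10), dtype=nm.int8)
voxels[2:8, 2:8, 2:8] = 1                  # 1 marks material voxels
dims = nm.array([0.5, 0.5, 0.5])           # physical edge length of one voxel

mesh = gen_mesh_from_voxels(voxels, dims, etype='q', mtype='v')  # hexahedral volume mesh
mesh.write('voxel_cube.vtk')               # meshio.Mesh objects can be exported directly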
31d171a2e3f029f94d4393114e258afe049dc087f6a093104276010362ed45cc | def find_patches_from_slide(slide_path, base_truth_dir=BASE_TRUTH_DIR, filter_non_tissue=True):
'Returns a dataframe of all patches in slide\n input: slide_path: path to WSI file\n output: samples: dataframe with the following columns:\n slide_path: path of slide\n is_tissue: sample contains tissue\n is_tumor: truth status of sample\n tile_loc: coordinates of samples in slide\n \n \n option: base_truth_dir: directory of truth slides\n option: filter_non_tissue: Remove samples no tissue detected\n '
base_truth_dir = Path(base_truth_dir)
slide_contains_tumor = osp.basename(slide_path).startswith('tumor_')
with openslide.open_slide(slide_path) as slide:
thumbnail = slide.get_thumbnail(((slide.dimensions[0] / 256), (slide.dimensions[1] / 256)))
thumbnail_grey = np.array(thumbnail.convert('L'))
thresh = threshold_otsu(thumbnail_grey)
binary = (thumbnail_grey > thresh)
patches = pd.DataFrame(pd.DataFrame(binary).stack())
patches['is_tissue'] = (~ patches[0])
patches.drop(0, axis=1, inplace=True)
patches['slide_path'] = slide_path
if slide_contains_tumor:
truth_slide_path = (base_truth_dir / osp.basename(slide_path).replace('.tif', '_Mask.tif'))
with openslide.open_slide(str(truth_slide_path)) as truth:
thumbnail_truth = truth.get_thumbnail(((truth.dimensions[0] / 256), (truth.dimensions[1] / 256)))
patches_y = pd.DataFrame(pd.DataFrame(np.array(thumbnail_truth.convert('L'))).stack())
patches_y['is_tumor'] = (patches_y[0] > 0)
patches_y.drop(0, axis=1, inplace=True)
samples = pd.concat([patches, patches_y], axis=1)
else:
samples = patches
samples['is_tumor'] = False
if filter_non_tissue:
samples = samples[(samples.is_tissue == True)]
samples['tile_loc'] = list(samples.index)
samples.reset_index(inplace=True, drop=True)
return samples | Returns a dataframe of all patches in slide
input: slide_path: path to WSI file
output: samples: dataframe with the following columns:
slide_path: path of slide
is_tissue: sample contains tissue
is_tumor: truth status of sample
tile_loc: coordinates of samples in slide
option: base_truth_dir: directory of truth slides
option: filter_non_tissue: Remove samples no tissue detected | 4 - Prediction and Evaluation/Prediction_fcn_unet.py | find_patches_from_slide | raktim-mondol/DeepLearningCamelyon | 70 | python | def find_patches_from_slide(slide_path, base_truth_dir=BASE_TRUTH_DIR, filter_non_tissue=True):
'Returns a dataframe of all patches in slide\n input: slide_path: path to WSI file\n output: samples: dataframe with the following columns:\n slide_path: path of slide\n is_tissue: sample contains tissue\n is_tumor: truth status of sample\n tile_loc: coordinates of samples in slide\n \n \n option: base_truth_dir: directory of truth slides\n option: filter_non_tissue: Remove samples no tissue detected\n '
base_truth_dir = Path(base_truth_dir)
slide_contains_tumor = osp.basename(slide_path).startswith('tumor_')
with openslide.open_slide(slide_path) as slide:
thumbnail = slide.get_thumbnail(((slide.dimensions[0] / 256), (slide.dimensions[1] / 256)))
thumbnail_grey = np.array(thumbnail.convert('L'))
thresh = threshold_otsu(thumbnail_grey)
binary = (thumbnail_grey > thresh)
patches = pd.DataFrame(pd.DataFrame(binary).stack())
patches['is_tissue'] = (~ patches[0])
patches.drop(0, axis=1, inplace=True)
patches['slide_path'] = slide_path
if slide_contains_tumor:
truth_slide_path = (base_truth_dir / osp.basename(slide_path).replace('.tif', '_Mask.tif'))
with openslide.open_slide(str(truth_slide_path)) as truth:
thumbnail_truth = truth.get_thumbnail(((truth.dimensions[0] / 256), (truth.dimensions[1] / 256)))
patches_y = pd.DataFrame(pd.DataFrame(np.array(thumbnail_truth.convert('L'))).stack())
patches_y['is_tumor'] = (patches_y[0] > 0)
patches_y.drop(0, axis=1, inplace=True)
samples = pd.concat([patches, patches_y], axis=1)
else:
samples = patches
samples['is_tumor'] = False
if filter_non_tissue:
samples = samples[(samples.is_tissue == True)]
samples['tile_loc'] = list(samples.index)
samples.reset_index(inplace=True, drop=True)
return samples | def find_patches_from_slide(slide_path, base_truth_dir=BASE_TRUTH_DIR, filter_non_tissue=True):
'Returns a dataframe of all patches in slide\n input: slide_path: path to WSI file\n output: samples: dataframe with the following columns:\n slide_path: path of slide\n is_tissue: sample contains tissue\n is_tumor: truth status of sample\n tile_loc: coordinates of samples in slide\n \n \n option: base_truth_dir: directory of truth slides\n option: filter_non_tissue: Remove samples no tissue detected\n '
base_truth_dir = Path(base_truth_dir)
slide_contains_tumor = osp.basename(slide_path).startswith('tumor_')
with openslide.open_slide(slide_path) as slide:
thumbnail = slide.get_thumbnail(((slide.dimensions[0] / 256), (slide.dimensions[1] / 256)))
thumbnail_grey = np.array(thumbnail.convert('L'))
thresh = threshold_otsu(thumbnail_grey)
binary = (thumbnail_grey > thresh)
patches = pd.DataFrame(pd.DataFrame(binary).stack())
patches['is_tissue'] = (~ patches[0])
patches.drop(0, axis=1, inplace=True)
patches['slide_path'] = slide_path
if slide_contains_tumor:
truth_slide_path = (base_truth_dir / osp.basename(slide_path).replace('.tif', '_Mask.tif'))
with openslide.open_slide(str(truth_slide_path)) as truth:
thumbnail_truth = truth.get_thumbnail(((truth.dimensions[0] / 256), (truth.dimensions[1] / 256)))
patches_y = pd.DataFrame(pd.DataFrame(np.array(thumbnail_truth.convert('L'))).stack())
patches_y['is_tumor'] = (patches_y[0] > 0)
patches_y.drop(0, axis=1, inplace=True)
samples = pd.concat([patches, patches_y], axis=1)
else:
samples = patches
samples['is_tumor'] = False
if filter_non_tissue:
samples = samples[(samples.is_tissue == True)]
samples['tile_loc'] = list(samples.index)
samples.reset_index(inplace=True, drop=True)
return samples<|docstring|>Returns a dataframe of all patches in slide
input: slide_path: path to WSI file
output: samples: dataframe with the following columns:
slide_path: path of slide
is_tissue: sample contains tissue
is_tumor: truth status of sample
tile_loc: coordinates of samples in slide
option: base_truth_dir: directory of truth slides
option: filter_non_tissue: Remove samples no tissue detected<|endoftext|> |
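A hedged example of calling find_patches_from_slide from the same script where it and its imports are defined; the paths follow the CAMELYON16 naming implied by the docstring ('tumor_*.tif' slides, '*_Mask.tif' truth) and are assumptions. Each returned row corresponds roughly to one 256x256 tile, since the thumbnail is taken at 1/256 of the slide dimensions.

samples = find_patches_from_slide('/data/slides/tumor_001.tif',
                                  base_truth_dir='/data/masks')   # paths are assumptions

print(len(samples))                          # number of tissue-containing tiles
print(samples.is_tumor.value_counts())       # labels come from tumor_001_Mask.tif
tumor_locs = samples.loc[samples.is_tumor, 'tile_loc']   # (row, col) thumbnail coordinates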
b3d3120d68de8289f6357c527c783e3ef779791c6a0585c07c23a943bbd120ec | def gen_imgs(samples, batch_size, base_truth_dir=BASE_TRUTH_DIR, shuffle=False):
'This function returns a generator that \n yields tuples of (\n X: tensor, float - [batch_size, 256, 256, 3]\n y: tensor, int32 - [batch_size, 256, 256, NUM_CLASSES]\n )\n \n \n input: samples: samples dataframe\n input: batch_size: The number of images to return for each pull\n output: yield (X_train, y_train): generator of X, y tensors\n \n option: base_truth_dir: path, directory of truth slides\n option: shuffle: bool, if True shuffle samples\n '
num_samples = len(samples)
while 1:
if shuffle:
samples = samples.sample(frac=1)
for offset in range(0, num_samples, batch_size):
batch_samples = samples.iloc[offset:(offset + batch_size)]
images = []
masks = []
for (_, batch_sample) in batch_samples.iterrows():
slide_contains_tumor = osp.basename(batch_sample.slide_path).startswith('tumor_')
with openslide.open_slide(batch_sample.slide_path) as slide:
tiles = DeepZoomGenerator(slide, tile_size=256, overlap=0, limit_bounds=False)
img = tiles.get_tile((tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
if slide_contains_tumor:
truth_slide_path = (base_truth_dir / osp.basename(slide_path).replace('.tif', '_Mask.tif'))
with openslide.open_slide(str(truth_slide_path)) as truth:
truth_tiles = DeepZoomGenerator(truth, tile_size=256, overlap=0, limit_bounds=False)
mask = truth_tiles.get_tile((truth_tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
mask = (cv2.cvtColor(np.array(mask), cv2.COLOR_RGB2GRAY) > 0).astype(int)
else:
mask = np.zeros((256, 256))
images.append(np.array(img))
masks.append(mask)
X_train = np.array(images)
y_train = np.array(masks)
y_train = to_categorical(y_train, num_classes=2).reshape(y_train.shape[0], 256, 256, 2)
(yield (X_train, y_train)) | This function returns a generator that
yields tuples of (
X: tensor, float - [batch_size, 256, 256, 3]
y: tensor, int32 - [batch_size, 256, 256, NUM_CLASSES]
)
input: samples: samples dataframe
input: batch_size: The number of images to return for each pull
output: yield (X_train, y_train): generator of X, y tensors
option: base_truth_dir: path, directory of truth slides
option: shuffle: bool, if True shuffle samples | 4 - Prediction and Evaluation/Prediction_fcn_unet.py | gen_imgs | raktim-mondol/DeepLearningCamelyon | 70 | python | def gen_imgs(samples, batch_size, base_truth_dir=BASE_TRUTH_DIR, shuffle=False):
'This function returns a generator that \n yields tuples of (\n X: tensor, float - [batch_size, 256, 256, 3]\n y: tensor, int32 - [batch_size, 256, 256, NUM_CLASSES]\n )\n \n \n input: samples: samples dataframe\n input: batch_size: The number of images to return for each pull\n output: yield (X_train, y_train): generator of X, y tensors\n \n option: base_truth_dir: path, directory of truth slides\n option: shuffle: bool, if True shuffle samples\n '
num_samples = len(samples)
while 1:
if shuffle:
samples = samples.sample(frac=1)
for offset in range(0, num_samples, batch_size):
batch_samples = samples.iloc[offset:(offset + batch_size)]
images = []
masks = []
for (_, batch_sample) in batch_samples.iterrows():
slide_contains_tumor = osp.basename(batch_sample.slide_path).startswith('tumor_')
with openslide.open_slide(batch_sample.slide_path) as slide:
tiles = DeepZoomGenerator(slide, tile_size=256, overlap=0, limit_bounds=False)
img = tiles.get_tile((tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
if slide_contains_tumor:
truth_slide_path = (base_truth_dir / osp.basename(slide_path).replace('.tif', '_Mask.tif'))
with openslide.open_slide(str(truth_slide_path)) as truth:
truth_tiles = DeepZoomGenerator(truth, tile_size=256, overlap=0, limit_bounds=False)
mask = truth_tiles.get_tile((truth_tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
mask = (cv2.cvtColor(np.array(mask), cv2.COLOR_RGB2GRAY) > 0).astype(int)
else:
mask = np.zeros((256, 256))
images.append(np.array(img))
masks.append(mask)
X_train = np.array(images)
y_train = np.array(masks)
y_train = to_categorical(y_train, num_classes=2).reshape(y_train.shape[0], 256, 256, 2)
(yield (X_train, y_train)) | def gen_imgs(samples, batch_size, base_truth_dir=BASE_TRUTH_DIR, shuffle=False):
'This function returns a generator that \n yields tuples of (\n X: tensor, float - [batch_size, 256, 256, 3]\n y: tensor, int32 - [batch_size, 256, 256, NUM_CLASSES]\n )\n \n \n input: samples: samples dataframe\n input: batch_size: The number of images to return for each pull\n output: yield (X_train, y_train): generator of X, y tensors\n \n option: base_truth_dir: path, directory of truth slides\n option: shuffle: bool, if True shuffle samples\n '
num_samples = len(samples)
while 1:
if shuffle:
samples = samples.sample(frac=1)
for offset in range(0, num_samples, batch_size):
batch_samples = samples.iloc[offset:(offset + batch_size)]
images = []
masks = []
for (_, batch_sample) in batch_samples.iterrows():
slide_contains_tumor = osp.basename(batch_sample.slide_path).startswith('tumor_')
with openslide.open_slide(batch_sample.slide_path) as slide:
tiles = DeepZoomGenerator(slide, tile_size=256, overlap=0, limit_bounds=False)
img = tiles.get_tile((tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
if slide_contains_tumor:
truth_slide_path = (base_truth_dir / osp.basename(slide_path).replace('.tif', '_Mask.tif'))
with openslide.open_slide(str(truth_slide_path)) as truth:
truth_tiles = DeepZoomGenerator(truth, tile_size=256, overlap=0, limit_bounds=False)
mask = truth_tiles.get_tile((truth_tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
mask = (cv2.cvtColor(np.array(mask), cv2.COLOR_RGB2GRAY) > 0).astype(int)
else:
mask = np.zeros((256, 256))
images.append(np.array(img))
masks.append(mask)
X_train = np.array(images)
y_train = np.array(masks)
y_train = to_categorical(y_train, num_classes=2).reshape(y_train.shape[0], 256, 256, 2)
(yield (X_train, y_train))<|docstring|>This function returns a generator that
yields tuples of (
X: tensor, float - [batch_size, 256, 256, 3]
y: tensor, int32 - [batch_size, 256, 256, NUM_CLASSES]
)
input: samples: samples dataframe
input: batch_size: The number of images to return for each pull
output: yield (X_train, y_train): generator of X, y tensors
option: base_truth_dir: path, directory of truth slides
option: shuffle: bool, if True shuffle samples<|endoftext|> |
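A sketch of wiring gen_imgs into Keras training; `model` is assumed to be an already-compiled FCN/U-Net, `samples` is the dataframe from find_patches_from_slide, and the split is illustrative. Note that the tumor branch above references a bare `slide_path` rather than `batch_sample.slide_path`, so it appears to depend on a module-level variable of that name being in scope.

BATCH_SIZE = 32
train_samples = samples.sample(frac=0.8, random_state=42)   # assumed 80/20 split
valid_samples = samples.drop(train_samples.index)

train_gen = gen_imgs(train_samples, BATCH_SIZE, shuffle=True)
valid_gen = gen_imgs(valid_samples, BATCH_SIZE)

model.fit_generator(train_gen,
                    steps_per_epoch=len(train_samples) // BATCH_SIZE,
                    validation_data=valid_gen,
                    validation_steps=len(valid_samples) // BATCH_SIZE,
                    epochs=5)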
50bf78de16f994efe389efd8e37c87126eaf6a400cea7b62a2d9a36c39df6206 | @property
def end(self):
"\n Sets the end value for the y axis bins. The last bin may not\n end exactly at this value, we increment the bin edge by `size`\n from `start` until we reach or exceed `end`. Defaults to the\n maximum data value. Like `start`, for dates use a date string,\n and for category data `end` is based on the category serial\n numbers.\n \n The 'end' property accepts values of any type\n\n Returns\n -------\n Any\n "
return self['end'] | Sets the end value for the y axis bins. The last bin may not
end exactly at this value, we increment the bin edge by `size`
from `start` until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use a date string,
and for category data `end` is based on the category serial
numbers.
The 'end' property accepts values of any type
Returns
-------
Any | WatchDogs_Visualisation/oldApps/tweet-map/venv2/lib/python3.7/site-packages/plotly/graph_objs/histogram/__init__.py | end | tnreddy09/WatchDogs_StockMarketAnalysis | 6 | python | @property
def end(self):
"\n Sets the end value for the y axis bins. The last bin may not\n end exactly at this value, we increment the bin edge by `size`\n from `start` until we reach or exceed `end`. Defaults to the\n maximum data value. Like `start`, for dates use a date string,\n and for category data `end` is based on the category serial\n numbers.\n \n The 'end' property accepts values of any type\n\n Returns\n -------\n Any\n "
return self['end'] | @property
def end(self):
"\n Sets the end value for the y axis bins. The last bin may not\n end exactly at this value, we increment the bin edge by `size`\n from `start` until we reach or exceed `end`. Defaults to the\n maximum data value. Like `start`, for dates use a date string,\n and for category data `end` is based on the category serial\n numbers.\n \n The 'end' property accepts values of any type\n\n Returns\n -------\n Any\n "
return self['end']<|docstring|>Sets the end value for the y axis bins. The last bin may not
end exactly at this value, we increment the bin edge by `size`
from `start` until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use a date string,
and for category data `end` is based on the category serial
numbers.
The 'end' property accepts values of any type
Returns
-------
Any<|endoftext|> |
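A short, hedged example of the `end` setting documented above, using placeholder data: bin edges advance from `start` in steps of `size`, so the last edge may land at or just past `end`.

import plotly.graph_objs as go

fig = go.Figure(go.Histogram(
    y=[1.2, 3.4, 2.2, 5.1, 4.8, 2.9],        # placeholder data
    ybins=dict(start=0, end=6, size=1),      # y-axis bin edges 0, 1, ..., 6
))
fig.show()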
0e691e6e2369226b7116d9ec17cc203cca570656d6f1f9b6be188c52c91a32db | @property
def size(self):
'\n Sets the size of each y axis bin. Default behavior: If `nbinsy`\n is 0 or omitted, we choose a nice round bin size such that the\n number of bins is about the same as the typical number of\n samples in each bin. If `nbinsy` is provided, we choose a nice\n round bin size giving no more than that many bins. For date\n data, use milliseconds or "M<n>" for months, as in\n `axis.dtick`. For category data, the number of categories to\n bin together (always defaults to 1). If multiple non-overlaying\n histograms share a subplot, the first explicit `size` is used\n and all others discarded. If no `size` is provided,the sample\n data from all traces is combined to determine `size` as\n described above.\n \n The \'size\' property accepts values of any type\n\n Returns\n -------\n Any\n '
return self['size'] | Sets the size of each y axis bin. Default behavior: If `nbinsy`
is 0 or omitted, we choose a nice round bin size such that the
number of bins is about the same as the typical number of
samples in each bin. If `nbinsy` is provided, we choose a nice
round bin size giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as in
`axis.dtick`. For category data, the number of categories to
bin together (always defaults to 1). If multiple non-overlaying
histograms share a subplot, the first explicit `size` is used
and all others discarded. If no `size` is provided,the sample
data from all traces is combined to determine `size` as
described above.
The 'size' property accepts values of any type
Returns
-------
Any | WatchDogs_Visualisation/oldApps/tweet-map/venv2/lib/python3.7/site-packages/plotly/graph_objs/histogram/__init__.py | size | tnreddy09/WatchDogs_StockMarketAnalysis | 6 | python | @property
def size(self):
'\n Sets the size of each y axis bin. Default behavior: If `nbinsy`\n is 0 or omitted, we choose a nice round bin size such that the\n number of bins is about the same as the typical number of\n samples in each bin. If `nbinsy` is provided, we choose a nice\n round bin size giving no more than that many bins. For date\n data, use milliseconds or "M<n>" for months, as in\n `axis.dtick`. For category data, the number of categories to\n bin together (always defaults to 1). If multiple non-overlaying\n histograms share a subplot, the first explicit `size` is used\n and all others discarded. If no `size` is provided,the sample\n data from all traces is combined to determine `size` as\n described above.\n \n The \'size\' property accepts values of any type\n\n Returns\n -------\n Any\n '
return self['size'] | @property
def size(self):
'\n Sets the size of each y axis bin. Default behavior: If `nbinsy`\n is 0 or omitted, we choose a nice round bin size such that the\n number of bins is about the same as the typical number of\n samples in each bin. If `nbinsy` is provided, we choose a nice\n round bin size giving no more than that many bins. For date\n data, use milliseconds or "M<n>" for months, as in\n `axis.dtick`. For category data, the number of categories to\n bin together (always defaults to 1). If multiple non-overlaying\n histograms share a subplot, the first explicit `size` is used\n and all others discarded. If no `size` is provided,the sample\n data from all traces is combined to determine `size` as\n described above.\n \n The \'size\' property accepts values of any type\n\n Returns\n -------\n Any\n '
return self['size']<|docstring|>Sets the size of each y axis bin. Default behavior: If `nbinsy`
is 0 or omitted, we choose a nice round bin size such that the
number of bins is about the same as the typical number of
samples in each bin. If `nbinsy` is provided, we choose a nice
round bin size giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as in
`axis.dtick`. For category data, the number of categories to
bin together (always defaults to 1). If multiple non-overlaying
histograms share a subplot, the first explicit `size` is used
and all others discarded. If no `size` is provided,the sample
data from all traces is combined to determine `size` as
described above.
The 'size' property accepts values of any type
Returns
-------
Any<|endoftext|> |
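The companion `size` setting can be given explicitly, overriding nbinsy-based autobinning; a hedged sketch with placeholder data, using the YBins helper from the module shown in the path above:

import plotly.graph_objs as go

fig = go.Figure(go.Histogram(
    y=[0.3, 1.1, 1.9, 2.4, 3.8, 4.2, 4.9],                # placeholder data
    ybins=go.histogram.YBins(start=0, end=5, size=0.5),   # ten bins of width 0.5
))
# For date data, size also accepts "M<n>" strings, e.g. size="M1" for monthly bins.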